Linux Audio

Check our new training course

Loading...
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
   4 * Authors: Rusty Russell <rusty@rustcorp.com.au>
   5 *          Christoffer Dall <c.dall@virtualopensystems.com>
 
 
 
 
 
 
 
 
 
 
 
 
 
   6 */
   7
   8#include <linux/bsearch.h>
   9#include <linux/mm.h>
  10#include <linux/kvm_host.h>
  11#include <linux/uaccess.h>
  12#include <asm/kvm_arm.h>
  13#include <asm/kvm_host.h>
  14#include <asm/kvm_emulate.h>
  15#include <asm/kvm_coproc.h>
  16#include <asm/kvm_mmu.h>
  17#include <asm/cacheflush.h>
  18#include <asm/cputype.h>
  19#include <trace/events/kvm.h>
  20#include <asm/vfp.h>
  21#include "../vfp/vfpinstr.h"
  22
  23#define CREATE_TRACE_POINTS
  24#include "trace.h"
  25#include "coproc.h"
  26
  27
  28/******************************************************************************
  29 * Co-processor emulation
  30 *****************************************************************************/
  31
  32static bool write_to_read_only(struct kvm_vcpu *vcpu,
  33			       const struct coproc_params *params)
  34{
  35	WARN_ONCE(1, "CP15 write to read-only register\n");
  36	print_cp_instr(params);
  37	kvm_inject_undefined(vcpu);
  38	return false;
  39}
  40
  41static bool read_from_write_only(struct kvm_vcpu *vcpu,
  42				 const struct coproc_params *params)
  43{
  44	WARN_ONCE(1, "CP15 read to write-only register\n");
  45	print_cp_instr(params);
  46	kvm_inject_undefined(vcpu);
  47	return false;
  48}
  49
/*
 * Snapshot of the host cache hierarchy: 3 bits per cache level, as per
 * CLIDR, but non-existent caches always 0. Consulted by is_valid_cache()
 * when user space demuxes CCSIDR values.
 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12
  55
  56/*
  57 * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some
  58 * of cp15 registers can be viewed either as couple of two u32 registers
  59 * or one u64 register. Current u64 register encoding is that least
  60 * significant u32 word is followed by most significant u32 word.
  61 */
  62static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
  63				       const struct coproc_reg *r,
  64				       u64 val)
  65{
  66	vcpu_cp15(vcpu, r->reg) = val & 0xffffffff;
  67	vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
  68}
  69
  70static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
  71				      const struct coproc_reg *r)
  72{
  73	u64 val;
  74
  75	val = vcpu_cp15(vcpu, r->reg + 1);
  76	val = val << 32;
  77	val = val | vcpu_cp15(vcpu, r->reg);
  78	return val;
  79}
  80
  81int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
  82{
  83	kvm_inject_undefined(vcpu);
  84	return 1;
  85}
  86
  87int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
  88{
  89	/*
  90	 * We can get here, if the host has been built without VFPv3 support,
  91	 * but the guest attempted a floating point operation.
  92	 */
  93	kvm_inject_undefined(vcpu);
  94	return 1;
  95}
  96
  97int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
  98{
  99	kvm_inject_undefined(vcpu);
 100	return 1;
 101}
 102
 
 
 
 
 
 
/* Reset handler for the guest's virtual MPIDR (c0_MPIDR). */
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	/*
	 * Compute guest MPIDR. We build a virtual cluster out of the
	 * vcpu_id, but we read the 'U' bit from the underlying
	 * hardware directly.
	 *
	 * Layout: four vcpus per cluster — the low 2 bits of vcpu_id
	 * become Aff0, the remaining bits are shifted up into the next
	 * affinity level.
	 */
	vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
				     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
				     (vcpu->vcpu_id & 3));
}
 114
 115/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
 116static bool access_actlr(struct kvm_vcpu *vcpu,
 117			 const struct coproc_params *p,
 118			 const struct coproc_reg *r)
 119{
 120	if (p->is_write)
 121		return ignore_write(vcpu, p);
 122
 123	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
 124	return true;
 125}
 126
 127/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
 128static bool access_cbar(struct kvm_vcpu *vcpu,
 129			const struct coproc_params *p,
 130			const struct coproc_reg *r)
 131{
 132	if (p->is_write)
 133		return write_to_read_only(vcpu, p);
 134	return read_zero(vcpu, p);
 135}
 136
 137/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
 138static bool access_l2ctlr(struct kvm_vcpu *vcpu,
 139			  const struct coproc_params *p,
 140			  const struct coproc_reg *r)
 141{
 142	if (p->is_write)
 143		return ignore_write(vcpu, p);
 144
 145	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
 146	return true;
 147}
 148
/*
 * Reset handler for the shadowed L2CTLR: start from the host's value but
 * patch the "number of cores" field (bits [25:24]) to reflect the guest's
 * vcpu topology rather than the host's.
 */
static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 l2ctlr, ncores;

	/* Read the host's L2CTLR (p15, 1, c9, c0, 2). */
	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
	l2ctlr &= ~(3 << 24);
	/* Field encodes cores-minus-one, hence the -1. */
	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
	/* How many cores in the current cluster and the next ones */
	ncores -= (vcpu->vcpu_id & ~3);
	/* Cap it to the maximum number of cores in a single cluster */
	ncores = min(ncores, 3U);
	l2ctlr |= (ncores & 3) << 24;

	vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr;
}
 164
/*
 * Reset handler for the shadowed ACTLR: start from the host's value and
 * force the SMP bit (bit 6) to match whether the guest has more than one
 * vcpu.
 */
static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 actlr;

	/* ACTLR contains SMP bit: make sure you create all cpus first! */
	asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
	/* Make the SMP bit consistent with the guest configuration */
	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
		actlr |= 1U << 6;
	else
		actlr &= ~(1U << 6);

	vcpu_cp15(vcpu, c1_ACTLR) = actlr;
}
 179
 180/*
 181 * TRM entries: A7:4.3.50, A15:4.3.49
 182 * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
 183 */
 184static bool access_l2ectlr(struct kvm_vcpu *vcpu,
 185			   const struct coproc_params *p,
 186			   const struct coproc_reg *r)
 187{
 188	if (p->is_write)
 189		return ignore_write(vcpu, p);
 190
 191	*vcpu_reg(vcpu, p->Rt1) = 0;
 192	return true;
 193}
 194
 195/*
 196 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 197 */
 198static bool access_dcsw(struct kvm_vcpu *vcpu,
 199			const struct coproc_params *p,
 200			const struct coproc_reg *r)
 201{
 202	if (!p->is_write)
 203		return read_from_write_only(vcpu, p);
 204
 205	kvm_set_way_flush(vcpu);
 206	return true;
 207}
 208
 209/*
 210 * Generic accessor for VM registers. Only called as long as HCR_TVM
 211 * is set.  If the guest enables the MMU, we stop trapping the VM
 212 * sys_regs and leave it in complete control of the caches.
 213 *
 214 * Used by the cpu-specific code.
 215 */
 216bool access_vm_reg(struct kvm_vcpu *vcpu,
 217		   const struct coproc_params *p,
 218		   const struct coproc_reg *r)
 219{
 220	bool was_enabled = vcpu_has_cache_enabled(vcpu);
 221
 222	BUG_ON(!p->is_write);
 223
 224	vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
 225	if (p->is_64bit)
 226		vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);
 227
 228	kvm_toggle_cache(vcpu, was_enabled);
 229	return true;
 230}
 231
 232static bool access_gic_sgi(struct kvm_vcpu *vcpu,
 233			   const struct coproc_params *p,
 234			   const struct coproc_reg *r)
 235{
 236	u64 reg;
 237	bool g1;
 238
 239	if (!p->is_write)
 240		return read_from_write_only(vcpu, p);
 241
 242	reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
 243	reg |= *vcpu_reg(vcpu, p->Rt1) ;
 244
 245	/*
 246	 * In a system where GICD_CTLR.DS=1, a ICC_SGI0R access generates
 247	 * Group0 SGIs only, while ICC_SGI1R can generate either group,
 248	 * depending on the SGI configuration. ICC_ASGI1R is effectively
 249	 * equivalent to ICC_SGI0R, as there is no "alternative" secure
 250	 * group.
 251	 */
 252	switch (p->Op1) {
 253	default:		/* Keep GCC quiet */
 254	case 0:			/* ICC_SGI1R */
 255		g1 = true;
 256		break;
 257	case 1:			/* ICC_ASGI1R */
 258	case 2:			/* ICC_SGI0R */
 259		g1 = false;
 260		break;
 261	}
 262
 263	vgic_v3_dispatch_sgi(vcpu, reg, g1);
 264
 265	return true;
 266}
 267
 268static bool access_gic_sre(struct kvm_vcpu *vcpu,
 269			   const struct coproc_params *p,
 270			   const struct coproc_reg *r)
 271{
 272	if (p->is_write)
 273		return ignore_write(vcpu, p);
 274
 275	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
 276
 277	return true;
 278}
 279
 280static bool access_cntp_tval(struct kvm_vcpu *vcpu,
 281			     const struct coproc_params *p,
 282			     const struct coproc_reg *r)
 283{
 284	u32 val;
 285
 286	if (p->is_write) {
 287		val = *vcpu_reg(vcpu, p->Rt1);
 288		kvm_arm_timer_write_sysreg(vcpu,
 289					   TIMER_PTIMER, TIMER_REG_TVAL, val);
 290	} else {
 291		val = kvm_arm_timer_read_sysreg(vcpu,
 292						TIMER_PTIMER, TIMER_REG_TVAL);
 293		*vcpu_reg(vcpu, p->Rt1) = val;
 294	}
 295
 296	return true;
 297}
 298
 299static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
 300			    const struct coproc_params *p,
 301			    const struct coproc_reg *r)
 302{
 303	u32 val;
 304
 305	if (p->is_write) {
 306		val = *vcpu_reg(vcpu, p->Rt1);
 307		kvm_arm_timer_write_sysreg(vcpu,
 308					   TIMER_PTIMER, TIMER_REG_CTL, val);
 309	} else {
 310		val = kvm_arm_timer_read_sysreg(vcpu,
 311						TIMER_PTIMER, TIMER_REG_CTL);
 312		*vcpu_reg(vcpu, p->Rt1) = val;
 313	}
 314
 315	return true;
 316}
 317
 318static bool access_cntp_cval(struct kvm_vcpu *vcpu,
 319			     const struct coproc_params *p,
 320			     const struct coproc_reg *r)
 321{
 322	u64 val;
 323
 324	if (p->is_write) {
 325		val = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
 326		val |= *vcpu_reg(vcpu, p->Rt1);
 327		kvm_arm_timer_write_sysreg(vcpu,
 328					   TIMER_PTIMER, TIMER_REG_CVAL, val);
 329	} else {
 330		val = kvm_arm_timer_read_sysreg(vcpu,
 331						TIMER_PTIMER, TIMER_REG_CVAL);
 332		*vcpu_reg(vcpu, p->Rt1) = val;
 333		*vcpu_reg(vcpu, p->Rt2) = val >> 32;
 334	}
 335
 336	return true;
 337}
 338
 339/*
 340 * We could trap ID_DFR0 and tell the guest we don't support performance
 341 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 342 * NAKed, so it will read the PMCR anyway.
 343 *
 344 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 345 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 346 * all PM registers, which doesn't crash the guest kernel at least.
 347 */
 348static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 349		    const struct coproc_params *p,
 350		    const struct coproc_reg *r)
 351{
 352	if (p->is_write)
 353		return ignore_write(vcpu, p);
 354	else
 355		return read_zero(vcpu, p);
 356}
 357
 358#define access_pmcr trap_raz_wi
 359#define access_pmcntenset trap_raz_wi
 360#define access_pmcntenclr trap_raz_wi
 361#define access_pmovsr trap_raz_wi
 362#define access_pmselr trap_raz_wi
 363#define access_pmceid0 trap_raz_wi
 364#define access_pmceid1 trap_raz_wi
 365#define access_pmccntr trap_raz_wi
 366#define access_pmxevtyper trap_raz_wi
 367#define access_pmxevcntr trap_raz_wi
 368#define access_pmuserenr trap_raz_wi
 369#define access_pmintenset trap_raz_wi
 370#define access_pmintenclr trap_raz_wi
 371
/* Architected CP15 registers.
 * CRn denotes the primary register number, but is copied to the CRm in the
 * user space API for 64-bit register access in line with the terminology used
 * in the ARM ARM.
 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
 *            registers preceding 32-bit ones.  check_reg_table() enforces
 *            this and find_reg() relies on it for bsearch().
 */
static const struct coproc_reg cp15_regs[] = {
	/* MPIDR: we use VMPIDR for guest access. */
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
			NULL, reset_mpidr, c0_MPIDR },

	/* CSSELR: swapped by interrupt.S. */
	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
			NULL, reset_unknown, c0_CSSELR },

	/* ACTLR: trapped by HCR.TAC bit. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
			access_actlr, reset_actlr, c1_ACTLR },

	/* CPACR: swapped by interrupt.S. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_val, c1_CPACR, 0x00000000 },

	/* TTBR0/TTBR1/TTBCR: swapped by interrupt.S.
	 * Note the interleaving: each 64-bit entry sorts immediately before
	 * the 32-bit entries sharing its match value (see reg_to_match_value).
	 */
	{ CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
	{ CRn(2), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c2_TTBR0 },
	{ CRn(2), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c2_TTBR1 },
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
			access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
	{ CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },


	/* DACR: swapped by interrupt.S. */
	{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c3_DACR },

	/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c5_DFSR },
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c5_IFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c5_ADFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c5_AIFSR },

	/* DFAR/IFAR: swapped by interrupt.S. */
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c6_DFAR },
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
			access_vm_reg, reset_unknown, c6_IFAR },

	/* PAR swapped by interrupt.S */
	{ CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
	/*
	 * L2CTLR access (guest wants to know #CPUs).
	 */
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
			access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},

	/*
	 * Dummy performance monitor implementation (all RAZ/WI via
	 * trap_raz_wi; see the comment above that function).
	 */
	{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},

	/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
	{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c10_PRRR},
	{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c10_NMRR},

	/* AMAIR0/AMAIR1: swapped by interrupt.S. */
	{ CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c10_AMAIR0},
	{ CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c10_AMAIR1},

	/* ICC_SGI1R */
	{ CRm64(12), Op1( 0), is64, access_gic_sgi},

	/* VBAR: swapped by interrupt.S. */
	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c12_VBAR, 0x00000000 },

	/* ICC_ASGI1R */
	{ CRm64(12), Op1( 1), is64, access_gic_sgi},
	/* ICC_SGI0R */
	{ CRm64(12), Op1( 2), is64, access_gic_sgi},
	/* ICC_SRE */
	{ CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre },

	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_val, c13_CID, 0x00000000 },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c13_TID_URW },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
			NULL, reset_unknown, c13_TID_URO },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
			NULL, reset_unknown, c13_TID_PRIV },

	/* CNTP */
	{ CRm64(14), Op1( 2), is64, access_cntp_cval},

	/* CNTKCTL: swapped by interrupt.S. */
	{ CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c14_CNTKCTL, 0x00000000 },

	/* CNTP */
	{ CRn(14), CRm( 2), Op1( 0), Op2( 0), is32, access_cntp_tval },
	{ CRn(14), CRm( 2), Op1( 0), Op2( 1), is32, access_cntp_ctl },

	/* The Configuration Base Address Register. */
	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};
 510
 511static int check_reg_table(const struct coproc_reg *table, unsigned int n)
 512{
 513	unsigned int i;
 514
 515	for (i = 1; i < n; i++) {
 516		if (cmp_reg(&table[i-1], &table[i]) >= 0) {
 517			kvm_err("reg table %p out of order (%d)\n", table, i - 1);
 518			return 1;
 519		}
 520	}
 521
 522	return 0;
 523}
 524
 525/* Target specific emulation tables */
 526static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
 527
 528void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
 529{
 530	BUG_ON(check_reg_table(table->table, table->num));
 531	target_tables[table->target] = table;
 532}
 533
 534/* Get specific register table for this target. */
 535static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
 536{
 537	struct kvm_coproc_target_table *table;
 538
 539	table = target_tables[target];
 540	*num = table->num;
 541	return table->table;
 542}
 543
/*
 * Pack a register coordinate (CRn, CRm, Op1, Op2, width) into a single
 * scalar for ordered comparison.  The inverted is_64bit flag is the least
 * significant bit, which makes 64-bit entries sort *before* 32-bit ones
 * with the same coordinates — matching the cp15_regs ordering rule.
 * Works on both coproc_params and coproc_reg (same field names).
 */
#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val  = (x)->CRn << 11;					\
		val |= (x)->CRm << 7;					\
		val |= (x)->Op1 << 4;					\
		val |= (x)->Op2 << 1;					\
		val |= !(x)->is_64bit;					\
		val;							\
	 })
 554
 555static int match_reg(const void *key, const void *elt)
 556{
 557	const unsigned long pval = (unsigned long)key;
 558	const struct coproc_reg *r = elt;
 559
 560	return pval - reg_to_match_value(r);
 561}
 562
 563static const struct coproc_reg *find_reg(const struct coproc_params *params,
 564					 const struct coproc_reg table[],
 565					 unsigned int num)
 566{
 567	unsigned long pval = reg_to_match_value(params);
 568
 569	return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
 570}
 571
 572static int emulate_cp15(struct kvm_vcpu *vcpu,
 573			const struct coproc_params *params)
 574{
 575	size_t num;
 576	const struct coproc_reg *table, *r;
 577
 578	trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
 579				   params->CRm, params->Op2, params->is_write);
 580
 581	table = get_target_table(vcpu->arch.target, &num);
 582
 583	/* Search target-specific then generic table. */
 584	r = find_reg(params, table, num);
 585	if (!r)
 586		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));
 587
 588	if (likely(r)) {
 589		/* If we don't have an accessor, we should never get here! */
 590		BUG_ON(!r->access);
 591
 592		if (likely(r->access(vcpu, params, r))) {
 593			/* Skip instruction, since it was emulated */
 594			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 
 595		}
 596	} else {
 597		/* If access function fails, it should complain. */
 598		kvm_err("Unsupported guest CP15 access at: %08lx [%08lx]\n",
 599			*vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
 
 600		print_cp_instr(params);
 601		kvm_inject_undefined(vcpu);
 602	}
 603
 604	return 1;
 605}
 606
 607static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
 
 
 
 
 
 608{
 609	struct coproc_params params;
 610
 611	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
 612	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
 613	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
 614	params.is_64bit = true;
 615
 616	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
 617	params.Op2 = 0;
 618	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
 619	params.CRm = 0;
 620
 621	return params;
 622}
 623
 624/**
 625 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 626 * @vcpu: The VCPU pointer
 627 * @run:  The kvm_run struct
 628 */
 629int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 630{
 631	struct coproc_params params = decode_64bit_hsr(vcpu);
 632
 633	return emulate_cp15(vcpu, &params);
 634}
 635
 636/**
 637 * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
 638 * @vcpu: The VCPU pointer
 639 * @run:  The kvm_run struct
 640 */
 641int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 642{
 643	struct coproc_params params = decode_64bit_hsr(vcpu);
 644
 645	/* raz_wi cp14 */
 646	trap_raz_wi(vcpu, &params, NULL);
 647
 648	/* handled */
 649	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 650	return 1;
 651}
 652
 653static void reset_coproc_regs(struct kvm_vcpu *vcpu,
 654			      const struct coproc_reg *table, size_t num,
 655			      unsigned long *bmap)
 656{
 657	unsigned long i;
 658
 659	for (i = 0; i < num; i++)
 660		if (table[i].reset) {
 661			int reg = table[i].reg;
 662
 663			table[i].reset(vcpu, &table[i]);
 664			if (reg > 0 && reg < NR_CP15_REGS) {
 665				set_bit(reg, bmap);
 666				if (table[i].is_64bit)
 667					set_bit(reg + 1, bmap);
 668			}
 669		}
 670}
 671
 672static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
 
 
 
 
 
 673{
 674	struct coproc_params params;
 675
 676	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
 677	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
 678	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
 679	params.is_64bit = false;
 680
 681	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
 682	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
 683	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
 684	params.Rt2 = 0;
 685
 686	return params;
 687}
 688
 689/**
 690 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 691 * @vcpu: The VCPU pointer
 692 * @run:  The kvm_run struct
 693 */
 694int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 695{
 696	struct coproc_params params = decode_32bit_hsr(vcpu);
 697	return emulate_cp15(vcpu, &params);
 698}
 699
 700/**
 701 * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
 702 * @vcpu: The VCPU pointer
 703 * @run:  The kvm_run struct
 704 */
 705int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 706{
 707	struct coproc_params params = decode_32bit_hsr(vcpu);
 708
 709	/* raz_wi cp14 */
 710	trap_raz_wi(vcpu, &params, NULL);
 711
 712	/* handled */
 713	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 714	return 1;
 715}
 716
 717/******************************************************************************
 718 * Userspace API
 719 *****************************************************************************/
 720
 721static bool index_to_params(u64 id, struct coproc_params *params)
 722{
 723	switch (id & KVM_REG_SIZE_MASK) {
 724	case KVM_REG_SIZE_U32:
 725		/* Any unused index bits means it's not valid. */
 726		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
 727			   | KVM_REG_ARM_COPROC_MASK
 728			   | KVM_REG_ARM_32_CRN_MASK
 729			   | KVM_REG_ARM_CRM_MASK
 730			   | KVM_REG_ARM_OPC1_MASK
 731			   | KVM_REG_ARM_32_OPC2_MASK))
 732			return false;
 733
 734		params->is_64bit = false;
 735		params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
 736			       >> KVM_REG_ARM_32_CRN_SHIFT);
 737		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
 738			       >> KVM_REG_ARM_CRM_SHIFT);
 739		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
 740			       >> KVM_REG_ARM_OPC1_SHIFT);
 741		params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
 742			       >> KVM_REG_ARM_32_OPC2_SHIFT);
 743		return true;
 744	case KVM_REG_SIZE_U64:
 745		/* Any unused index bits means it's not valid. */
 746		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
 747			      | KVM_REG_ARM_COPROC_MASK
 748			      | KVM_REG_ARM_CRM_MASK
 749			      | KVM_REG_ARM_OPC1_MASK))
 750			return false;
 751		params->is_64bit = true;
 752		/* CRm to CRn: see cp15_to_index for details */
 753		params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
 754			       >> KVM_REG_ARM_CRM_SHIFT);
 755		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
 756			       >> KVM_REG_ARM_OPC1_SHIFT);
 757		params->Op2 = 0;
 758		params->CRm = 0;
 759		return true;
 760	default:
 761		return false;
 762	}
 763}
 764
 765/* Decode an index value, and find the cp15 coproc_reg entry. */
 766static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
 767						    u64 id)
 768{
 769	size_t num;
 770	const struct coproc_reg *table, *r;
 771	struct coproc_params params;
 772
 773	/* We only do cp15 for now. */
 774	if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
 775		return NULL;
 776
 777	if (!index_to_params(id, &params))
 778		return NULL;
 779
 780	table = get_target_table(vcpu->arch.target, &num);
 781	r = find_reg(&params, table, num);
 782	if (!r)
 783		r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));
 784
 785	/* Not saved in the cp15 array? */
 786	if (r && !r->reg)
 787		r = NULL;
 788
 789	return r;
 790}
 791
 792/*
 793 * These are the invariant cp15 registers: we let the guest see the host
 794 * versions of these, so they're part of the guest state.
 795 *
 796 * A future CPU may provide a mechanism to present different values to
 797 * the guest, or a future kvm may trap them.
 798 */
 799/* Unfortunately, there's no register-argument for mrc, so generate. */
 800#define FUNCTION_FOR32(crn, crm, op1, op2, name)			\
 801	static void get_##name(struct kvm_vcpu *v,			\
 802			       const struct coproc_reg *r)		\
 803	{								\
 804		u32 val;						\
 805									\
 806		asm volatile("mrc p15, " __stringify(op1)		\
 807			     ", %0, c" __stringify(crn)			\
 808			     ", c" __stringify(crm)			\
 809			     ", " __stringify(op2) "\n" : "=r" (val));	\
 810		((struct coproc_reg *)r)->val = val;			\
 811	}
 812
 813FUNCTION_FOR32(0, 0, 0, 0, MIDR)
 814FUNCTION_FOR32(0, 0, 0, 1, CTR)
 815FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
 816FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
 817FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
 818FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
 819FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
 820FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
 821FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
 822FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
 823FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
 824FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
 825FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
 826FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
 827FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
 828FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
 829FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
 830FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
 831FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
 832FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
 833FUNCTION_FOR32(0, 0, 1, 7, AIDR)
 834
/* ->val is filled in by kvm_invariant_coproc_table_init() */
/* Must stay sorted like cp15_regs: find_reg() bsearches this table too. */
static struct coproc_reg invariant_cp15[] = {
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },

	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },

	{ CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },

	{ CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
};
 862
 863/*
 864 * Reads a register value from a userspace address to a kernel
 865 * variable. Make sure that register size matches sizeof(*__val).
 866 */
 867static int reg_from_user(void *val, const void __user *uaddr, u64 id)
 868{
 869	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
 870		return -EFAULT;
 871	return 0;
 872}
 873
 874/*
 875 * Writes a register value to a userspace address from a kernel variable.
 876 * Make sure that register size matches sizeof(*__val).
 877 */
 878static int reg_to_user(void __user *uaddr, const void *val, u64 id)
 879{
 880	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
 881		return -EFAULT;
 882	return 0;
 883}
 884
 885static int get_invariant_cp15(u64 id, void __user *uaddr)
 886{
 887	struct coproc_params params;
 888	const struct coproc_reg *r;
 889	int ret;
 890
 891	if (!index_to_params(id, &params))
 892		return -ENOENT;
 893
 894	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
 895	if (!r)
 896		return -ENOENT;
 897
 898	ret = -ENOENT;
 899	if (KVM_REG_SIZE(id) == 4) {
 900		u32 val = r->val;
 901
 902		ret = reg_to_user(uaddr, &val, id);
 903	} else if (KVM_REG_SIZE(id) == 8) {
 904		ret = reg_to_user(uaddr, &r->val, id);
 905	}
 906	return ret;
 907}
 908
 909static int set_invariant_cp15(u64 id, void __user *uaddr)
 910{
 911	struct coproc_params params;
 912	const struct coproc_reg *r;
 913	int err;
 914	u64 val;
 915
 916	if (!index_to_params(id, &params))
 917		return -ENOENT;
 918	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
 919	if (!r)
 920		return -ENOENT;
 921
 922	err = -ENOENT;
 923	if (KVM_REG_SIZE(id) == 4) {
 924		u32 val32;
 925
 926		err = reg_from_user(&val32, uaddr, id);
 927		if (!err)
 928			val = val32;
 929	} else if (KVM_REG_SIZE(id) == 8) {
 930		err = reg_from_user(&val, uaddr, id);
 931	}
 932	if (err)
 933		return err;
 934
 935	/* This is what we mean by invariant: you can't change it. */
 936	if (r->val != val)
 937		return -EINVAL;
 938
 939	return 0;
 940}
 941
 942static bool is_valid_cache(u32 val)
 943{
 944	u32 level, ctype;
 945
 946	if (val >= CSSELR_MAX)
 947		return false;
 948
 949	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
 950        level = (val >> 1);
 951        ctype = (cache_levels >> (level * 3)) & 7;
 952
 953	switch (ctype) {
 954	case 0: /* No cache */
 955		return false;
 956	case 1: /* Instruction cache only */
 957		return (val & 1);
 958	case 2: /* Data cache only */
 959	case 4: /* Unified cache */
 960		return !(val & 1);
 961	case 3: /* Separate instruction and data caches */
 962		return true;
 963	default: /* Reserved: we can't know instruction or data. */
 964		return false;
 965	}
 966}
 967
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/*
	 * Make sure noone else changes CSSELR during this!  The write to
	 * CSSELR and the read of CCSIDR must not be separated by anything
	 * that could itself touch CSSELR on this CPU.
	 */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
	/* Barrier so the CCSIDR read observes the new CSSELR selection. */
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}
 984
/*
 * Read a "demuxed" (complex-addressing) register via KVM_GET_ONE_REG.
 * Only KVM_REG_ARM_DEMUX_ID_CCSIDR is handled: the CSSELR index is
 * taken from the id's VAL field and the host CCSIDR for that cache is
 * copied to userspace.  Returns -ENOENT for unknown bits, wrong size
 * or a nonexistent cache.
 */
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		/* CCSIDR is a 32-bit register. */
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

/*
 * Write a demuxed register.  CCSIDR values are effectively read-only:
 * the write succeeds only when the new value matches the host value
 * (-EINVAL otherwise), mirroring the invariant-register handling so
 * VM save/restore between identical hosts works.
 */
static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
1040
#ifdef CONFIG_VFPv3
/* ONE_REG ids of the VFP control ("system") registers we expose. */
static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
				   KVM_REG_ARM_VFP_FPSCR,
				   KVM_REG_ARM_VFP_FPINST,
				   KVM_REG_ARM_VFP_FPINST2,
				   KVM_REG_ARM_VFP_MVFR0,
				   KVM_REG_ARM_VFP_MVFR1,
				   KVM_REG_ARM_VFP_FPSID };

/*
 * Number of 64-bit FP data registers: 32 when MVFR0 advertises
 * A_SIMD level 2, else 16.
 */
static unsigned int num_fp_regs(void)
{
	if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
		return 32;
	else
		return 16;
}

/* Total VFP registers exposed via ONE_REG: data regs + control regs. */
static unsigned int num_vfp_regs(void)
{
	/* Normal FP regs + control regs. */
	return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
}

/*
 * Write all VFP register indices to @uindices (data regs as 64-bit
 * ids, control regs as 32-bit ids).  Returns the number of indices
 * written, or -EFAULT on a userspace copy failure.
 */
static int copy_vfp_regids(u64 __user *uindices)
{
	unsigned int i;
	const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
	const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;

	for (i = 0; i < num_fp_regs(); i++) {
		if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
			     uindices))
			return -EFAULT;
		uindices++;
	}

	for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
		if (put_user(u32reg | vfp_sysregs[i], uindices))
			return -EFAULT;
		uindices++;
	}

	return num_vfp_regs();
}

/*
 * KVM_GET_ONE_REG for a VFP register: data regs come from the vcpu
 * context; MVFR0/MVFR1/FPSID are read live from the host hardware
 * (they are invariant).  Returns -ENOENT for bad bits, size or id.
 */
static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		/* Data registers are 64 bit. */
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
				   id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
	case KVM_REG_ARM_VFP_MVFR0:
		val = fmrx(MVFR0);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_MVFR1:
		val = fmrx(MVFR1);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_FPSID:
		val = fmrx(FPSID);
		return reg_to_user(uaddr, &val, id);
	default:
		return -ENOENT;
	}
}

/*
 * KVM_SET_ONE_REG for a VFP register.  Writable registers go into the
 * vcpu context; MVFR0/MVFR1/FPSID are invariant and only accept a
 * value identical to the host's (-EINVAL otherwise).
 */
static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		/* Data registers are 64 bit. */
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
				     uaddr, id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
	/* These are invariant. */
	case KVM_REG_ARM_VFP_MVFR0:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR0))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_MVFR1:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR1))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_FPSID:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(FPSID))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
#else /* !CONFIG_VFPv3 */
/* Without VFPv3 support we expose no VFP registers at all. */
static unsigned int num_vfp_regs(void)
{
	return 0;
}

static int copy_vfp_regids(u64 __user *uindices)
{
	return 0;
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	return -ENOENT;
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	return -ENOENT;
}
#endif /* !CONFIG_VFPv3 */
1204
/*
 * KVM_GET_ONE_REG entry point for coprocessor-space registers:
 * dispatches demux (CCSIDR) and VFP ids, then looks the id up in the
 * target-specific/generic cp15 tables, falling back to the invariant
 * table for registers we don't emulate.  Returns 0 or a -errno.
 */
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct coproc_reg *r;
	void __user *uaddr = (void __user *)(long)reg->addr;
	int ret;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
		return vfp_get_reg(vcpu, reg->id, uaddr);

	r = index_to_coproc_reg(vcpu, reg->id);
	if (!r)
		return get_invariant_cp15(reg->id, uaddr);

	ret = -ENOENT;
	if (KVM_REG_SIZE(reg->id) == 8) {
		/* 64-bit registers span two consecutive u32 cp15 slots. */
		u64 val;

		val = vcpu_cp15_reg64_get(vcpu, r);
		ret = reg_to_user(uaddr, &val, reg->id);
	} else if (KVM_REG_SIZE(reg->id) == 4) {
		ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id);
	}

	return ret;
}

/*
 * KVM_SET_ONE_REG counterpart of the above; same dispatch order.
 * Invariant registers only accept their fixed value.
 */
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct coproc_reg *r;
	void __user *uaddr = (void __user *)(long)reg->addr;
	int ret;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
		return vfp_set_reg(vcpu, reg->id, uaddr);

	r = index_to_coproc_reg(vcpu, reg->id);
	if (!r)
		return set_invariant_cp15(reg->id, uaddr);

	ret = -ENOENT;
	if (KVM_REG_SIZE(reg->id) == 8) {
		u64 val;

		ret = reg_from_user(&val, uaddr, reg->id);
		if (!ret)
			vcpu_cp15_reg64_set(vcpu, r, val);
	} else if (KVM_REG_SIZE(reg->id) == 4) {
		ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id);
	}

	return ret;
}
1263
1264static unsigned int num_demux_regs(void)
1265{
1266	unsigned int i, count = 0;
1267
1268	for (i = 0; i < CSSELR_MAX; i++)
1269		if (is_valid_cache(i))
1270			count++;
1271
1272	return count;
1273}
1274
1275static int write_demux_regids(u64 __user *uindices)
1276{
1277	u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
1278	unsigned int i;
1279
1280	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
1281	for (i = 0; i < CSSELR_MAX; i++) {
1282		if (!is_valid_cache(i))
1283			continue;
1284		if (put_user(val | i, uindices))
1285			return -EFAULT;
1286		uindices++;
1287	}
1288	return 0;
1289}
1290
/*
 * Build the userspace ONE_REG index for a cp15 table entry, encoding
 * size, Op1/Op2, CRn/CRm per the KVM ABI.
 */
static u64 cp15_to_index(const struct coproc_reg *reg)
{
	u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
	if (reg->is_64bit) {
		val |= KVM_REG_SIZE_U64;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		/*
		 * CRn always denotes the primary coproc. reg. nr. for the
		 * in-kernel representation, but the user space API uses the
		 * CRm for the encoding, because it is modelled after the
		 * MRRC/MCRR instructions: see the ARM ARM rev. c page
		 * B3-1445
		 */
		val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
	} else {
		val |= KVM_REG_SIZE_U32;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
		val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
		val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
	}
	return val;
}

/*
 * Copy one register index to userspace, advancing *uind.  A NULL
 * *uind means "count only": report success without writing (used by
 * walk_cp15() when called just to size the list).
 */
static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(cp15_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}
1326
/* Assumed ordered tables, see kvm_coproc_table_init. */
/*
 * Merge-walk the target-specific and generic cp15 tables in index
 * order, emitting each distinct register index once (target entry
 * wins on a tie).  With uind == NULL this only counts.  Returns the
 * number of registers emitted/counted, or -EFAULT.
 */
static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct coproc_reg *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, &num);
	end1 = i1 + num;
	i2 = cp15_regs;
	end2 = cp15_regs + ARRAY_SIZE(cp15_regs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	/* NOTE(review): cmp_reg() must tolerate a NULL side once one
	 * table is exhausted — see its definition in coproc.h. */
	while (i1 || i2) {
		int cmp = cmp_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		/* On a tie (cmp == 0) both cursors advance: one output. */
		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}
1369
/*
 * Total number of coproc-space registers reported to userspace:
 * invariant + demuxed (CCSIDR) + VFP + emulated cp15 registers.
 */
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_cp15)
		+ num_demux_regs()
		+ num_vfp_regs()
		+ walk_cp15(vcpu, (u64 __user *)NULL);	/* NULL = count only */
}

/*
 * Fill the userspace array for KVM_GET_REG_LIST: invariant ids first,
 * then emulated cp15, VFP and finally the demuxed ids.  Returns 0 or
 * a negative error from any of the writers.
 */
int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
		if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_cp15(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	err = copy_vfp_regids(uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
1402
/*
 * One-time init: sanity-check table ordering, snapshot the invariant
 * register values from the host, and cache a cleaned-up CLIDR in
 * cache_levels for later CSSELR validation.
 */
void kvm_coproc_table_init(void)
{
	unsigned int i;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
		invariant_cp15[i].reset(NULL, &invariant_cp15[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
1432
/**
 * kvm_reset_coprocs - sets cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct coproc_reg *table;
	/* Tracks which cp15 slots were touched, to catch gaps below. */
	DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, };

	/* Generic chip reset first (so target could override). */
	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap);

	table = get_target_table(vcpu->arch.target, &num);
	reset_coproc_regs(vcpu, table, num, bmap);

	/* Slot 0 (MPIDR) excluded; warn about any register left unreset. */
	for (num = 1; num < NR_CP15_REGS; num++)
		WARN(!test_bit(num, bmap),
		     "Didn't reset vcpu_cp15(vcpu, %zi)", num);
}
/*
 * ---- v4.10.11 ----
 * Scrape artifact: everything below is an older (v4.10.11) copy of this
 * same source file, duplicating the definitions above.
 */
   1/*
   2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
   3 * Authors: Rusty Russell <rusty@rustcorp.com.au>
   4 *          Christoffer Dall <c.dall@virtualopensystems.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License, version 2, as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  18 */
  19
  20#include <linux/bsearch.h>
  21#include <linux/mm.h>
  22#include <linux/kvm_host.h>
  23#include <linux/uaccess.h>
  24#include <asm/kvm_arm.h>
  25#include <asm/kvm_host.h>
  26#include <asm/kvm_emulate.h>
  27#include <asm/kvm_coproc.h>
  28#include <asm/kvm_mmu.h>
  29#include <asm/cacheflush.h>
  30#include <asm/cputype.h>
  31#include <trace/events/kvm.h>
  32#include <asm/vfp.h>
  33#include "../vfp/vfpinstr.h"
  34
 
  35#include "trace.h"
  36#include "coproc.h"
  37
  38
  39/******************************************************************************
  40 * Co-processor emulation
  41 *****************************************************************************/
  42
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/*
 * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some
 * of cp15 registers can be viewed either as couple of two u32 registers
 * or one u64 register. Current u64 register encoding is that least
 * significant u32 word is followed by most significant u32 word.
 */
static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
				       const struct coproc_reg *r,
				       u64 val)
{
	/* Low word goes in the first slot, high word in the next. */
	vcpu_cp15(vcpu, r->reg) = val & 0xffffffff;
	vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
}

/* Reassemble a 64-bit cp15 value from its two consecutive u32 halves. */
static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
				      const struct coproc_reg *r)
{
	u64 val;

	val = vcpu_cp15(vcpu, r->reg + 1);
	val = val << 32;
	val = val | vcpu_cp15(vcpu, r->reg);
	return val;
}
  73
/* Guest CP10 ID access: not supported, inject UNDEF and resume guest. */
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/*
	 * We can get here, if the host has been built without VFPv3 support,
	 * but the guest attempted a floating point operation.
	 */
	kvm_inject_undefined(vcpu);
	return 1;
}

/* Guest CP14 (debug) LDC/STC access: unsupported, inject UNDEF. */
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/* Guest CP14 (debug) MRC/MCR access: unsupported, inject UNDEF. */
int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
 101
/* Reset handler: synthesize the guest's MPIDR from its vcpu_id. */
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	/*
	 * Compute guest MPIDR. We build a virtual cluster out of the
	 * vcpu_id, but we read the 'U' bit from the underlying
	 * hardware directly.
	 */
	vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
				     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
				     (vcpu->vcpu_id & 3));
}

/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
static bool access_actlr(struct kvm_vcpu *vcpu,
			 const struct coproc_params *p,
			 const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
	return true;
}

/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
static bool access_cbar(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p);
	return read_zero(vcpu, p);	/* reads as zero for the guest */
}
 135
/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
static bool access_l2ctlr(struct kvm_vcpu *vcpu,
			  const struct coproc_params *p,
			  const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
	return true;
}

/*
 * Reset handler: base the guest's L2CTLR on the host value, but patch
 * the "number of cores - 1" field [25:24] to match the VM's vcpu count.
 */
static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 l2ctlr, ncores;

	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
	l2ctlr &= ~(3 << 24);
	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
	/* How many cores in the current cluster and the next ones */
	ncores -= (vcpu->vcpu_id & ~3);
	/* Cap it to the maximum number of cores in a single cluster */
	ncores = min(ncores, 3U);
	l2ctlr |= (ncores & 3) << 24;

	vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr;
}

/*
 * Reset handler: copy the host ACTLR but force the SMP bit (bit 6) to
 * reflect whether the guest has more than one vcpu.
 */
static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 actlr;

	/* ACTLR contains SMP bit: make sure you create all cpus first! */
	asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
	/* Make the SMP bit consistent with the guest configuration */
	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
		actlr |= 1U << 6;
	else
		actlr &= ~(1U << 6);

	vcpu_cp15(vcpu, c1_ACTLR) = actlr;
}
 178
/*
 * TRM entries: A7:4.3.50, A15:4.3.49
 * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
 */
static bool access_l2ectlr(struct kvm_vcpu *vcpu,
			   const struct coproc_params *p,
			   const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	/* Reads as zero: no L2 error reporting for the guest. */
	*vcpu_reg(vcpu, p->Rt1) = 0;
	return true;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	/* Approximate set/way maintenance with a full flush. */
	kvm_set_way_flush(vcpu);
	return true;
}
 207
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set.  If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 *
 * Used by the cpu-specific code.
 */
bool access_vm_reg(struct kvm_vcpu *vcpu,
		   const struct coproc_params *p,
		   const struct coproc_reg *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	/* Reads are never trapped here: only writes reach us. */
	BUG_ON(!p->is_write);

	vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
	/* 64-bit (MCRR) writes carry the high word in Rt2. */
	if (p->is_64bit)
		vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);

	/* React if the MMU/cache-enable state just changed. */
	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
 230
/*
 * ICC_SGI1R: a 64-bit write-only register; forward the assembled
 * 64-bit value (Rt2:Rt1) to the vGICv3 SGI dispatcher.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   const struct coproc_params *p,
			   const struct coproc_reg *r)
{
	u64 reg;

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
	reg |= *vcpu_reg(vcpu, p->Rt1) ;

	vgic_v3_dispatch_sgi(vcpu, reg);

	return true;
}

/* ICC_SRE: writes ignored; reads return the vGICv3 SRE shadow value. */
static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   const struct coproc_params *p,
			   const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;

	return true;
}
 259
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 260/*
 261 * We could trap ID_DFR0 and tell the guest we don't support performance
 262 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 263 * NAKed, so it will read the PMCR anyway.
 264 *
 265 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 266 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 267 * all PM registers, which doesn't crash the guest kernel at least.
 268 */
 269static bool pm_fake(struct kvm_vcpu *vcpu,
 270		    const struct coproc_params *p,
 271		    const struct coproc_reg *r)
 272{
 273	if (p->is_write)
 274		return ignore_write(vcpu, p);
 275	else
 276		return read_zero(vcpu, p);
 277}
 278
 279#define access_pmcr pm_fake
 280#define access_pmcntenset pm_fake
 281#define access_pmcntenclr pm_fake
 282#define access_pmovsr pm_fake
 283#define access_pmselr pm_fake
 284#define access_pmceid0 pm_fake
 285#define access_pmceid1 pm_fake
 286#define access_pmccntr pm_fake
 287#define access_pmxevtyper pm_fake
 288#define access_pmxevcntr pm_fake
 289#define access_pmuserenr pm_fake
 290#define access_pmintenset pm_fake
 291#define access_pmintenclr pm_fake
 292
/* Architected CP15 registers.
 * CRn denotes the primary register number, but is copied to the CRm in the
 * user space API for 64-bit register access in line with the terminology used
 * in the ARM ARM.
 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
 *            registers preceding 32-bit ones.
 */
static const struct coproc_reg cp15_regs[] = {
	/* MPIDR: we use VMPIDR for guest access. */
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
			NULL, reset_mpidr, c0_MPIDR },

	/* CSSELR: swapped by interrupt.S. */
	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
			NULL, reset_unknown, c0_CSSELR },

	/* ACTLR: trapped by HCR.TAC bit. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
			access_actlr, reset_actlr, c1_ACTLR },

	/* CPACR: swapped by interrupt.S. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_val, c1_CPACR, 0x00000000 },

	/* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */
	{ CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
	{ CRn(2), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c2_TTBR0 },
	{ CRn(2), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c2_TTBR1 },
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
			access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
	{ CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },


	/* DACR: swapped by interrupt.S. */
	{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c3_DACR },

	/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c5_DFSR },
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c5_IFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c5_ADFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c5_AIFSR },

	/* DFAR/IFAR: swapped by interrupt.S. */
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c6_DFAR },
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
			access_vm_reg, reset_unknown, c6_IFAR },

	/* PAR swapped by interrupt.S */
	{ CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
	/*
	 * L2CTLR access (guest wants to know #CPUs).
	 */
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
			access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},

	/*
	 * Dummy performance monitor implementation.
	 */
	{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},

	/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
	{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c10_PRRR},
	{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c10_NMRR},

	/* AMAIR0/AMAIR1: swapped by interrupt.S. */
	{ CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c10_AMAIR0},
	{ CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c10_AMAIR1},

	/* ICC_SGI1R */
	{ CRm64(12), Op1( 0), is64, access_gic_sgi},

	/* VBAR: swapped by interrupt.S. */
	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c12_VBAR, 0x00000000 },

	/* ICC_SRE */
	{ CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre },

	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_val, c13_CID, 0x00000000 },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c13_TID_URW },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
			NULL, reset_unknown, c13_TID_URO },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
			NULL, reset_unknown, c13_TID_PRIV },

	/* CNTKCTL: swapped by interrupt.S. */
	{ CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c14_CNTKCTL, 0x00000000 },

	/* The Configuration Base Address Register. */
	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};
 420
/*
 * Verify a register table is strictly ascending (no duplicates);
 * returns 1 and logs the offending index on failure, 0 when sorted.
 */
static int check_reg_table(const struct coproc_reg *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

/* Target specific emulation tables */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];

/* Register a per-CPU-target override table; must be sorted. */
void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
	BUG_ON(check_reg_table(table->table, table->num));
	target_tables[table->target] = table;
}

/* Get specific register table for this target. */
static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
{
	struct kvm_coproc_target_table *table;

	table = target_tables[target];
	*num = table->num;
	return table->table;
}
 453
/*
 * Pack CRn/CRm/Op1/Op2 and the 32/64-bit flag into a single ordinal so
 * tables sorted on the same fields can be binary-searched.
 */
#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val  = (x)->CRn << 11;					\
		val |= (x)->CRm << 7;					\
		val |= (x)->Op1 << 4;					\
		val |= (x)->Op2 << 1;					\
		val |= !(x)->is_64bit;					\
		val;							\
	 })

/* bsearch() comparator: key is a packed ordinal, elt a table entry. */
static int match_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct coproc_reg *r = elt;

	return pval - reg_to_match_value(r);
}

/* Binary-search @table for the entry matching the trapped @params. */
static const struct coproc_reg *find_reg(const struct coproc_params *params,
					 const struct coproc_reg table[],
					 unsigned int num)
{
	unsigned long pval = reg_to_match_value(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
}
 481
/*
 * Emulate one trapped cp15 access: look the register up (target table
 * first, generic second) and run its accessor.  On success the guest
 * PC is advanced past the instruction; on any failure an undefined
 * exception is injected.  Always returns 1 (handled, resume guest).
 */
static int emulate_cp15(struct kvm_vcpu *vcpu,
			const struct coproc_params *params)
{
	size_t num;
	const struct coproc_reg *table, *r;

	trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
				   params->CRm, params->Op2, params->is_write);

	table = get_target_table(vcpu->arch.target, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	if (likely(r)) {
		/* If we don't have an accessor, we should never get here! */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest CP15 access at: %08lx\n",
			*vcpu_pc(vcpu));
		print_cp_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}
 516
/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 *
 * Decodes the 64-bit access parameters from the HSR and hands them to
 * emulate_cp15().  For MRRC/MCRR the HSR carries CRm; it is stored in
 * params.CRn here to match the in-kernel table encoding.
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params;

	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
	params.is_64bit = true;

	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
	params.Op2 = 0;
	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
	params.CRm = 0;

	return emulate_cp15(vcpu, &params);
}
 538
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 539static void reset_coproc_regs(struct kvm_vcpu *vcpu,
 540			      const struct coproc_reg *table, size_t num)
 
 541{
 542	unsigned long i;
 543
 544	for (i = 0; i < num; i++)
 545		if (table[i].reset)
 
 
 546			table[i].reset(vcpu, &table[i]);
 
 
 
 
 
 
 547}
 548
 549/**
 550 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 551 * @vcpu: The VCPU pointer
 552 * @run:  The kvm_run struct
 553 */
 554int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 555{
 556	struct coproc_params params;
 557
 558	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
 559	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
 560	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
 561	params.is_64bit = false;
 562
 563	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
 564	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
 565	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
 566	params.Rt2 = 0;
 567
 
 
 
 
 
 
 
 
 
 
 
 568	return emulate_cp15(vcpu, &params);
 569}
 570
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 571/******************************************************************************
 572 * Userspace API
 573 *****************************************************************************/
 574
/*
 * Decode a userspace KVM_REG index (@id) into @params.  Returns false
 * when the size field is neither U32 nor U64 or when any index bits
 * outside the documented fields are set.
 */
static bool index_to_params(u64 id, struct coproc_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U32:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM_32_CRN_MASK
			   | KVM_REG_ARM_CRM_MASK
			   | KVM_REG_ARM_OPC1_MASK
			   | KVM_REG_ARM_32_OPC2_MASK))
			return false;

		params->is_64bit = false;
		params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
			       >> KVM_REG_ARM_32_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
			       >> KVM_REG_ARM_32_OPC2_SHIFT);
		return true;
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM_CRM_MASK
			      | KVM_REG_ARM_OPC1_MASK))
			return false;
		params->is_64bit = true;
		/* CRm to CRn: see cp15_to_index for details */
		params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = 0;
		params->CRm = 0;
		return true;
	default:
		return false;
	}
}
 618
 619/* Decode an index value, and find the cp15 coproc_reg entry. */
 620static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
 621						    u64 id)
 622{
 623	size_t num;
 624	const struct coproc_reg *table, *r;
 625	struct coproc_params params;
 626
 627	/* We only do cp15 for now. */
 628	if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
 629		return NULL;
 630
 631	if (!index_to_params(id, &params))
 632		return NULL;
 633
 634	table = get_target_table(vcpu->arch.target, &num);
 635	r = find_reg(&params, table, num);
 636	if (!r)
 637		r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));
 638
 639	/* Not saved in the cp15 array? */
 640	if (r && !r->reg)
 641		r = NULL;
 642
 643	return r;
 644}
 645
/*
 * These are the invariant cp15 registers: we let the guest see the host
 * versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
/* Unfortunately, there's no register-argument for mrc, so generate. */
/*
 * FUNCTION_FOR32 expands to a get_<name>() helper that reads the host's
 * CP15 register (crn, crm, op1, op2) via MRC and stores the value into
 * the table entry's ->val (the const is cast away because these helpers
 * are used as reset hooks on the writable invariant_cp15[] table).
 */
#define FUNCTION_FOR32(crn, crm, op1, op2, name)			\
	static void get_##name(struct kvm_vcpu *v,			\
			       const struct coproc_reg *r)		\
	{								\
		u32 val;						\
									\
		asm volatile("mrc p15, " __stringify(op1)		\
			     ", %0, c" __stringify(crn)			\
			     ", c" __stringify(crm)			\
			     ", " __stringify(op2) "\n" : "=r" (val));	\
		((struct coproc_reg *)r)->val = val;			\
	}

/* Generate one getter per invariant ID/feature register. */
FUNCTION_FOR32(0, 0, 0, 0, MIDR)
FUNCTION_FOR32(0, 0, 0, 1, CTR)
FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
FUNCTION_FOR32(0, 0, 1, 7, AIDR)
 688
/*
 * ->val is filled in by kvm_invariant_coproc_table_init(), which calls
 * each entry's reset hook (the get_* helpers above) against the table
 * itself.  Must be kept sorted; checked by check_reg_table() at init.
 */
static struct coproc_reg invariant_cp15[] = {
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },

	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },

	{ CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },

	{ CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
};
 716
 717/*
 718 * Reads a register value from a userspace address to a kernel
 719 * variable. Make sure that register size matches sizeof(*__val).
 720 */
 721static int reg_from_user(void *val, const void __user *uaddr, u64 id)
 722{
 723	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
 724		return -EFAULT;
 725	return 0;
 726}
 727
 728/*
 729 * Writes a register value to a userspace address from a kernel variable.
 730 * Make sure that register size matches sizeof(*__val).
 731 */
 732static int reg_to_user(void __user *uaddr, const void *val, u64 id)
 733{
 734	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
 735		return -EFAULT;
 736	return 0;
 737}
 738
 739static int get_invariant_cp15(u64 id, void __user *uaddr)
 740{
 741	struct coproc_params params;
 742	const struct coproc_reg *r;
 743	int ret;
 744
 745	if (!index_to_params(id, &params))
 746		return -ENOENT;
 747
 748	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
 749	if (!r)
 750		return -ENOENT;
 751
 752	ret = -ENOENT;
 753	if (KVM_REG_SIZE(id) == 4) {
 754		u32 val = r->val;
 755
 756		ret = reg_to_user(uaddr, &val, id);
 757	} else if (KVM_REG_SIZE(id) == 8) {
 758		ret = reg_to_user(uaddr, &r->val, id);
 759	}
 760	return ret;
 761}
 762
 763static int set_invariant_cp15(u64 id, void __user *uaddr)
 764{
 765	struct coproc_params params;
 766	const struct coproc_reg *r;
 767	int err;
 768	u64 val;
 769
 770	if (!index_to_params(id, &params))
 771		return -ENOENT;
 772	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
 773	if (!r)
 774		return -ENOENT;
 775
 776	err = -ENOENT;
 777	if (KVM_REG_SIZE(id) == 4) {
 778		u32 val32;
 779
 780		err = reg_from_user(&val32, uaddr, id);
 781		if (!err)
 782			val = val32;
 783	} else if (KVM_REG_SIZE(id) == 8) {
 784		err = reg_from_user(&val, uaddr, id);
 785	}
 786	if (err)
 787		return err;
 788
 789	/* This is what we mean by invariant: you can't change it. */
 790	if (r->val != val)
 791		return -EINVAL;
 792
 793	return 0;
 794}
 795
 796static bool is_valid_cache(u32 val)
 797{
 798	u32 level, ctype;
 799
 800	if (val >= CSSELR_MAX)
 801		return false;
 802
 803	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
 804        level = (val >> 1);
 805        ctype = (cache_levels >> (level * 3)) & 7;
 806
 807	switch (ctype) {
 808	case 0: /* No cache */
 809		return false;
 810	case 1: /* Instruction cache only */
 811		return (val & 1);
 812	case 2: /* Data cache only */
 813	case 4: /* Unified cache */
 814		return !(val & 1);
 815	case 3: /* Separate instruction and data caches */
 816		return true;
 817	default: /* Reserved: we can't know instruction or data. */
 818		return false;
 819	}
 820}
 821
/*
 * Which cache CCSIDR represents depends on CSSELR value.
 * Select the cache with @csselr, then read back the matching CCSIDR.
 */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure noone else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
	/* Ensure the CSSELR write takes effect before reading CCSIDR. */
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}
 838
 839static int demux_c15_get(u64 id, void __user *uaddr)
 840{
 841	u32 val;
 842	u32 __user *uval = uaddr;
 843
 844	/* Fail if we have unknown bits set. */
 845	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
 846		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
 847		return -ENOENT;
 848
 849	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
 850	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
 851		if (KVM_REG_SIZE(id) != 4)
 852			return -ENOENT;
 853		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
 854			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
 855		if (!is_valid_cache(val))
 856			return -ENOENT;
 857
 858		return put_user(get_ccsidr(val), uval);
 859	default:
 860		return -ENOENT;
 861	}
 862}
 863
 864static int demux_c15_set(u64 id, void __user *uaddr)
 865{
 866	u32 val, newval;
 867	u32 __user *uval = uaddr;
 868
 869	/* Fail if we have unknown bits set. */
 870	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
 871		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
 872		return -ENOENT;
 873
 874	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
 875	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
 876		if (KVM_REG_SIZE(id) != 4)
 877			return -ENOENT;
 878		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
 879			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
 880		if (!is_valid_cache(val))
 881			return -ENOENT;
 882
 883		if (get_user(newval, uval))
 884			return -EFAULT;
 885
 886		/* This is also invariant: you can't change it. */
 887		if (newval != get_ccsidr(val))
 888			return -EINVAL;
 889		return 0;
 890	default:
 891		return -ENOENT;
 892	}
 893}
 894
#ifdef CONFIG_VFPv3
/* VFP control/status registers exposed to userspace as 32-bit KVM regs. */
static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
				   KVM_REG_ARM_VFP_FPSCR,
				   KVM_REG_ARM_VFP_FPINST,
				   KVM_REG_ARM_VFP_FPINST2,
				   KVM_REG_ARM_VFP_MVFR0,
				   KVM_REG_ARM_VFP_MVFR1,
				   KVM_REG_ARM_VFP_FPSID };
 903
 904static unsigned int num_fp_regs(void)
 905{
 906	if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
 907		return 32;
 908	else
 909		return 16;
 910}
 911
 912static unsigned int num_vfp_regs(void)
 913{
 914	/* Normal FP regs + control regs. */
 915	return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
 916}
 917
 918static int copy_vfp_regids(u64 __user *uindices)
 919{
 920	unsigned int i;
 921	const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
 922	const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
 923
 924	for (i = 0; i < num_fp_regs(); i++) {
 925		if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
 926			     uindices))
 927			return -EFAULT;
 928		uindices++;
 929	}
 930
 931	for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
 932		if (put_user(u32reg | vfp_sysregs[i], uindices))
 933			return -EFAULT;
 934		uindices++;
 935	}
 936
 937	return num_vfp_regs();
 938}
 939
/*
 * Fetch a VFP register for userspace.  Data registers (index below
 * num_fp_regs()) are 64-bit and read from the vcpu context; control
 * registers are 32-bit, with MVFR0/MVFR1/FPSID read live from the host
 * since they are invariant.
 */
static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		/* Double-precision data registers are 64 bit. */
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
				   id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
	/* Invariant registers: read straight from the hardware. */
	case KVM_REG_ARM_VFP_MVFR0:
		val = fmrx(MVFR0);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_MVFR1:
		val = fmrx(MVFR1);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_FPSID:
		val = fmrx(FPSID);
		return reg_to_user(uaddr, &val, id);
	default:
		return -ENOENT;
	}
}
 983
/*
 * Update a VFP register from userspace.  Data and writable control
 * registers go into the vcpu context; MVFR0/MVFR1/FPSID are invariant,
 * so a "write" only succeeds when it matches the host value.
 */
static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		/* Double-precision data registers are 64 bit. */
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
				     uaddr, id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
	/* These are invariant. */
	case KVM_REG_ARM_VFP_MVFR0:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR0))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_MVFR1:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR1))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_FPSID:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(FPSID))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
#else /* !CONFIG_VFPv3 */
/* Without VFPv3 support there are no VFP registers to expose. */
static unsigned int num_vfp_regs(void)
{
	return 0;
}

/* Nothing to copy; report zero indices written. */
static int copy_vfp_regids(u64 __user *uindices)
{
	return 0;
}

/* No VFP registers exist, so every lookup fails. */
static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	return -ENOENT;
}

/* No VFP registers exist, so every lookup fails. */
static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	return -ENOENT;
}
#endif /* !CONFIG_VFPv3 */
1058
1059int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
1060{
1061	const struct coproc_reg *r;
1062	void __user *uaddr = (void __user *)(long)reg->addr;
1063	int ret;
1064
1065	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
1066		return demux_c15_get(reg->id, uaddr);
1067
1068	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
1069		return vfp_get_reg(vcpu, reg->id, uaddr);
1070
1071	r = index_to_coproc_reg(vcpu, reg->id);
1072	if (!r)
1073		return get_invariant_cp15(reg->id, uaddr);
1074
1075	ret = -ENOENT;
1076	if (KVM_REG_SIZE(reg->id) == 8) {
1077		u64 val;
1078
1079		val = vcpu_cp15_reg64_get(vcpu, r);
1080		ret = reg_to_user(uaddr, &val, reg->id);
1081	} else if (KVM_REG_SIZE(reg->id) == 4) {
1082		ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id);
1083	}
1084
1085	return ret;
1086}
1087
1088int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
1089{
1090	const struct coproc_reg *r;
1091	void __user *uaddr = (void __user *)(long)reg->addr;
1092	int ret;
1093
1094	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
1095		return demux_c15_set(reg->id, uaddr);
1096
1097	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
1098		return vfp_set_reg(vcpu, reg->id, uaddr);
1099
1100	r = index_to_coproc_reg(vcpu, reg->id);
1101	if (!r)
1102		return set_invariant_cp15(reg->id, uaddr);
1103
1104	ret = -ENOENT;
1105	if (KVM_REG_SIZE(reg->id) == 8) {
1106		u64 val;
1107
1108		ret = reg_from_user(&val, uaddr, reg->id);
1109		if (!ret)
1110			vcpu_cp15_reg64_set(vcpu, r, val);
1111	} else if (KVM_REG_SIZE(reg->id) == 4) {
1112		ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id);
1113	}
1114
1115	return ret;
1116}
1117
1118static unsigned int num_demux_regs(void)
1119{
1120	unsigned int i, count = 0;
1121
1122	for (i = 0; i < CSSELR_MAX; i++)
1123		if (is_valid_cache(i))
1124			count++;
1125
1126	return count;
1127}
1128
1129static int write_demux_regids(u64 __user *uindices)
1130{
1131	u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
1132	unsigned int i;
1133
1134	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
1135	for (i = 0; i < CSSELR_MAX; i++) {
1136		if (!is_valid_cache(i))
1137			continue;
1138		if (put_user(val | i, uindices))
1139			return -EFAULT;
1140		uindices++;
1141	}
1142	return 0;
1143}
1144
/*
 * Encode a coproc_reg table entry as a userspace KVM_REG index.
 * Inverse of index_to_params() for the cp15 coprocessor.
 */
static u64 cp15_to_index(const struct coproc_reg *reg)
{
	u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
	if (reg->is_64bit) {
		val |= KVM_REG_SIZE_U64;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		/*
		 * CRn always denotes the primary coproc. reg. nr. for the
		 * in-kernel representation, but the user space API uses the
		 * CRm for the encoding, because it is modelled after the
		 * MRRC/MCRR instructions: see the ARM ARM rev. c page
		 * B3-1445
		 */
		val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
	} else {
		val |= KVM_REG_SIZE_U32;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
		val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
		val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
	}
	return val;
}
1168
1169static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
1170{
1171	if (!*uind)
1172		return true;
1173
1174	if (put_user(cp15_to_index(reg), *uind))
1175		return false;
1176
1177	(*uind)++;
1178	return true;
1179}
1180
/*
 * Assumed ordered tables, see kvm_coproc_table_init.
 *
 * Merge-walk the target-specific and generic cp15 tables in encoding
 * order, emitting each distinct register index once (target-specific
 * entries override generic ones with the same encoding).  With a NULL
 * @uind this just counts.  Returns the number of registers emitted, or
 * -EFAULT on a userspace fault.
 */
static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct coproc_reg *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, &num);
	end1 = i1 + num;
	i2 = cp15_regs;
	end2 = cp15_regs + ARRAY_SIZE(cp15_regs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		/* NOTE(review): assumes cmp_reg orders a NULL iterator after
		 * every real entry so the exhausted table is skipped —
		 * confirm against cmp_reg in coproc.h. */
		int cmp = cmp_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		/* Advance whichever iterator(s) we consumed; equal encodings
		 * advance both so the duplicate is emitted only once. */
		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}
1223
1224unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
1225{
1226	return ARRAY_SIZE(invariant_cp15)
1227		+ num_demux_regs()
1228		+ num_vfp_regs()
1229		+ walk_cp15(vcpu, (u64 __user *)NULL);
1230}
1231
/*
 * Fill the userspace array @uindices with the index of every
 * coprocessor register: invariant cp15, then per-vcpu cp15, then VFP,
 * then demuxed (CCSIDR) registers.  Returns 0 or a negative error.
 */
int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
		if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	/* walk_cp15/copy_vfp_regids return the number of indices written. */
	err = walk_cp15(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	err = copy_vfp_regids(uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
1256
/*
 * One-time init: sanity-check the register tables, snapshot the host's
 * invariant registers, and cache a cleaned-up copy of CLIDR for
 * is_valid_cache().
 */
void kvm_coproc_table_init(void)
{
	unsigned int i;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
		invariant_cp15[i].reset(NULL, &invariant_cp15[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
	/* Find the first absent level (3-bit Ctype fields)... */
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
1286
/**
 * kvm_reset_coprocs - sets cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct coproc_reg *table;

	/*
	 * Catch someone adding a register without putting in reset entry:
	 * poison the whole array with 0x42 bytes first.
	 */
	memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));

	/* Generic chip reset first (so target could override). */
	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));

	table = get_target_table(vcpu->arch.target, &num);
	reset_coproc_regs(vcpu, table, num);

	/* Any entry still holding the poison pattern was never reset. */
	for (num = 1; num < NR_CP15_REGS; num++)
		if (vcpu_cp15(vcpu, num) == 0x42424242)
			panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
}