/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/bsearch.h>
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "coproc.h"


/******************************************************************************
 * Co-processor emulation
 *****************************************************************************/

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       const struct coproc_params *params)
{
	WARN_ONCE(1, "CP15 write to read-only register\n");
	print_cp_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 const struct coproc_params *params)
{
	WARN_ONCE(1, "CP15 read from write-only register\n");
	print_cp_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/*
 * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some
 * cp15 registers can be viewed either as a pair of two u32 registers
 * or as one u64 register. The current u64 register encoding is that the
 * least significant u32 word is followed by the most significant u32 word.
 */
static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
				       const struct coproc_reg *r,
				       u64 val)
{
	vcpu_cp15(vcpu, r->reg) = val & 0xffffffff;
	vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
}

static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
				      const struct coproc_reg *r)
{
	u64 val;

	val = vcpu_cp15(vcpu, r->reg + 1);
	val = val << 32;
	val = val | vcpu_cp15(vcpu, r->reg);
	return val;
}
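
/*
 * Illustrative example (not part of the original source): with
 * val = 0x1122334455667788 and r->reg == c2_TTBR0,
 * vcpu_cp15_reg64_set() stores
 *
 *	vcpu_cp15(vcpu, c2_TTBR0)     = 0x55667788;	(low word first)
 *	vcpu_cp15(vcpu, c2_TTBR0 + 1) = 0x11223344;	(high word second)
 *
 * and vcpu_cp15_reg64_get() reassembles the same u64 from those slots.
 */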

int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/*
	 * We can get here if the host has been built without VFPv3 support,
	 * but the guest attempted a floating point operation.
	 */
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	/*
	 * Compute guest MPIDR. We build a virtual cluster out of the
	 * vcpu_id, but we read the 'U' bit from the underlying
	 * hardware directly.
	 */
	vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
				     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
				     (vcpu->vcpu_id & 3));
}
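
/*
 * Worked example (illustrative): with four vcpus per virtual cluster,
 * vcpu_id 5 yields Aff0 = 5 & 3 = 1 and Aff1 = 5 >> 2 = 1, i.e. the
 * second CPU of the second cluster; the 'U' (uniprocessor) bit is
 * whatever the host's own MPIDR reports.
 */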

/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
static bool access_actlr(struct kvm_vcpu *vcpu,
			 const struct coproc_params *p,
			 const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
	return true;
}

/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
static bool access_cbar(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p);
	return read_zero(vcpu, p);
}

/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
static bool access_l2ctlr(struct kvm_vcpu *vcpu,
			  const struct coproc_params *p,
			  const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
	return true;
}

static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 l2ctlr, ncores;

	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
	l2ctlr &= ~(3 << 24);
	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
	/* How many cores in the current cluster and the next ones */
	ncores -= (vcpu->vcpu_id & ~3);
	/* Cap it to the maximum number of cores in a single cluster */
	ncores = min(ncores, 3U);
	l2ctlr |= (ncores & 3) << 24;

	vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr;
}
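
/*
 * Worked example (illustrative): with 6 online vcpus, the vcpu with
 * vcpu_id 4 starts a new cluster, so ncores = (6 - 1) - 4 = 1 and bits
 * [25:24] of the virtual L2CTLR report one additional core (i.e. two
 * cores) in that cluster.
 */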

static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 actlr;

	/* ACTLR contains SMP bit: make sure you create all cpus first! */
	asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
	/* Make the SMP bit consistent with the guest configuration */
	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
		actlr |= 1U << 6;
	else
		actlr &= ~(1U << 6);

	vcpu_cp15(vcpu, c1_ACTLR) = actlr;
}

/*
 * TRM entries: A7:4.3.50, A15:4.3.49
 * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
 */
static bool access_l2ectlr(struct kvm_vcpu *vcpu,
			   const struct coproc_params *p,
			   const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = 0;
	return true;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	kvm_set_way_flush(vcpu);
	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set.  If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 *
 * Used by the cpu-specific code.
 */
bool access_vm_reg(struct kvm_vcpu *vcpu,
		   const struct coproc_params *p,
		   const struct coproc_reg *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
	if (p->is_64bit)
		vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   const struct coproc_params *p,
			   const struct coproc_reg *r)
{
	u64 reg;

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
	reg |= *vcpu_reg(vcpu, p->Rt1);

	vgic_v3_dispatch_sgi(vcpu, reg);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   const struct coproc_params *p,
			   const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;

	return true;
}

static bool access_cntp_tval(struct kvm_vcpu *vcpu,
			     const struct coproc_params *p,
			     const struct coproc_reg *r)
{
	u64 now = kvm_phys_timer_read();
	u64 val;

	if (p->is_write) {
		val = *vcpu_reg(vcpu, p->Rt1);
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, val + now);
	} else {
		val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
		*vcpu_reg(vcpu, p->Rt1) = val - now;
	}

	return true;
}
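
/*
 * Note (illustrative): this implements the architectural relationship
 * CNTP_TVAL = CNTP_CVAL - CNTPCT. Writing a downcount of, say, 1000
 * ticks while the counter reads 5000 stores CVAL = 6000; reading TVAL
 * back returns however many ticks remain until CVAL is reached.
 */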

static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
			    const struct coproc_params *p,
			    const struct coproc_reg *r)
{
	u32 val;

	if (p->is_write) {
		val = *vcpu_reg(vcpu, p->Rt1);
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, val);
	} else {
		val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);
		*vcpu_reg(vcpu, p->Rt1) = val;
	}

	return true;
}

static bool access_cntp_cval(struct kvm_vcpu *vcpu,
			     const struct coproc_params *p,
			     const struct coproc_reg *r)
{
	u64 val;

	if (p->is_write) {
		val = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
		val |= *vcpu_reg(vcpu, p->Rt1);
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, val);
	} else {
		val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
		*vcpu_reg(vcpu, p->Rt1) = val;
		*vcpu_reg(vcpu, p->Rt2) = val >> 32;
	}

	return true;
}

/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

#define access_pmcr trap_raz_wi
#define access_pmcntenset trap_raz_wi
#define access_pmcntenclr trap_raz_wi
#define access_pmovsr trap_raz_wi
#define access_pmselr trap_raz_wi
#define access_pmceid0 trap_raz_wi
#define access_pmceid1 trap_raz_wi
#define access_pmccntr trap_raz_wi
#define access_pmxevtyper trap_raz_wi
#define access_pmxevcntr trap_raz_wi
#define access_pmuserenr trap_raz_wi
#define access_pmintenset trap_raz_wi
#define access_pmintenclr trap_raz_wi

/* Architected CP15 registers.
 * CRn denotes the primary register number, but is copied to the CRm in the
 * user space API for 64-bit register access in line with the terminology used
 * in the ARM ARM.
 * Important: Must be sorted ascending by CRn, CRm, Op1, Op2 and with 64-bit
 *            registers preceding 32-bit ones.
 */
static const struct coproc_reg cp15_regs[] = {
	/* MPIDR: we use VMPIDR for guest access. */
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
			NULL, reset_mpidr, c0_MPIDR },

	/* CSSELR: swapped by interrupt.S. */
	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
			NULL, reset_unknown, c0_CSSELR },

	/* ACTLR: trapped by HCR.TAC bit. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
			access_actlr, reset_actlr, c1_ACTLR },

	/* CPACR: swapped by interrupt.S. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_val, c1_CPACR, 0x00000000 },

	/* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */
	{ CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c2_TTBR0 },
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c2_TTBR1 },
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
			access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
	{ CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },

	/* DACR: swapped by interrupt.S. */
	{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c3_DACR },

	/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c5_DFSR },
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c5_IFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c5_ADFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c5_AIFSR },

	/* DFAR/IFAR: swapped by interrupt.S. */
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c6_DFAR },
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
			access_vm_reg, reset_unknown, c6_IFAR },

	/* PAR: swapped by interrupt.S. */
	{ CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
	/*
	 * L2CTLR access (guest wants to know #CPUs).
	 */
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
			access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},

	/*
	 * Dummy performance monitor implementation.
	 */
	{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},

	/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
	{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c10_PRRR},
	{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c10_NMRR},

	/* AMAIR0/AMAIR1: swapped by interrupt.S. */
	{ CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c10_AMAIR0},
	{ CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c10_AMAIR1},

	/* ICC_SGI1R */
	{ CRm64(12), Op1( 0), is64, access_gic_sgi},

	/* VBAR: swapped by interrupt.S. */
	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c12_VBAR, 0x00000000 },

	/* ICC_SRE */
	{ CRn(12), CRm(12), Op1( 0), Op2( 5), is32, access_gic_sre },

	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_val, c13_CID, 0x00000000 },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c13_TID_URW },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
			NULL, reset_unknown, c13_TID_URO },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
			NULL, reset_unknown, c13_TID_PRIV },

	/* CNTP */
	{ CRm64(14), Op1( 2), is64, access_cntp_cval},

	/* CNTKCTL: swapped by interrupt.S. */
	{ CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c14_CNTKCTL, 0x00000000 },

	/* CNTP */
	{ CRn(14), CRm( 2), Op1( 0), Op2( 0), is32, access_cntp_tval },
	{ CRn(14), CRm( 2), Op1( 0), Op2( 1), is32, access_cntp_ctl },

	/* The Configuration Base Address Register. */
	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};

static int check_reg_table(const struct coproc_reg *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

/* Target specific emulation tables */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
	BUG_ON(check_reg_table(table->table, table->num));
	target_tables[table->target] = table;
}

/* Get specific register table for this target. */
static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
{
	struct kvm_coproc_target_table *table;

	table = target_tables[target];
	*num = table->num;
	return table->table;
}

#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val  = (x)->CRn << 11;					\
		val |= (x)->CRm << 7;					\
		val |= (x)->Op1 << 4;					\
		val |= (x)->Op2 << 1;					\
		val |= !(x)->is_64bit;					\
		val;							\
	})
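
/*
 * Illustrative layout of the match value built above (one sortable key
 * per register encoding):
 *
 *	bits [14:11]  CRn
 *	bits [10:7]   CRm
 *	bits  [6:4]   Op1
 *	bits  [3:1]   Op2
 *	bit      0    !is_64bit (so 64-bit entries sort before 32-bit ones,
 *			matching the table-ordering rule stated above)
 */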

static int match_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct coproc_reg *r = elt;

	return pval - reg_to_match_value(r);
}

static const struct coproc_reg *find_reg(const struct coproc_params *params,
					 const struct coproc_reg table[],
					 unsigned int num)
{
	unsigned long pval = reg_to_match_value(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
}

static int emulate_cp15(struct kvm_vcpu *vcpu,
			const struct coproc_params *params)
{
	size_t num;
	const struct coproc_reg *table, *r;

	trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
				   params->CRm, params->Op2, params->is_write);

	table = get_target_table(vcpu->arch.target, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	if (likely(r)) {
		/* If we don't have an accessor, we should never get here! */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		}
	} else {
		/* No matching register: complain and inject an undefined exception. */
		kvm_err("Unsupported guest CP15 access at: %08lx\n",
			*vcpu_pc(vcpu));
		print_cp_instr(params);
		kvm_inject_undefined(vcpu);
	}

	return 1;
}

static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
{
	struct coproc_params params;

	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
	params.is_64bit = true;

	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
	params.Op2 = 0;
	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
	params.CRm = 0;

	return params;
}
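
/*
 * Background (illustrative; per the ARMv7 HSR ISS encoding for MCRR/MRRC
 * traps, worth checking against the ARM ARM): bit 0 is the read/write
 * direction, bits [4:1] hold CRm, bits [9:5] Rt, bits [14:10] Rt2 and
 * bits [19:16] Opc1. Note the decoded CRm is deliberately stored in
 * params.CRn here, matching the in-kernel convention described in
 * cp15_to_index() below.
 */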

/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params = decode_64bit_hsr(vcpu);

	return emulate_cp15(vcpu, &params);
}

/**
 * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params = decode_64bit_hsr(vcpu);

	/* raz_wi cp14 */
	trap_raz_wi(vcpu, &params, NULL);

	/* handled */
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 1;
}

static void reset_coproc_regs(struct kvm_vcpu *vcpu,
			      const struct coproc_reg *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
{
	struct coproc_params params;

	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
	params.is_64bit = false;

	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
	params.Rt2 = 0;

	return params;
}

/**
 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params = decode_32bit_hsr(vcpu);

	return emulate_cp15(vcpu, &params);
}

/**
 * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params = decode_32bit_hsr(vcpu);

	/* raz_wi cp14 */
	trap_raz_wi(vcpu, &params, NULL);

	/* handled */
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 1;
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct coproc_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U32:
		/* Any unused index bits mean it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM_32_CRN_MASK
			   | KVM_REG_ARM_CRM_MASK
			   | KVM_REG_ARM_OPC1_MASK
			   | KVM_REG_ARM_32_OPC2_MASK))
			return false;

		params->is_64bit = false;
		params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
			       >> KVM_REG_ARM_32_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
			       >> KVM_REG_ARM_32_OPC2_SHIFT);
		return true;
	case KVM_REG_SIZE_U64:
		/* Any unused index bits mean it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM_CRM_MASK
			      | KVM_REG_ARM_OPC1_MASK))
			return false;
		params->is_64bit = true;
		/* CRm to CRn: see cp15_to_index for details */
		params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = 0;
		params->CRm = 0;
		return true;
	default:
		return false;
	}
}
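
/*
 * Worked example (illustrative): the 32-bit index for TTBCR
 * (CRn=2, CRm=0, Op1=0, Op2=2) is
 * KVM_REG_ARM | KVM_REG_SIZE_U32 | (15 << KVM_REG_ARM_COPROC_SHIFT) |
 * (2 << KVM_REG_ARM_32_CRN_SHIFT) | (2 << KVM_REG_ARM_32_OPC2_SHIFT);
 * index_to_params() simply reverses that packing.
 */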

/* Decode an index value, and find the cp15 coproc_reg entry. */
static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
						    u64 id)
{
	size_t num;
	const struct coproc_reg *table, *r;
	struct coproc_params params;

	/* We only do cp15 for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));

	/* Not saved in the cp15 array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant cp15 registers: we let the guest see the host
 * versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
/* Unfortunately, there's no register-argument for mrc, so generate. */
#define FUNCTION_FOR32(crn, crm, op1, op2, name)			\
	static void get_##name(struct kvm_vcpu *v,			\
			       const struct coproc_reg *r)		\
	{								\
		u32 val;						\
									\
		asm volatile("mrc p15, " __stringify(op1)		\
			     ", %0, c" __stringify(crn)			\
			     ", c" __stringify(crm)			\
			     ", " __stringify(op2) "\n" : "=r" (val));	\
		((struct coproc_reg *)r)->val = val;			\
	}

FUNCTION_FOR32(0, 0, 0, 0, MIDR)
FUNCTION_FOR32(0, 0, 0, 1, CTR)
FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
FUNCTION_FOR32(0, 0, 1, 7, AIDR)

/* ->val is filled in by kvm_invariant_coproc_table_init() */
static struct coproc_reg invariant_cp15[] = {
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },

	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },

	{ CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },

	{ CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
};

/*
 * Reads a register value from a userspace address to a kernel
 * variable. Make sure that register size matches sizeof(*val).
 */
static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

/*
 * Writes a register value to a userspace address from a kernel variable.
 * Make sure that register size matches sizeof(*val).
 */
static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_cp15(u64 id, void __user *uaddr)
{
	struct coproc_params params;
	const struct coproc_reg *r;
	int ret;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
	if (!r)
		return -ENOENT;

	ret = -ENOENT;
	if (KVM_REG_SIZE(id) == 4) {
		u32 val = r->val;

		ret = reg_to_user(uaddr, &val, id);
	} else if (KVM_REG_SIZE(id) == 8) {
		ret = reg_to_user(uaddr, &r->val, id);
	}
	return ret;
}

static int set_invariant_cp15(u64 id, void __user *uaddr)
{
	struct coproc_params params;
	const struct coproc_reg *r;
	int err;
	u64 val;

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
	if (!r)
		return -ENOENT;

	err = -ENOENT;
	if (KVM_REG_SIZE(id) == 4) {
		u32 val32;

		err = reg_from_user(&val32, uaddr, id);
		if (!err)
			val = val32;
	} else if (KVM_REG_SIZE(id) == 8) {
		err = reg_from_user(&val, uaddr, id);
	}
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
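
/*
 * Example (illustrative): a CSSELR value of 3 selects level 2
 * (level = 3 >> 1 = 1), instruction side (bottom bit set); it is only
 * valid if the Ctype2 field of cache_levels reports an instruction
 * cache (0b001) or separate caches (0b011) at that level.
 */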

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

#ifdef CONFIG_VFPv3
static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
				   KVM_REG_ARM_VFP_FPSCR,
				   KVM_REG_ARM_VFP_FPINST,
				   KVM_REG_ARM_VFP_FPINST2,
				   KVM_REG_ARM_VFP_MVFR0,
				   KVM_REG_ARM_VFP_MVFR1,
				   KVM_REG_ARM_VFP_FPSID };

static unsigned int num_fp_regs(void)
{
	if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
		return 32;
	else
		return 16;
}

static unsigned int num_vfp_regs(void)
{
	/* Normal FP regs + control regs. */
	return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
}

static int copy_vfp_regids(u64 __user *uindices)
{
	unsigned int i;
	const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
	const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;

	for (i = 0; i < num_fp_regs(); i++) {
		if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
			     uindices))
			return -EFAULT;
		uindices++;
	}

	for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
		if (put_user(u32reg | vfp_sysregs[i], uindices))
			return -EFAULT;
		uindices++;
	}

	return num_vfp_regs();
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
				   id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
	case KVM_REG_ARM_VFP_MVFR0:
		val = fmrx(MVFR0);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_MVFR1:
		val = fmrx(MVFR1);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_FPSID:
		val = fmrx(FPSID);
		return reg_to_user(uaddr, &val, id);
	default:
		return -ENOENT;
	}
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
				     uaddr, id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
	/* These are invariant. */
	case KVM_REG_ARM_VFP_MVFR0:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR0))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_MVFR1:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR1))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_FPSID:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(FPSID))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
#else /* !CONFIG_VFPv3 */
static unsigned int num_vfp_regs(void)
{
	return 0;
}

static int copy_vfp_regids(u64 __user *uindices)
{
	return 0;
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	return -ENOENT;
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	return -ENOENT;
}
#endif /* !CONFIG_VFPv3 */

int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct coproc_reg *r;
	void __user *uaddr = (void __user *)(long)reg->addr;
	int ret;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
		return vfp_get_reg(vcpu, reg->id, uaddr);

	r = index_to_coproc_reg(vcpu, reg->id);
	if (!r)
		return get_invariant_cp15(reg->id, uaddr);

	ret = -ENOENT;
	if (KVM_REG_SIZE(reg->id) == 8) {
		u64 val;

		val = vcpu_cp15_reg64_get(vcpu, r);
		ret = reg_to_user(uaddr, &val, reg->id);
	} else if (KVM_REG_SIZE(reg->id) == 4) {
		ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id);
	}

	return ret;
}

int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct coproc_reg *r;
	void __user *uaddr = (void __user *)(long)reg->addr;
	int ret;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
		return vfp_set_reg(vcpu, reg->id, uaddr);

	r = index_to_coproc_reg(vcpu, reg->id);
	if (!r)
		return set_invariant_cp15(reg->id, uaddr);

	ret = -ENOENT;
	if (KVM_REG_SIZE(reg->id) == 8) {
		u64 val;

		ret = reg_from_user(&val, uaddr, reg->id);
		if (!ret)
			vcpu_cp15_reg64_set(vcpu, r, val);
	} else if (KVM_REG_SIZE(reg->id) == 4) {
		ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id);
	}

	return ret;
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 cp15_to_index(const struct coproc_reg *reg)
{
	u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);

	if (reg->is_64bit) {
		val |= KVM_REG_SIZE_U64;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		/*
		 * CRn always denotes the primary coproc. reg. nr. for the
		 * in-kernel representation, but the user space API uses the
		 * CRm for the encoding, because it is modelled after the
		 * MRRC/MCRR instructions: see the ARM ARM rev. c page
		 * B3-1445
		 */
		val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
	} else {
		val |= KVM_REG_SIZE_U32;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
		val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
		val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
	}

	return val;
}
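
/*
 * Worked example (illustrative): the 64-bit TTBR0 entry above
 * (CRm64( 2), Op1( 0)) becomes
 * KVM_REG_ARM | KVM_REG_SIZE_U64 | (15 << KVM_REG_ARM_COPROC_SHIFT) |
 * (2 << KVM_REG_ARM_CRM_SHIFT), with the in-kernel CRn value placed
 * in the index's CRm field as the comment above explains.
 */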

static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(cp15_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_coproc_table_init. */
static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct coproc_reg *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, &num);
	end1 = i1 + num;
	i2 = cp15_regs;
	end2 = cp15_regs + ARRAY_SIZE(cp15_regs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_reg(i1, i2);

		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}

	return total;
}

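/*
 * Note (illustrative): copy_reg_to_user() treats a NULL destination as
 * "count only", so walk_cp15(vcpu, NULL) below returns the number of
 * indices without copying anything; the same walk then fills the real
 * buffer in kvm_arm_copy_coproc_indices().
 */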
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_cp15)
		+ num_demux_regs()
		+ num_vfp_regs()
		+ walk_cp15(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* First give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
		if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_cp15(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	err = copy_vfp_regids(uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

void kvm_coproc_table_init(void)
{
	unsigned int i;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
		invariant_cp15[i].reset(NULL, &invariant_cp15[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
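	/*
	 * Worked example (illustrative): if CLIDR reports Ctype1 = 0b011
	 * (separate I+D), Ctype2 = 0b100 (unified) and Ctype3 = 0b000,
	 * the loop below stops at i = 2 and the mask keeps only bits
	 * [5:0] of cache_levels, discarding the reserved upper fields.
	 */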
	asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

/**
 * kvm_reset_coprocs - sets cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct coproc_reg *table;

	/* Catch someone adding a register without putting in a reset entry. */
	memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));

	/* Generic chip reset first (so target could override). */
	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));

	table = get_target_table(vcpu->arch.target, &num);
	reset_coproc_regs(vcpu, table, num);

	for (num = 1; num < NR_CP15_REGS; num++)
		if (vcpu_cp15(vcpu, num) == 0x42424242)
			panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
}
1432}