/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/bsearch.h>
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

#include "trace.h"
#include "coproc.h"


/******************************************************************************
 * Co-processor emulation
 *****************************************************************************/

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12
/*
 * kvm_vcpu_arch.cp15 holds the cp15 registers as an array of u32, but
 * some cp15 registers can be viewed either as a pair of u32 registers
 * or as a single u64 register. The current u64 encoding stores the
 * least significant u32 word first, followed by the most significant
 * u32 word.
 */
static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
				       const struct coproc_reg *r,
				       u64 val)
{
	vcpu_cp15(vcpu, r->reg) = val & 0xffffffff;
	vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
}

static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
				      const struct coproc_reg *r)
{
	u64 val;

	val = vcpu_cp15(vcpu, r->reg + 1);
	val = val << 32;
	val = val | vcpu_cp15(vcpu, r->reg);
	return val;
}
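
/*
 * For example, storing the 64-bit value 0x0000000100000002 with
 * vcpu_cp15_reg64_set() leaves 0x00000002 in vcpu_cp15(vcpu, r->reg)
 * and 0x00000001 in vcpu_cp15(vcpu, r->reg + 1); vcpu_cp15_reg64_get()
 * reassembles the same u64 from those two words.
 */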

int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/*
	 * We can get here if the host has been built without VFPv3
	 * support, but the guest attempted a floating point operation.
	 */
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	/*
	 * Compute guest MPIDR. We build a virtual cluster out of the
	 * vcpu_id, but we read the 'U' bit from the underlying
	 * hardware directly.
	 */
	vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
				     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
				     (vcpu->vcpu_id & 3));
}
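
/*
 * The virtual clusters hold four vcpus each: vcpu_id 5, for instance,
 * becomes Aff0 == 1 (5 & 3) within cluster Aff1 == 1 (5 >> 2).
 */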

/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
static bool access_actlr(struct kvm_vcpu *vcpu,
			 const struct coproc_params *p,
			 const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
	return true;
}

/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
static bool access_cbar(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p);
	return read_zero(vcpu, p);
}

/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
static bool access_l2ctlr(struct kvm_vcpu *vcpu,
			  const struct coproc_params *p,
			  const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
	return true;
}

static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 l2ctlr, ncores;

	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
	l2ctlr &= ~(3 << 24);
	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
	/* How many cores in the current cluster and the next ones */
	ncores -= (vcpu->vcpu_id & ~3);
	/* Cap it to the maximum number of cores in a single cluster */
	ncores = min(ncores, 3U);
	l2ctlr |= (ncores & 3) << 24;

	vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr;
}
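
/*
 * Example: with 6 online vcpus, the vcpu with vcpu_id 4 sits in the
 * second cluster, so ncores becomes 5 - 4 == 1 and L2CTLR[25:24] is
 * set to 1, advertising two cores in that cluster.
 */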

static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 actlr;

	/* ACTLR contains SMP bit: make sure you create all cpus first! */
	asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
	/* Make the SMP bit consistent with the guest configuration */
	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
		actlr |= 1U << 6;
	else
		actlr &= ~(1U << 6);

	vcpu_cp15(vcpu, c1_ACTLR) = actlr;
}

/*
 * TRM entries: A7:4.3.50, A15:4.3.49
 * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
 */
static bool access_l2ectlr(struct kvm_vcpu *vcpu,
			   const struct coproc_params *p,
			   const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = 0;
	return true;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	kvm_set_way_flush(vcpu);
	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set.  If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 *
 * Used by the cpu-specific code.
 */
bool access_vm_reg(struct kvm_vcpu *vcpu,
		   const struct coproc_params *p,
		   const struct coproc_reg *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
	if (p->is_64bit)
		vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool pm_fake(struct kvm_vcpu *vcpu,
		    const struct coproc_params *p,
		    const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

#define access_pmcr pm_fake
#define access_pmcntenset pm_fake
#define access_pmcntenclr pm_fake
#define access_pmovsr pm_fake
#define access_pmselr pm_fake
#define access_pmceid0 pm_fake
#define access_pmceid1 pm_fake
#define access_pmccntr pm_fake
#define access_pmxevtyper pm_fake
#define access_pmxevcntr pm_fake
#define access_pmuserenr pm_fake
#define access_pmintenset pm_fake
#define access_pmintenclr pm_fake

/* Architected CP15 registers.
 * CRn denotes the primary register number, but is copied to the CRm in the
 * user space API for 64-bit register access, in line with the terminology
 * used in the ARM ARM.
 * Important: Must be sorted ascending by CRn, CRm, Op1, Op2 and with 64-bit
 *            registers preceding 32-bit ones.
 */
static const struct coproc_reg cp15_regs[] = {
	/* MPIDR: we use VMPIDR for guest access. */
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
			NULL, reset_mpidr, c0_MPIDR },

	/* CSSELR: swapped by interrupt.S. */
	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
			NULL, reset_unknown, c0_CSSELR },

	/* ACTLR: trapped by HCR.TAC bit. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
			access_actlr, reset_actlr, c1_ACTLR },

	/* CPACR: swapped by interrupt.S. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_val, c1_CPACR, 0x00000000 },

	/* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */
	{ CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c2_TTBR0 },
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c2_TTBR1 },
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
			access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
	{ CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },

	/* DACR: swapped by interrupt.S. */
	{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c3_DACR },

	/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c5_DFSR },
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c5_IFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c5_ADFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c5_AIFSR },

	/* DFAR/IFAR: swapped by interrupt.S. */
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c6_DFAR },
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
			access_vm_reg, reset_unknown, c6_IFAR },

	/* PAR swapped by interrupt.S */
	{ CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
	/*
	 * L2CTLR access (guest wants to know #CPUs).
	 */
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
			access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},

	/*
	 * Dummy performance monitor implementation.
	 */
	{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},

	/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
	{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c10_PRRR},
	{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c10_NMRR},

	/* AMAIR0/AMAIR1: swapped by interrupt.S. */
	{ CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c10_AMAIR0},
	{ CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c10_AMAIR1},

	/* VBAR: swapped by interrupt.S. */
	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c12_VBAR, 0x00000000 },

	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_val, c13_CID, 0x00000000 },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c13_TID_URW },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
			NULL, reset_unknown, c13_TID_URO },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
			NULL, reset_unknown, c13_TID_PRIV },

	/* CNTKCTL: swapped by interrupt.S. */
	{ CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c14_CNTKCTL, 0x00000000 },

	/* The Configuration Base Address Register. */
	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};
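
/*
 * Example: a guest "mrc p15, 0, r0, c9, c12, 0" (a PMCR read) decodes
 * to CRn == 9, CRm == 12, Op1 == 0, Op2 == 0, matches the access_pmcr
 * entry above and therefore reads back zero.
 */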

static int check_reg_table(const struct coproc_reg *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

/* Target specific emulation tables */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
	BUG_ON(check_reg_table(table->table, table->num));
	target_tables[table->target] = table;
}

/* Get specific register table for this target. */
static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
{
	struct kvm_coproc_target_table *table;

	table = target_tables[target];
	*num = table->num;
	return table->table;
}

#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val  = (x)->CRn << 11;					\
		val |= (x)->CRm << 7;					\
		val |= (x)->Op1 << 4;					\
		val |= (x)->Op2 << 1;					\
		val |= !(x)->is_64bit;					\
		val;							\
	 })

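/*
 * The match value packs the whole encoding into one integer:
 * bits [14:11] hold CRn, [10:7] CRm, [6:4] Op1, [3:1] Op2, and bit 0
 * is !is_64bit, so a 64-bit register sorts just before the 32-bit
 * registers sharing its encoding.
 */
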
static int match_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct coproc_reg *r = elt;

	return pval - reg_to_match_value(r);
}

static const struct coproc_reg *find_reg(const struct coproc_params *params,
					 const struct coproc_reg table[],
					 unsigned int num)
{
	unsigned long pval = reg_to_match_value(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
}

static int emulate_cp15(struct kvm_vcpu *vcpu,
			const struct coproc_params *params)
{
	size_t num;
	const struct coproc_reg *table, *r;

	trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
				   params->CRm, params->Op2, params->is_write);

	table = get_target_table(vcpu->arch.target, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	if (likely(r)) {
		/* If we don't have an accessor, we should never get here! */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If the access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest CP15 access at: %08lx\n",
			*vcpu_pc(vcpu));
		print_cp_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params;

	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
	params.is_64bit = true;

	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
	params.Op2 = 0;
	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
	params.CRm = 0;

	return emulate_cp15(vcpu, &params);
}

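/*
 * For MCRR/MRRC traps the HSR encodes CRm in bits [4:1], Rt1 in [9:5],
 * Rt2 in [13:10] and Op1 in [19:16]. The trapped CRm value is stored
 * in params.CRn to match the CRm-as-CRn convention the 64-bit table
 * entries use.
 */
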
static void reset_coproc_regs(struct kvm_vcpu *vcpu,
			      const struct coproc_reg *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params;

	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
	params.is_64bit = false;

	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
	params.Rt2 = 0;

	return emulate_cp15(vcpu, &params);
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct coproc_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U32:
		/* Any unused index bits mean it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM_32_CRN_MASK
			   | KVM_REG_ARM_CRM_MASK
			   | KVM_REG_ARM_OPC1_MASK
			   | KVM_REG_ARM_32_OPC2_MASK))
			return false;

		params->is_64bit = false;
		params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
			       >> KVM_REG_ARM_32_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
			       >> KVM_REG_ARM_32_OPC2_SHIFT);
		return true;
	case KVM_REG_SIZE_U64:
		/* Any unused index bits mean it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM_CRM_MASK
			      | KVM_REG_ARM_OPC1_MASK))
			return false;
		params->is_64bit = true;
		/* CRm is mapped to CRn: see cp15_to_index for details */
		params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = 0;
		params->CRm = 0;
		return true;
	default:
		return false;
	}
}

/* Decode an index value, and find the cp15 coproc_reg entry. */
static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
						    u64 id)
{
	size_t num;
	const struct coproc_reg *table, *r;
	struct coproc_params params;

	/* We only do cp15 for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));

	/* Not saved in the cp15 array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant cp15 registers: we let the guest see the host
 * versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
/* Unfortunately, there's no register-argument for mrc, so generate. */
#define FUNCTION_FOR32(crn, crm, op1, op2, name)			\
	static void get_##name(struct kvm_vcpu *v,			\
			       const struct coproc_reg *r)		\
	{								\
		u32 val;						\
									\
		asm volatile("mrc p15, " __stringify(op1)		\
			     ", %0, c" __stringify(crn)			\
			     ", c" __stringify(crm)			\
			     ", " __stringify(op2) "\n" : "=r" (val));	\
		((struct coproc_reg *)r)->val = val;			\
	}

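/*
 * For example, FUNCTION_FOR32(0, 0, 0, 0, MIDR) expands to a get_MIDR()
 * helper that executes "mrc p15, 0, <Rt>, c0, c0, 0" and stashes the
 * host's MIDR value in the invariant table entry.
 */
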
FUNCTION_FOR32(0, 0, 0, 0, MIDR)
FUNCTION_FOR32(0, 0, 0, 1, CTR)
FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
FUNCTION_FOR32(0, 0, 1, 7, AIDR)

/* ->val is filled in by kvm_coproc_table_init() */
static struct coproc_reg invariant_cp15[] = {
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },

	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },

	{ CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },

	{ CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
};

/*
 * Reads a register value from a userspace address to a kernel
 * variable. The caller must make sure that KVM_REG_SIZE(id) matches
 * the size of the destination variable.
 */
static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

/*
 * Writes a register value to a userspace address from a kernel variable.
 * The caller must make sure that KVM_REG_SIZE(id) matches the size of
 * the source variable.
 */
static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_cp15(u64 id, void __user *uaddr)
{
	struct coproc_params params;
	const struct coproc_reg *r;
	int ret;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
	if (!r)
		return -ENOENT;

	ret = -ENOENT;
	if (KVM_REG_SIZE(id) == 4) {
		u32 val = r->val;

		ret = reg_to_user(uaddr, &val, id);
	} else if (KVM_REG_SIZE(id) == 8) {
		ret = reg_to_user(uaddr, &r->val, id);
	}
	return ret;
}

static int set_invariant_cp15(u64 id, void __user *uaddr)
{
	struct coproc_params params;
	const struct coproc_reg *r;
	int err;
	u64 val;

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
	if (!r)
		return -ENOENT;

	err = -ENOENT;
	if (KVM_REG_SIZE(id) == 4) {
		u32 val32;

		err = reg_from_user(&val32, uaddr, id);
		if (!err)
			val = val32;
	} else if (KVM_REG_SIZE(id) == 8) {
		err = reg_from_user(&val, uaddr, id);
	}
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

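/*
 * Example: CSSELR value 3 (level == 3 >> 1 == 1, InD bit set) names the
 * level 2 instruction cache; it is only accepted when CLIDR's Ctype2
 * field reports an instruction cache (1) or separate caches (3).
 */
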
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

#ifdef CONFIG_VFPv3
static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
				   KVM_REG_ARM_VFP_FPSCR,
				   KVM_REG_ARM_VFP_FPINST,
				   KVM_REG_ARM_VFP_FPINST2,
				   KVM_REG_ARM_VFP_MVFR0,
				   KVM_REG_ARM_VFP_MVFR1,
				   KVM_REG_ARM_VFP_FPSID };

static unsigned int num_fp_regs(void)
{
	if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
		return 32;
	else
		return 16;
}

static unsigned int num_vfp_regs(void)
{
	/* Normal FP regs + control regs. */
	return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
}

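/*
 * MVFR0's A_SIMD field advertises the size of the register bank: the
 * value 2 means 32 double-precision registers (VFPv3-D32); anything
 * else is treated as the 16-register variant here.
 */
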
static int copy_vfp_regids(u64 __user *uindices)
{
	unsigned int i;
	const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
	const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;

	for (i = 0; i < num_fp_regs(); i++) {
		if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
			     uindices))
			return -EFAULT;
		uindices++;
	}

	for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
		if (put_user(u32reg | vfp_sysregs[i], uindices))
			return -EFAULT;
		uindices++;
	}

	return num_vfp_regs();
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
				   id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
	case KVM_REG_ARM_VFP_MVFR0:
		val = fmrx(MVFR0);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_MVFR1:
		val = fmrx(MVFR1);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_FPSID:
		val = fmrx(FPSID);
		return reg_to_user(uaddr, &val, id);
	default:
		return -ENOENT;
	}
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
				     uaddr, id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
	/* These are invariant. */
	case KVM_REG_ARM_VFP_MVFR0:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR0))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_MVFR1:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR1))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_FPSID:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(FPSID))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
#else /* !CONFIG_VFPv3 */
static unsigned int num_vfp_regs(void)
{
	return 0;
}

static int copy_vfp_regids(u64 __user *uindices)
{
	return 0;
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	return -ENOENT;
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	return -ENOENT;
}
#endif /* !CONFIG_VFPv3 */

int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct coproc_reg *r;
	void __user *uaddr = (void __user *)(long)reg->addr;
	int ret;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
		return vfp_get_reg(vcpu, reg->id, uaddr);

	r = index_to_coproc_reg(vcpu, reg->id);
	if (!r)
		return get_invariant_cp15(reg->id, uaddr);

	ret = -ENOENT;
	if (KVM_REG_SIZE(reg->id) == 8) {
		u64 val;

		val = vcpu_cp15_reg64_get(vcpu, r);
		ret = reg_to_user(uaddr, &val, reg->id);
	} else if (KVM_REG_SIZE(reg->id) == 4) {
		ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id);
	}

	return ret;
}

int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct coproc_reg *r;
	void __user *uaddr = (void __user *)(long)reg->addr;
	int ret;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
		return vfp_set_reg(vcpu, reg->id, uaddr);

	r = index_to_coproc_reg(vcpu, reg->id);
	if (!r)
		return set_invariant_cp15(reg->id, uaddr);

	ret = -ENOENT;
	if (KVM_REG_SIZE(reg->id) == 8) {
		u64 val;

		ret = reg_from_user(&val, uaddr, reg->id);
		if (!ret)
			vcpu_cp15_reg64_set(vcpu, r, val);
	} else if (KVM_REG_SIZE(reg->id) == 4) {
		ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id);
	}

	return ret;
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 cp15_to_index(const struct coproc_reg *reg)
{
	u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
	if (reg->is_64bit) {
		val |= KVM_REG_SIZE_U64;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		/*
		 * CRn always denotes the primary coprocessor register
		 * number for the in-kernel representation, but the user
		 * space API uses CRm for the encoding, because it is
		 * modelled after the MRRC/MCRR instructions: see the
		 * ARM ARM rev. c page B3-1445
		 */
		val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
	} else {
		val |= KVM_REG_SIZE_U32;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
		val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
		val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
	}
	return val;
}

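/*
 * Example: the 64-bit TTBR0 entry (CRm64(2), Op1(0)) produces the index
 * KVM_REG_ARM | KVM_REG_SIZE_U64 | (15 << KVM_REG_ARM_COPROC_SHIFT) |
 * (2 << KVM_REG_ARM_CRM_SHIFT), which index_to_params() decodes back
 * into the same encoding.
 */
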
static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(cp15_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_coproc_table_init. */
static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct coproc_reg *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, &num);
	end1 = i1 + num;
	i2 = cp15_regs;
	end2 = cp15_regs + ARRAY_SIZE(cp15_regs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

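/*
 * Note that walk_cp15() may be called with a NULL uind: in that case
 * copy_reg_to_user() reports success without writing anything, so the
 * walk degenerates into a count of the visible registers. This is how
 * kvm_arm_num_coproc_regs() below sizes the register list.
 */
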
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_cp15)
		+ num_demux_regs()
		+ num_vfp_regs()
		+ walk_cp15(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* First give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
		if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_cp15(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	err = copy_vfp_regids(uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

void kvm_coproc_table_init(void)
{
	unsigned int i;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
		invariant_cp15[i].reset(NULL, &invariant_cp15[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

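/*
 * Example: a CLIDR advertising separate L1 caches and a unified L2
 * (Ctype1 == 3, Ctype2 == 4, Ctype3 == 0) leaves cache_levels ==
 * 0b100011: the loop stops at i == 2 and the mask keeps bits [5:0].
 */
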
/**
 * kvm_reset_coprocs - sets cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct coproc_reg *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));

	/* Generic chip reset first (so target could override). */
	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));

	table = get_target_table(vcpu->arch.target, &num);
	reset_coproc_regs(vcpu, table, num);

	for (num = 1; num < NR_CP15_REGS; num++)
		if (vcpu_cp15(vcpu, num) == 0x42424242)
			panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
}