   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * kvm nested virtualization support for s390x
   4 *
   5 * Copyright IBM Corp. 2016, 2018
   6 *
   7 *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
   8 */
   9#include <linux/vmalloc.h>
  10#include <linux/kvm_host.h>
  11#include <linux/bug.h>
  12#include <linux/list.h>
  13#include <linux/bitmap.h>
  14#include <linux/sched/signal.h>
  15
  16#include <asm/gmap.h>
  17#include <asm/mmu_context.h>
  18#include <asm/sclp.h>
  19#include <asm/nmi.h>
  20#include <asm/dis.h>
  21#include <asm/fpu/api.h>
  22#include <asm/facility.h>
  23#include "kvm-s390.h"
  24#include "gaccess.h"
  25
  26struct vsie_page {
  27	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
  28	/*
  29	 * the backup info for machine check. ensure it's at
  30	 * the same offset as that in struct sie_page!
  31	 */
  32	struct mcck_volatile_info mcck_info;    /* 0x0200 */
  33	/*
  34	 * The pinned original scb. Be aware that other VCPUs can modify
  35	 * it while we read from it. Values that are used for conditions or
  36	 * are reused conditionally, should be accessed via READ_ONCE.
  37	 */
  38	struct kvm_s390_sie_block *scb_o;	/* 0x0218 */
  39	/* the shadow gmap in use by the vsie_page */
  40	struct gmap *gmap;			/* 0x0220 */
  41	/* address of the last reported fault to guest2 */
  42	unsigned long fault_addr;		/* 0x0228 */
  43	/* calculated guest addresses of satellite control blocks */
  44	gpa_t sca_gpa;				/* 0x0230 */
  45	gpa_t itdba_gpa;			/* 0x0238 */
  46	gpa_t gvrd_gpa;				/* 0x0240 */
  47	gpa_t riccbd_gpa;			/* 0x0248 */
  48	gpa_t sdnx_gpa;				/* 0x0250 */
  49	__u8 reserved[0x0700 - 0x0258];		/* 0x0258 */
  50	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
  51	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
  52};
  53
  54/* trigger a validity icpt for the given scb */
  55static int set_validity_icpt(struct kvm_s390_sie_block *scb,
  56			     __u16 reason_code)
  57{
  58	scb->ipa = 0x1000;
  59	scb->ipb = ((__u32) reason_code) << 16;
  60	scb->icptcode = ICPT_VALIDITY;
  61	return 1;
  62}
  63
  64/* mark the prefix as unmapped, this will block the VSIE */
  65static void prefix_unmapped(struct vsie_page *vsie_page)
  66{
  67	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
  68}
  69
  70/* mark the prefix as unmapped and wait until the VSIE has been left */
  71static void prefix_unmapped_sync(struct vsie_page *vsie_page)
  72{
  73	prefix_unmapped(vsie_page);
  74	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
  75		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
  76	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
  77		cpu_relax();
  78}
  79
  80/* mark the prefix as mapped, this will allow the VSIE to run */
  81static void prefix_mapped(struct vsie_page *vsie_page)
  82{
  83	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
  84}
  85
  86/* test if the prefix is mapped into the gmap shadow */
  87static int prefix_is_mapped(struct vsie_page *vsie_page)
  88{
  89	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
  90}
  91
  92/* copy the updated intervention request bits into the shadow scb */
  93static void update_intervention_requests(struct vsie_page *vsie_page)
  94{
  95	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
  96	int cpuflags;
  97
  98	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
  99	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
 100	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
 101}
 102
  103/* shadow (filter and validate) the cpuflags */
 104static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 105{
 106	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 107	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 108	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);
 109
 110	/* we don't allow ESA/390 guests */
 111	if (!(cpuflags & CPUSTAT_ZARCH))
 112		return set_validity_icpt(scb_s, 0x0001U);
 113
 114	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
 115		return set_validity_icpt(scb_s, 0x0001U);
 116	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
 117		return set_validity_icpt(scb_s, 0x0007U);
 118
 119	/* intervention requests will be set later */
 120	newflags = CPUSTAT_ZARCH;
 121	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
 122		newflags |= CPUSTAT_GED;
 123	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
 124		if (cpuflags & CPUSTAT_GED)
 125			return set_validity_icpt(scb_s, 0x0001U);
 126		newflags |= CPUSTAT_GED2;
 127	}
 128	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
 129		newflags |= cpuflags & CPUSTAT_P;
 130	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
 131		newflags |= cpuflags & CPUSTAT_SM;
 132	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
 133		newflags |= cpuflags & CPUSTAT_IBS;
 134	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
 135		newflags |= cpuflags & CPUSTAT_KSS;
 136
 137	atomic_set(&scb_s->cpuflags, newflags);
 138	return 0;
 139}
 140/* Copy to APCB FORMAT1 from APCB FORMAT0 */
 141static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
 142			unsigned long crycb_gpa, struct kvm_s390_apcb1 *apcb_h)
 143{
 144	struct kvm_s390_apcb0 tmp;
 145	unsigned long apcb_gpa;
 146
 147	apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);
 148
 149	if (read_guest_real(vcpu, apcb_gpa, &tmp,
 150			    sizeof(struct kvm_s390_apcb0)))
 151		return -EFAULT;
 152
 153	apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
 154	apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
 155	apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;
 156
 157	return 0;
 158
 159}
 160
 161/**
 162 * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
 163 * @vcpu: pointer to the virtual CPU
 164 * @apcb_s: pointer to start of apcb in the shadow crycb
 165 * @crycb_gpa: guest physical address to start of original guest crycb
 166 * @apcb_h: pointer to start of apcb in the guest1
 167 *
  168 * Returns 0 on success and -EFAULT on error reading the guest apcb
 169 */
 170static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
 171			unsigned long crycb_gpa, unsigned long *apcb_h)
 172{
 173	unsigned long apcb_gpa;
 174
 175	apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);
 176
 177	if (read_guest_real(vcpu, apcb_gpa, apcb_s,
 178			    sizeof(struct kvm_s390_apcb0)))
 179		return -EFAULT;
 180
 181	bitmap_and(apcb_s, apcb_s, apcb_h,
 182		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb0));
 183
 184	return 0;
 185}
 186
 187/**
 188 * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
 189 * @vcpu: pointer to the virtual CPU
 190 * @apcb_s: pointer to start of apcb in the shadow crycb
 191 * @crycb_gpa: guest physical address to start of original guest crycb
 192 * @apcb_h: pointer to start of apcb in the host
 193 *
  194 * Returns 0 on success and -EFAULT on error reading the guest apcb
 195 */
 196static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
 197			unsigned long crycb_gpa,
 198			unsigned long *apcb_h)
 199{
 200	unsigned long apcb_gpa;
 201
 202	apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb1);
 203
 204	if (read_guest_real(vcpu, apcb_gpa, apcb_s,
 205			    sizeof(struct kvm_s390_apcb1)))
 206		return -EFAULT;
 207
 208	bitmap_and(apcb_s, apcb_s, apcb_h,
 209		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1));
 210
 211	return 0;
 212}
 213
 214/**
 215 * setup_apcb - Create a shadow copy of the apcb.
 216 * @vcpu: pointer to the virtual CPU
 217 * @crycb_s: pointer to shadow crycb
 218 * @crycb_gpa: guest physical address of original guest crycb
 219 * @crycb_h: pointer to the host crycb
 220 * @fmt_o: format of the original guest crycb.
 221 * @fmt_h: format of the host crycb.
 222 *
 223 * Checks the compatibility between the guest and host crycb and calls the
 224 * appropriate copy function.
 225 *
  226 * Returns 0 on success, or an error number if the guest and host crycb are incompatible.
 227 */
 228static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
 229	       const u32 crycb_gpa,
 230	       struct kvm_s390_crypto_cb *crycb_h,
 231	       int fmt_o, int fmt_h)
  232{
  233	switch (fmt_o) {
 234	case CRYCB_FORMAT2:
 235		if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 256) & PAGE_MASK))
 236			return -EACCES;
 237		if (fmt_h != CRYCB_FORMAT2)
 238			return -EINVAL;
 239		return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
 240				    crycb_gpa,
 241				    (unsigned long *)&crycb_h->apcb1);
 242	case CRYCB_FORMAT1:
 243		switch (fmt_h) {
 244		case CRYCB_FORMAT2:
 245			return setup_apcb10(vcpu, &crycb_s->apcb1,
 246					    crycb_gpa,
 247					    &crycb_h->apcb1);
 248		case CRYCB_FORMAT1:
 249			return setup_apcb00(vcpu,
 250					    (unsigned long *) &crycb_s->apcb0,
 251					    crycb_gpa,
 252					    (unsigned long *) &crycb_h->apcb0);
 253		}
 254		break;
 255	case CRYCB_FORMAT0:
 256		if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 32) & PAGE_MASK))
 257			return -EACCES;
 258
 259		switch (fmt_h) {
 260		case CRYCB_FORMAT2:
 261			return setup_apcb10(vcpu, &crycb_s->apcb1,
 262					    crycb_gpa,
 263					    &crycb_h->apcb1);
 264		case CRYCB_FORMAT1:
 265		case CRYCB_FORMAT0:
 266			return setup_apcb00(vcpu,
 267					    (unsigned long *) &crycb_s->apcb0,
 268					    crycb_gpa,
 269					    (unsigned long *) &crycb_h->apcb0);
 270		}
 271	}
 272	return -EINVAL;
 273}
 274
 275/**
 276 * shadow_crycb - Create a shadow copy of the crycb block
 277 * @vcpu: a pointer to the virtual CPU
  278 * @vsie_page: a pointer to internal data used for the vSIE
 279 *
 280 * Create a shadow copy of the crycb block and setup key wrapping, if
 281 * requested for guest 3 and enabled for guest 2.
 282 *
 283 * We accept format-1 or format-2, but we convert format-1 into format-2
 284 * in the shadow CRYCB.
 285 * Using format-2 enables the firmware to choose the right format when
 286 * scheduling the SIE.
 287 * There is nothing to do for format-0.
 288 *
  289 * This function centralizes the issuing of set_validity_icpt() for all
 290 * the subfunctions working on the crycb.
 291 *
 292 * Returns: - 0 if shadowed or nothing to do
 293 *          - > 0 if control has to be given to guest 2
 294 */
 295static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 296{
 297	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 298	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 299	const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
 300	const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
 301	unsigned long *b1, *b2;
 302	u8 ecb3_flags;
 303	u32 ecd_flags;
 304	int apie_h;
 305	int apie_s;
 306	int key_msk = test_kvm_facility(vcpu->kvm, 76);
 307	int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
 308	int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
 309	int ret = 0;
 310
 311	scb_s->crycbd = 0;
 312
 313	apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
 314	apie_s = apie_h & scb_o->eca;
 315	if (!apie_s && (!key_msk || (fmt_o == CRYCB_FORMAT0)))
 316		return 0;
 317
 318	if (!crycb_addr)
 319		return set_validity_icpt(scb_s, 0x0039U);
 320
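	/* a format-1 CRYCB spans 128 bytes and must not cross a page boundary */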
 321	if (fmt_o == CRYCB_FORMAT1)
 322		if ((crycb_addr & PAGE_MASK) !=
 323		    ((crycb_addr + 128) & PAGE_MASK))
 324			return set_validity_icpt(scb_s, 0x003CU);
 325
 326	if (apie_s) {
 327		ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
 328				 vcpu->kvm->arch.crypto.crycb,
 329				 fmt_o, fmt_h);
 330		if (ret)
 331			goto end;
 332		scb_s->eca |= scb_o->eca & ECA_APIE;
 333	}
 334
 335	/* we may only allow it if enabled for guest 2 */
 336	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
 337		     (ECB3_AES | ECB3_DEA);
 338	ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC;
 339	if (!ecb3_flags && !ecd_flags)
 340		goto end;
 341
 342	/* copy only the wrapping keys */
 343	if (read_guest_real(vcpu, crycb_addr + 72,
 344			    vsie_page->crycb.dea_wrapping_key_mask, 56))
 345		return set_validity_icpt(scb_s, 0x0035U);
 346
 347	scb_s->ecb3 |= ecb3_flags;
 348	scb_s->ecd |= ecd_flags;
 349
 350	/* xor both blocks in one run */
 351	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
 352	b2 = (unsigned long *)
 353			    vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
 354	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
 355	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
 356end:
 357	switch (ret) {
 358	case -EINVAL:
 359		return set_validity_icpt(scb_s, 0x0022U);
 360	case -EFAULT:
 361		return set_validity_icpt(scb_s, 0x0035U);
 362	case -EACCES:
 363		return set_validity_icpt(scb_s, 0x003CU);
 364	}
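	/* vsie_page lies below 2 GB (allocated with GFP_DMA), so the 32-bit crycb designation is valid */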
 365	scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
 366	return 0;
 367}
 368
 369/* shadow (round up/down) the ibc to avoid validity icpt */
 370static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 371{
 372	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 373	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 374	/* READ_ONCE does not work on bitfields - use a temporary variable */
 375	const uint32_t __new_ibc = scb_o->ibc;
 376	const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
 377	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;
 378
 379	scb_s->ibc = 0;
 380	/* ibc installed in g2 and requested for g3 */
 381	if (vcpu->kvm->arch.model.ibc && new_ibc) {
 382		scb_s->ibc = new_ibc;
  383		/* take care of the minimum ibc level of the machine */
 384		if (scb_s->ibc < min_ibc)
 385			scb_s->ibc = min_ibc;
 386		/* take care of the maximum ibc level set for the guest */
 387		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
 388			scb_s->ibc = vcpu->kvm->arch.model.ibc;
 389	}
 390}
 391
 392/* unshadow the scb, copying parameters back to the real scb */
 393static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 394{
 395	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 396	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 397
 398	/* interception */
 399	scb_o->icptcode = scb_s->icptcode;
 400	scb_o->icptstatus = scb_s->icptstatus;
 401	scb_o->ipa = scb_s->ipa;
 402	scb_o->ipb = scb_s->ipb;
 403	scb_o->gbea = scb_s->gbea;
 404
 405	/* timer */
 406	scb_o->cputm = scb_s->cputm;
 407	scb_o->ckc = scb_s->ckc;
 408	scb_o->todpr = scb_s->todpr;
 409
 410	/* guest state */
 411	scb_o->gpsw = scb_s->gpsw;
 412	scb_o->gg14 = scb_s->gg14;
 413	scb_o->gg15 = scb_s->gg15;
 414	memcpy(scb_o->gcr, scb_s->gcr, 128);
 415	scb_o->pp = scb_s->pp;
 416
 417	/* branch prediction */
 418	if (test_kvm_facility(vcpu->kvm, 82)) {
 419		scb_o->fpf &= ~FPF_BPBC;
 420		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
 421	}
 422
 423	/* interrupt intercept */
 424	switch (scb_s->icptcode) {
 425	case ICPT_PROGI:
 426	case ICPT_INSTPROGI:
 427	case ICPT_EXTINT:
 428		memcpy((void *)((u64)scb_o + 0xc0),
 429		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
  430		break;
  431	}
 432
 433	if (scb_s->ihcpu != 0xffffU)
 434		scb_o->ihcpu = scb_s->ihcpu;
 435}
 436
 437/*
 438 * Setup the shadow scb by copying and checking the relevant parts of the g2
 439 * provided scb.
 440 *
 441 * Returns: - 0 if the scb has been shadowed
 442 *          - > 0 if control has to be given to guest 2
 443 */
 444static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 445{
 446	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 447	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 448	/* READ_ONCE does not work on bitfields - use a temporary variable */
 449	const uint32_t __new_prefix = scb_o->prefix;
 450	const uint32_t new_prefix = READ_ONCE(__new_prefix);
 451	const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
 452	bool had_tx = scb_s->ecb & ECB_TE;
 453	unsigned long new_mso = 0;
 454	int rc;
 455
 456	/* make sure we don't have any leftovers when reusing the scb */
 457	scb_s->icptcode = 0;
 458	scb_s->eca = 0;
 459	scb_s->ecb = 0;
 460	scb_s->ecb2 = 0;
 461	scb_s->ecb3 = 0;
 462	scb_s->ecd = 0;
 463	scb_s->fac = 0;
 464	scb_s->fpf = 0;
 465
 466	rc = prepare_cpuflags(vcpu, vsie_page);
 467	if (rc)
 468		goto out;
 469
 470	/* timer */
 471	scb_s->cputm = scb_o->cputm;
 472	scb_s->ckc = scb_o->ckc;
 473	scb_s->todpr = scb_o->todpr;
 474	scb_s->epoch = scb_o->epoch;
 475
 476	/* guest state */
 477	scb_s->gpsw = scb_o->gpsw;
 478	scb_s->gg14 = scb_o->gg14;
 479	scb_s->gg15 = scb_o->gg15;
 480	memcpy(scb_s->gcr, scb_o->gcr, 128);
 481	scb_s->pp = scb_o->pp;
 482
 483	/* interception / execution handling */
 484	scb_s->gbea = scb_o->gbea;
 485	scb_s->lctl = scb_o->lctl;
 486	scb_s->svcc = scb_o->svcc;
 487	scb_s->ictl = scb_o->ictl;
 488	/*
 489	 * SKEY handling functions can't deal with false setting of PTE invalid
 490	 * bits. Therefore we cannot provide interpretation and would later
 491	 * have to provide own emulation handlers.
 492	 */
 493	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
 494		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
 495
 496	scb_s->icpua = scb_o->icpua;
 497
 498	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
 499		new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
 500	/* if the hva of the prefix changes, we have to remap the prefix */
 501	if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
 502		prefix_unmapped(vsie_page);
 503	 /* SIE will do mso/msl validity and exception checks for us */
 504	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
 505	scb_s->mso = new_mso;
 506	scb_s->prefix = new_prefix;
 507
  508	/* We definitely have to flush the TLB if this scb never ran */
 509	if (scb_s->ihcpu != 0xffffU)
 510		scb_s->ihcpu = scb_o->ihcpu;
 511
 512	/* MVPG and Protection Exception Interpretation are always available */
 513	scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
 514	/* Host-protection-interruption introduced with ESOP */
 515	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
 516		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
 517	/*
 518	 * CPU Topology
 519	 * This facility only uses the utility field of the SCA and none of
 520	 * the cpu entries that are problematic with the other interpretation
  521	 * facilities, so we can pass it through.
 522	 */
 523	if (test_kvm_facility(vcpu->kvm, 11))
 524		scb_s->ecb |= scb_o->ecb & ECB_PTF;
 525	/* transactional execution */
 526	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
  527		/* remap the prefix if tx is toggled on */
 528		if (!had_tx)
 529			prefix_unmapped(vsie_page);
 530		scb_s->ecb |= ECB_TE;
 531	}
 532	/* specification exception interpretation */
 533	scb_s->ecb |= scb_o->ecb & ECB_SPECI;
 534	/* branch prediction */
 535	if (test_kvm_facility(vcpu->kvm, 82))
 536		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
 537	/* SIMD */
 538	if (test_kvm_facility(vcpu->kvm, 129)) {
 539		scb_s->eca |= scb_o->eca & ECA_VX;
 540		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
 541	}
 542	/* Run-time-Instrumentation */
 543	if (test_kvm_facility(vcpu->kvm, 64))
 544		scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
 545	/* Instruction Execution Prevention */
 546	if (test_kvm_facility(vcpu->kvm, 130))
 547		scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
 548	/* Guarded Storage */
 549	if (test_kvm_facility(vcpu->kvm, 133)) {
 550		scb_s->ecb |= scb_o->ecb & ECB_GS;
 551		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
 552	}
 553	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
 554		scb_s->eca |= scb_o->eca & ECA_SII;
 555	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
 556		scb_s->eca |= scb_o->eca & ECA_IB;
 557	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
 558		scb_s->eca |= scb_o->eca & ECA_CEI;
 559	/* Epoch Extension */
 560	if (test_kvm_facility(vcpu->kvm, 139)) {
 561		scb_s->ecd |= scb_o->ecd & ECD_MEF;
 562		scb_s->epdx = scb_o->epdx;
 563	}
 564
 565	/* etoken */
 566	if (test_kvm_facility(vcpu->kvm, 156))
 567		scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;
 568
 569	scb_s->hpid = HPID_VSIE;
 570	scb_s->cpnc = scb_o->cpnc;
 571
 572	prepare_ibc(vcpu, vsie_page);
 573	rc = shadow_crycb(vcpu, vsie_page);
 574out:
 575	if (rc)
 576		unshadow_scb(vcpu, vsie_page);
 577	return rc;
 578}
 579
 580void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
 581				 unsigned long end)
 582{
 583	struct kvm *kvm = gmap->private;
 584	struct vsie_page *cur;
 585	unsigned long prefix;
 586	struct page *page;
 587	int i;
 588
 589	if (!gmap_is_shadow(gmap))
  590		return;
  591	/*
 592	 * Only new shadow blocks are added to the list during runtime,
 593	 * therefore we can safely reference them all the time.
 594	 */
 595	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
 596		page = READ_ONCE(kvm->arch.vsie.pages[i]);
 597		if (!page)
 598			continue;
 599		cur = page_to_virt(page);
 600		if (READ_ONCE(cur->gmap) != gmap)
 601			continue;
 602		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
 603		/* with mso/msl, the prefix lies at an offset */
 604		prefix += cur->scb_s.mso;
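		/* the prefix area spans up to two pages; the second page is used when TE is enabled */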
 605		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
 606			prefix_unmapped_sync(cur);
 607	}
 608}
 609
 610/*
 611 * Map the first prefix page and if tx is enabled also the second prefix page.
 612 *
 613 * The prefix will be protected, a gmap notifier will inform about unmaps.
 614 * The shadow scb must not be executed until the prefix is remapped, this is
 615 * guaranteed by properly handling PROG_REQUEST.
 616 *
  617 * Returns: - 0 if successfully mapped or already mapped
 618 *          - > 0 if control has to be given to guest 2
 619 *          - -EAGAIN if the caller can retry immediately
 620 *          - -ENOMEM if out of memory
 621 */
 622static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 623{
 624	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 625	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
 626	int rc;
 627
 628	if (prefix_is_mapped(vsie_page))
 629		return 0;
 630
 631	/* mark it as mapped so we can catch any concurrent unmappers */
 632	prefix_mapped(vsie_page);
 633
 634	/* with mso/msl, the prefix lies at offset *mso* */
 635	prefix += scb_s->mso;
 636
 637	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
 638	if (!rc && (scb_s->ecb & ECB_TE))
 639		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 640					   prefix + PAGE_SIZE, NULL);
 641	/*
 642	 * We don't have to mprotect, we will be called for all unshadows.
 643	 * SIE will detect if protection applies and trigger a validity.
 644	 */
 645	if (rc)
 646		prefix_unmapped(vsie_page);
 647	if (rc > 0 || rc == -EFAULT)
 648		rc = set_validity_icpt(scb_s, 0x0037U);
 649	return rc;
 650}
 651
 652/*
 653 * Pin the guest page given by gpa and set hpa to the pinned host address.
 654 * Will always be pinned writable.
 655 *
 656 * Returns: - 0 on success
 657 *          - -EINVAL if the gpa is not valid guest storage
 658 */
 659static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
 660{
 661	struct page *page;
 662
 663	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
 664	if (is_error_page(page))
 665		return -EINVAL;
 666	*hpa = (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK);
 667	return 0;
 668}
 669
 670/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
 671static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
 672{
 673	kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);
 674	/* mark the page always as dirty for migration */
 675	mark_page_dirty(kvm, gpa_to_gfn(gpa));
 676}
 677
 678/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
 679static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 680{
 681	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 682	hpa_t hpa;
 683
 684	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
 685	if (hpa) {
 686		unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);
 687		vsie_page->sca_gpa = 0;
 688		scb_s->scaol = 0;
 689		scb_s->scaoh = 0;
 690	}
 691
 692	hpa = scb_s->itdba;
 693	if (hpa) {
 694		unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
 695		vsie_page->itdba_gpa = 0;
 696		scb_s->itdba = 0;
 697	}
 698
 699	hpa = scb_s->gvrd;
 700	if (hpa) {
 701		unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa);
 702		vsie_page->gvrd_gpa = 0;
 703		scb_s->gvrd = 0;
 704	}
 705
 706	hpa = scb_s->riccbd;
 707	if (hpa) {
 708		unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa);
 709		vsie_page->riccbd_gpa = 0;
 710		scb_s->riccbd = 0;
 711	}
 712
 713	hpa = scb_s->sdnxo;
 714	if (hpa) {
 715		unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa);
 716		vsie_page->sdnx_gpa = 0;
 717		scb_s->sdnxo = 0;
 718	}
 719}
 720
 721/*
 722 * Instead of shadowing some blocks, we can simply forward them because the
 723 * addresses in the scb are 64 bit long.
 724 *
 725 * This works as long as the data lies in one page. If blocks ever exceed one
 726 * page, we have to fall back to shadowing.
 727 *
 728 * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
 729 * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
 730 *
 731 * Returns: - 0 if all blocks were pinned.
 732 *          - > 0 if control has to be given to guest 2
 733 *          - -ENOMEM if out of memory
 734 */
 735static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 736{
 737	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 738	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 739	hpa_t hpa;
 740	gpa_t gpa;
 741	int rc = 0;
 742
 743	gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
 744	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
 745		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
 746	if (gpa) {
 747		if (gpa < 2 * PAGE_SIZE)
 748			rc = set_validity_icpt(scb_s, 0x0038U);
 749		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
 750			rc = set_validity_icpt(scb_s, 0x0011U);
 751		else if ((gpa & PAGE_MASK) !=
 752			 ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
 753			rc = set_validity_icpt(scb_s, 0x003bU);
 754		if (!rc) {
 755			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 756			if (rc)
 757				rc = set_validity_icpt(scb_s, 0x0034U);
 758		}
 759		if (rc)
 760			goto unpin;
 761		vsie_page->sca_gpa = gpa;
 762		scb_s->scaoh = (u32)((u64)hpa >> 32);
 763		scb_s->scaol = (u32)(u64)hpa;
 764	}
 765
 766	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
 767	if (gpa && (scb_s->ecb & ECB_TE)) {
 768		if (gpa < 2 * PAGE_SIZE) {
 769			rc = set_validity_icpt(scb_s, 0x0080U);
 770			goto unpin;
 771		}
 772		/* 256 bytes cannot cross page boundaries */
 773		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 774		if (rc) {
 775			rc = set_validity_icpt(scb_s, 0x0080U);
 776			goto unpin;
 777		}
 778		vsie_page->itdba_gpa = gpa;
 779		scb_s->itdba = hpa;
 780	}
 781
 782	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
 783	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
 784		if (gpa < 2 * PAGE_SIZE) {
 785			rc = set_validity_icpt(scb_s, 0x1310U);
 786			goto unpin;
 787		}
 788		/*
 789		 * 512 bytes vector registers cannot cross page boundaries
 790		 * if this block gets bigger, we have to shadow it.
 791		 */
 792		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 793		if (rc) {
 794			rc = set_validity_icpt(scb_s, 0x1310U);
 795			goto unpin;
 796		}
 797		vsie_page->gvrd_gpa = gpa;
 798		scb_s->gvrd = hpa;
 799	}
 800
 801	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
 802	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
 803		if (gpa < 2 * PAGE_SIZE) {
 804			rc = set_validity_icpt(scb_s, 0x0043U);
 805			goto unpin;
 806		}
 807		/* 64 bytes cannot cross page boundaries */
 808		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 809		if (rc) {
 810			rc = set_validity_icpt(scb_s, 0x0043U);
 811			goto unpin;
 812		}
 813		/* Validity 0x0044 will be checked by SIE */
 814		vsie_page->riccbd_gpa = gpa;
 815		scb_s->riccbd = hpa;
 816	}
 817	if (((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) ||
 818	    (scb_s->ecd & ECD_ETOKENF)) {
 819		unsigned long sdnxc;
 820
 821		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
 822		sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
 823		if (!gpa || gpa < 2 * PAGE_SIZE) {
 824			rc = set_validity_icpt(scb_s, 0x10b0U);
 825			goto unpin;
 826		}
 827		if (sdnxc < 6 || sdnxc > 12) {
 828			rc = set_validity_icpt(scb_s, 0x10b1U);
 829			goto unpin;
 830		}
 831		if (gpa & ((1 << sdnxc) - 1)) {
 832			rc = set_validity_icpt(scb_s, 0x10b2U);
 833			goto unpin;
 834		}
 835		/* Due to alignment rules (checked above) this cannot
 836		 * cross page boundaries
 837		 */
 838		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 839		if (rc) {
 840			rc = set_validity_icpt(scb_s, 0x10b0U);
 841			goto unpin;
 842		}
 843		vsie_page->sdnx_gpa = gpa;
 844		scb_s->sdnxo = hpa | sdnxc;
 845	}
 846	return 0;
 847unpin:
 848	unpin_blocks(vcpu, vsie_page);
 849	return rc;
 850}
 851
 852/* unpin the scb provided by guest 2, marking it as dirty */
 853static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
 854		      gpa_t gpa)
 855{
 856	hpa_t hpa = (hpa_t) vsie_page->scb_o;
 857
 858	if (hpa)
 859		unpin_guest_page(vcpu->kvm, gpa, hpa);
 860	vsie_page->scb_o = NULL;
 861}
 862
 863/*
 864 * Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
 865 *
 866 * Returns: - 0 if the scb was pinned.
 867 *          - > 0 if control has to be given to guest 2
 868 */
 869static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
 870		   gpa_t gpa)
 871{
 872	hpa_t hpa;
 873	int rc;
 874
 875	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 876	if (rc) {
 877		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 878		WARN_ON_ONCE(rc);
 879		return 1;
 880	}
 881	vsie_page->scb_o = phys_to_virt(hpa);
 882	return 0;
 883}
 884
 885/*
 886 * Inject a fault into guest 2.
 887 *
 888 * Returns: - > 0 if control has to be given to guest 2
  889 *          - < 0 if an error occurred during injection.
 890 */
 891static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
 892			bool write_flag)
 893{
 894	struct kvm_s390_pgm_info pgm = {
 895		.code = code,
 896		.trans_exc_code =
 897			/* 0-51: virtual address */
 898			(vaddr & 0xfffffffffffff000UL) |
 899			/* 52-53: store / fetch */
 900			(((unsigned int) !write_flag) + 1) << 10,
 901			/* 62-63: asce id (always primary == 0) */
 902		.exc_access_id = 0, /* always primary */
 903		.op_access_id = 0, /* not MVPG */
 904	};
 905	int rc;
 906
 907	if (code == PGM_PROTECTION)
 908		pgm.trans_exc_code |= 0x4UL;
 909
 910	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
 911	return rc ? rc : 1;
 912}
 913
 914/*
 915 * Handle a fault during vsie execution on a gmap shadow.
 916 *
 917 * Returns: - 0 if the fault was resolved
 918 *          - > 0 if control has to be given to guest 2
 919 *          - < 0 if an error occurred
 920 */
 921static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 922{
 923	int rc;
 924
 925	if (current->thread.gmap_int_code == PGM_PROTECTION)
 926		/* we can directly forward all protection exceptions */
 927		return inject_fault(vcpu, PGM_PROTECTION,
 928				    current->thread.gmap_addr, 1);
 929
 930	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 931				   current->thread.gmap_addr, NULL);
 932	if (rc > 0) {
 933		rc = inject_fault(vcpu, rc,
 934				  current->thread.gmap_addr,
 935				  current->thread.gmap_write_flag);
 936		if (rc >= 0)
 937			vsie_page->fault_addr = current->thread.gmap_addr;
 938	}
 939	return rc;
 940}
 941
 942/*
 943 * Retry the previous fault that required guest 2 intervention. This avoids
 944 * one superfluous SIE re-entry and direct exit.
 945 *
 946 * Will ignore any errors. The next SIE fault will do proper fault handling.
 947 */
 948static void handle_last_fault(struct kvm_vcpu *vcpu,
 949			      struct vsie_page *vsie_page)
 950{
 951	if (vsie_page->fault_addr)
 952		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 953				      vsie_page->fault_addr, NULL);
 954	vsie_page->fault_addr = 0;
 955}
 956
 957static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
 958{
 959	vsie_page->scb_s.icptcode = 0;
 960}
 961
 962/* rewind the psw and clear the vsie icpt, so we can retry execution */
 963static void retry_vsie_icpt(struct vsie_page *vsie_page)
 964{
 965	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
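	/* bits 0-7 of ipa hold the first opcode byte, which determines the instruction length */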
 966	int ilen = insn_length(scb_s->ipa >> 8);
 967
 968	/* take care of EXECUTE instructions */
 969	if (scb_s->icptstatus & 1) {
 970		ilen = (scb_s->icptstatus >> 4) & 0x6;
 971		if (!ilen)
 972			ilen = 4;
 973	}
 974	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
 975	clear_vsie_icpt(vsie_page);
 976}
 977
 978/*
 979 * Try to shadow + enable the guest 2 provided facility list.
 980 * Retry instruction execution if enabled for and provided by guest 2.
 981 *
 982 * Returns: - 0 if handled (retry or guest 2 icpt)
 983 *          - > 0 if control has to be given to guest 2
 984 */
 985static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 986{
 987	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 988	__u32 fac = READ_ONCE(vsie_page->scb_o->fac);
 989
 990	/*
 991	 * Alternate-STFLE-Interpretive-Execution facilities are not supported
 992	 * -> format-0 flcb
 993	 */
 994	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
 995		retry_vsie_icpt(vsie_page);
 996		/*
 997		 * The facility list origin (FLO) is in bits 1 - 28 of the FLD
 998		 * so we need to mask here before reading.
 999		 */
1000		fac = fac & 0x7ffffff8U;
1001		/*
1002		 * format-0 -> size of nested guest's facility list == guest's size
1003		 * guest's size == host's size, since STFLE is interpretatively executed
1004		 * using a format-0 for the guest, too.
1005		 */
1006		if (read_guest_real(vcpu, fac, &vsie_page->fac,
1007				    stfle_size() * sizeof(u64)))
1008			return set_validity_icpt(scb_s, 0x1090U);
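		/* vsie_page is allocated with GFP_DMA (below 2 GB), so the 31-bit fac origin fits */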
1009		scb_s->fac = (__u32)(__u64) &vsie_page->fac;
1010	}
1011	return 0;
1012}
1013
1014/*
1015 * Get a register for a nested guest.
1016 * @vcpu the vcpu of the guest
1017 * @vsie_page the vsie_page for the nested guest
1018 * @reg the register number, the upper 4 bits are ignored.
1019 * returns: the value of the register.
1020 */
1021static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
1022{
1023	/* no need to validate the parameter and/or perform error handling */
1024	reg &= 0xf;
1025	switch (reg) {
1026	case 15:
1027		return vsie_page->scb_s.gg15;
1028	case 14:
1029		return vsie_page->scb_s.gg14;
1030	default:
1031		return vcpu->run->s.regs.gprs[reg];
1032	}
1033}
1034
1035static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1036{
1037	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1038	unsigned long pei_dest, pei_src, src, dest, mask, prefix;
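	/* the MVPG PEI values are passed back to g2 by overlaying the mcic field of the original scb */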
1039	u64 *pei_block = &vsie_page->scb_o->mcic;
1040	int edat, rc_dest, rc_src;
1041	union ctlreg0 cr0;
1042
1043	cr0.val = vcpu->arch.sie_block->gcr[0];
1044	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
1045	mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
1046	prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
1047
1048	dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
1049	dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
1050	src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
1051	src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;
1052
1053	rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest);
1054	rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src);
1055	/*
1056	 * Either everything went well, or something non-critical went wrong
1057	 * e.g. because of a race. In either case, simply retry.
1058	 */
1059	if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
1060		retry_vsie_icpt(vsie_page);
1061		return -EAGAIN;
1062	}
1063	/* Something more serious went wrong, propagate the error */
1064	if (rc_dest < 0)
1065		return rc_dest;
1066	if (rc_src < 0)
1067		return rc_src;
1068
1069	/* The only possible suppressing exception: just deliver it */
1070	if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
1071		clear_vsie_icpt(vsie_page);
1072		rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
1073		WARN_ON_ONCE(rc_dest);
1074		return 1;
1075	}
1076
1077	/*
1078	 * Forward the PEI intercept to the guest if it was a page fault, or
1079	 * also for segment and region table faults if EDAT applies.
1080	 */
1081	if (edat) {
1082		rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
1083		rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
1084	} else {
1085		rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
1086		rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
1087	}
1088	if (!rc_dest && !rc_src) {
1089		pei_block[0] = pei_dest;
1090		pei_block[1] = pei_src;
1091		return 1;
1092	}
1093
1094	retry_vsie_icpt(vsie_page);
1095
1096	/*
1097	 * The host has edat, and the guest does not, or it was an ASCE type
1098	 * exception. The host needs to inject the appropriate DAT interrupts
1099	 * into the guest.
1100	 */
1101	if (rc_dest)
1102		return inject_fault(vcpu, rc_dest, dest, 1);
1103	return inject_fault(vcpu, rc_src, src, 0);
1104}
1105
1106/*
1107 * Run the vsie on a shadow scb and a shadow gmap, without any further
1108 * sanity checks, handling SIE faults.
1109 *
1110 * Returns: - 0 everything went fine
1111 *          - > 0 if control has to be given to guest 2
1112 *          - < 0 if an error occurred
1113 */
1114static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1115	__releases(vcpu->kvm->srcu)
1116	__acquires(vcpu->kvm->srcu)
1117{
1118	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1119	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
1120	int guest_bp_isolation;
1121	int rc = 0;
1122
1123	handle_last_fault(vcpu, vsie_page);
1124
1125	kvm_vcpu_srcu_read_unlock(vcpu);
1126
1127	/* save current guest state of bp isolation override */
1128	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
1129
1130	/*
1131	 * The guest is running with BPBC, so we have to force it on for our
1132	 * nested guest. This is done by enabling BPBC globally, so the BPBC
1133	 * control in the SCB (which the nested guest can modify) is simply
1134	 * ignored.
1135	 */
1136	if (test_kvm_facility(vcpu->kvm, 82) &&
1137	    vcpu->arch.sie_block->fpf & FPF_BPBC)
1138		set_thread_flag(TIF_ISOLATE_BP_GUEST);
1139
1140	local_irq_disable();
1141	guest_enter_irqoff();
1142	local_irq_enable();
1143
1144	/*
1145	 * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
1146	 * and VCPU requests also hinder the vSIE from running and lead
1147	 * to an immediate exit. kvm_s390_vsie_kick() has to be used to
1148	 * also kick the vSIE.
1149	 */
1150	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
1151	barrier();
1152	if (test_cpu_flag(CIF_FPU))
1153		load_fpu_regs();
1154	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
1155		rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
1156	barrier();
1157	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
1158
1159	local_irq_disable();
1160	guest_exit_irqoff();
1161	local_irq_enable();
1162
1163	/* restore guest state for bp isolation override */
1164	if (!guest_bp_isolation)
1165		clear_thread_flag(TIF_ISOLATE_BP_GUEST);
1166
1167	kvm_vcpu_srcu_read_lock(vcpu);
1168
1169	if (rc == -EINTR) {
1170		VCPU_EVENT(vcpu, 3, "%s", "machine check");
1171		kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
1172		return 0;
1173	}
1174
1175	if (rc > 0)
1176		rc = 0; /* we could still have an icpt */
1177	else if (rc == -EFAULT)
1178		return handle_fault(vcpu, vsie_page);
1179
1180	switch (scb_s->icptcode) {
1181	case ICPT_INST:
1182		if (scb_s->ipa == 0xb2b0)
1183			rc = handle_stfle(vcpu, vsie_page);
1184		break;
1185	case ICPT_STOP:
1186		/* stop not requested by g2 - must have been a kick */
1187		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
1188			clear_vsie_icpt(vsie_page);
1189		break;
1190	case ICPT_VALIDITY:
1191		if ((scb_s->ipa & 0xf000) != 0xf000)
1192			scb_s->ipa += 0x1000;
1193		break;
1194	case ICPT_PARTEXEC:
1195		if (scb_s->ipa == 0xb254)
1196			rc = vsie_handle_mvpg(vcpu, vsie_page);
1197		break;
1198	}
1199	return rc;
1200}
1201
1202static void release_gmap_shadow(struct vsie_page *vsie_page)
1203{
1204	if (vsie_page->gmap)
1205		gmap_put(vsie_page->gmap);
1206	WRITE_ONCE(vsie_page->gmap, NULL);
1207	prefix_unmapped(vsie_page);
1208}
1209
1210static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
1211			       struct vsie_page *vsie_page)
1212{
1213	unsigned long asce;
1214	union ctlreg0 cr0;
1215	struct gmap *gmap;
1216	int edat;
1217
1218	asce = vcpu->arch.sie_block->gcr[1];
1219	cr0.val = vcpu->arch.sie_block->gcr[0];
1220	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
1221	edat += edat && test_kvm_facility(vcpu->kvm, 78);
1222
1223	/*
1224	 * ASCE or EDAT could have changed since last icpt, or the gmap
1225	 * we're holding has been unshadowed. If the gmap is still valid,
1226	 * we can safely reuse it.
1227	 */
1228	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat)) {
1229		vcpu->kvm->stat.gmap_shadow_reuse++;
1230		return 0;
1231	}
1232
1233	/* release the old shadow - if any, and mark the prefix as unmapped */
1234	release_gmap_shadow(vsie_page);
1235	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
1236	if (IS_ERR(gmap))
1237		return PTR_ERR(gmap);
1238	vcpu->kvm->stat.gmap_shadow_create++;
1239	WRITE_ONCE(vsie_page->gmap, gmap);
1240	return 0;
1241}
1242
1243/*
1244 * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
1245 */
1246static void register_shadow_scb(struct kvm_vcpu *vcpu,
1247				struct vsie_page *vsie_page)
1248{
1249	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1250
1251	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
1252	/*
1253	 * External calls have to lead to a kick of the vcpu and
1254	 * therefore the vsie -> Simulate Wait state.
1255	 */
1256	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
1257	/*
1258	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
1259	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
1260	 */
1261	preempt_disable();
1262	scb_s->epoch += vcpu->kvm->arch.epoch;
1263
1264	if (scb_s->ecd & ECD_MEF) {
1265		scb_s->epdx += vcpu->kvm->arch.epdx;
1266		if (scb_s->epoch < vcpu->kvm->arch.epoch)
1267			scb_s->epdx += 1;
1268	}
1269
1270	preempt_enable();
1271}
1272
1273/*
1274 * Unregister a shadow scb from a VCPU.
1275 */
1276static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
1277{
1278	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
1279	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
1280}
1281
1282/*
1283 * Run the vsie on a shadowed scb, managing the gmap shadow, handling
1284 * prefix pages and faults.
1285 *
1286 * Returns: - 0 if no errors occurred
1287 *          - > 0 if control has to be given to guest 2
1288 *          - -ENOMEM if out of memory
1289 */
1290static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1291{
1292	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1293	int rc = 0;
1294
1295	while (1) {
1296		rc = acquire_gmap_shadow(vcpu, vsie_page);
1297		if (!rc)
1298			rc = map_prefix(vcpu, vsie_page);
1299		if (!rc) {
1300			gmap_enable(vsie_page->gmap);
1301			update_intervention_requests(vsie_page);
1302			rc = do_vsie_run(vcpu, vsie_page);
1303			gmap_enable(vcpu->arch.gmap);
1304		}
1305		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);
1306
1307		if (rc == -EAGAIN)
1308			rc = 0;
1309		if (rc || scb_s->icptcode || signal_pending(current) ||
1310		    kvm_s390_vcpu_has_irq(vcpu, 0) ||
1311		    kvm_s390_vcpu_sie_inhibited(vcpu))
1312			break;
1313		cond_resched();
1314	}
1315
1316	if (rc == -EFAULT) {
1317		/*
 1318		 * Addressing exceptions are always presented as intercepts.
1319		 * As addressing exceptions are suppressing and our guest 3 PSW
1320		 * points at the responsible instruction, we have to
1321		 * forward the PSW and set the ilc. If we can't read guest 3
1322		 * instruction, we can use an arbitrary ilc. Let's always use
1323		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
1324		 * memory. (we could also fake the shadow so the hardware
1325		 * handles it).
1326		 */
1327		scb_s->icptcode = ICPT_PROGI;
1328		scb_s->iprcc = PGM_ADDRESSING;
1329		scb_s->pgmilc = 4;
1330		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
1331		rc = 1;
1332	}
1333	return rc;
1334}
1335
1336/*
1337 * Get or create a vsie page for a scb address.
1338 *
1339 * Returns: - address of a vsie page (cached or new one)
1340 *          - NULL if the same scb address is already used by another VCPU
1341 *          - ERR_PTR(-ENOMEM) if out of memory
1342 */
1343static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
1344{
1345	struct vsie_page *vsie_page;
1346	struct page *page;
1347	int nr_vcpus;
1348
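	/* scb addresses are 512-byte aligned, so addr >> 9 yields a unique radix tree index */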
1349	rcu_read_lock();
1350	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
1351	rcu_read_unlock();
1352	if (page) {
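		/* a reference count of 2 (the cache's reference plus ours) means nobody else uses this page */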
1353		if (page_ref_inc_return(page) == 2)
1354			return page_to_virt(page);
1355		page_ref_dec(page);
1356	}
1357
1358	/*
1359	 * We want at least #online_vcpus shadows, so every VCPU can execute
1360	 * the VSIE in parallel.
1361	 */
1362	nr_vcpus = atomic_read(&kvm->online_vcpus);
1363
1364	mutex_lock(&kvm->arch.vsie.mutex);
1365	if (kvm->arch.vsie.page_count < nr_vcpus) {
1366		page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
1367		if (!page) {
1368			mutex_unlock(&kvm->arch.vsie.mutex);
1369			return ERR_PTR(-ENOMEM);
1370		}
1371		page_ref_inc(page);
1372		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
1373		kvm->arch.vsie.page_count++;
1374	} else {
1375		/* reuse an existing entry that belongs to nobody */
1376		while (true) {
1377			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
1378			if (page_ref_inc_return(page) == 2)
1379				break;
1380			page_ref_dec(page);
1381			kvm->arch.vsie.next++;
1382			kvm->arch.vsie.next %= nr_vcpus;
1383		}
1384		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
1385	}
1386	page->index = addr;
1387	/* double use of the same address */
1388	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
1389		page_ref_dec(page);
1390		mutex_unlock(&kvm->arch.vsie.mutex);
1391		return NULL;
1392	}
1393	mutex_unlock(&kvm->arch.vsie.mutex);
1394
1395	vsie_page = page_to_virt(page);
1396	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
1397	release_gmap_shadow(vsie_page);
1398	vsie_page->fault_addr = 0;
1399	vsie_page->scb_s.ihcpu = 0xffffU;
1400	return vsie_page;
1401}
1402
1403/* put a vsie page acquired via get_vsie_page */
1404static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
1405{
1406	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);
1407
1408	page_ref_dec(page);
1409}
1410
1411int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
1412{
1413	struct vsie_page *vsie_page;
1414	unsigned long scb_addr;
1415	int rc;
1416
1417	vcpu->stat.instruction_sie++;
1418	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
1419		return -EOPNOTSUPP;
1420	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1421		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1422
1423	BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
1424	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);
1425
1426	/* 512 byte alignment */
1427	if (unlikely(scb_addr & 0x1ffUL))
1428		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
1429
1430	if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
1431	    kvm_s390_vcpu_sie_inhibited(vcpu))
1432		return 0;
1433
1434	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
1435	if (IS_ERR(vsie_page))
1436		return PTR_ERR(vsie_page);
1437	else if (!vsie_page)
1438		/* double use of sie control block - simply do nothing */
1439		return 0;
1440
1441	rc = pin_scb(vcpu, vsie_page, scb_addr);
1442	if (rc)
1443		goto out_put;
1444	rc = shadow_scb(vcpu, vsie_page);
1445	if (rc)
1446		goto out_unpin_scb;
1447	rc = pin_blocks(vcpu, vsie_page);
1448	if (rc)
1449		goto out_unshadow;
1450	register_shadow_scb(vcpu, vsie_page);
1451	rc = vsie_run(vcpu, vsie_page);
1452	unregister_shadow_scb(vcpu);
1453	unpin_blocks(vcpu, vsie_page);
1454out_unshadow:
1455	unshadow_scb(vcpu, vsie_page);
1456out_unpin_scb:
1457	unpin_scb(vcpu, vsie_page, scb_addr);
1458out_put:
1459	put_vsie_page(vcpu->kvm, vsie_page);
1460
1461	return rc < 0 ? rc : 0;
1462}
1463
1464/* Init the vsie data structures. To be called when a vm is initialized. */
1465void kvm_s390_vsie_init(struct kvm *kvm)
1466{
1467	mutex_init(&kvm->arch.vsie.mutex);
1468	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL_ACCOUNT);
1469}
1470
1471/* Destroy the vsie data structures. To be called when a vm is destroyed. */
1472void kvm_s390_vsie_destroy(struct kvm *kvm)
1473{
1474	struct vsie_page *vsie_page;
1475	struct page *page;
1476	int i;
1477
1478	mutex_lock(&kvm->arch.vsie.mutex);
1479	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
1480		page = kvm->arch.vsie.pages[i];
1481		kvm->arch.vsie.pages[i] = NULL;
1482		vsie_page = page_to_virt(page);
1483		release_gmap_shadow(vsie_page);
1484		/* free the radix tree entry */
1485		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
1486		__free_page(page);
1487	}
1488	kvm->arch.vsie.page_count = 0;
1489	mutex_unlock(&kvm->arch.vsie.mutex);
1490}
1491
1492void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
1493{
1494	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);
1495
1496	/*
1497	 * Even if the VCPU lets go of the shadow sie block reference, it is
1498	 * still valid in the cache. So we can safely kick it.
1499	 */
1500	if (scb) {
1501		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
1502		if (scb->prog0c & PROG_IN_SIE)
1503			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
1504	}
1505}
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * kvm nested virtualization support for s390x
   4 *
   5 * Copyright IBM Corp. 2016, 2018
   6 *
   7 *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
   8 */
   9#include <linux/vmalloc.h>
  10#include <linux/kvm_host.h>
  11#include <linux/bug.h>
  12#include <linux/list.h>
  13#include <linux/bitmap.h>
  14#include <linux/sched/signal.h>
  15
  16#include <asm/gmap.h>
  17#include <asm/mmu_context.h>
  18#include <asm/sclp.h>
  19#include <asm/nmi.h>
  20#include <asm/dis.h>
 
 
  21#include "kvm-s390.h"
  22#include "gaccess.h"
  23
  24struct vsie_page {
  25	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
  26	/*
  27	 * the backup info for machine check. ensure it's at
  28	 * the same offset as that in struct sie_page!
  29	 */
  30	struct mcck_volatile_info mcck_info;    /* 0x0200 */
  31	/*
  32	 * The pinned original scb. Be aware that other VCPUs can modify
  33	 * it while we read from it. Values that are used for conditions or
  34	 * are reused conditionally, should be accessed via READ_ONCE.
  35	 */
  36	struct kvm_s390_sie_block *scb_o;	/* 0x0218 */
  37	/* the shadow gmap in use by the vsie_page */
  38	struct gmap *gmap;			/* 0x0220 */
  39	/* address of the last reported fault to guest2 */
  40	unsigned long fault_addr;		/* 0x0228 */
  41	/* calculated guest addresses of satellite control blocks */
  42	gpa_t sca_gpa;				/* 0x0230 */
  43	gpa_t itdba_gpa;			/* 0x0238 */
  44	gpa_t gvrd_gpa;				/* 0x0240 */
  45	gpa_t riccbd_gpa;			/* 0x0248 */
  46	gpa_t sdnx_gpa;				/* 0x0250 */
  47	__u8 reserved[0x0700 - 0x0258];		/* 0x0258 */
  48	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
  49	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
  50};
  51
  52/* trigger a validity icpt for the given scb */
  53static int set_validity_icpt(struct kvm_s390_sie_block *scb,
  54			     __u16 reason_code)
  55{
  56	scb->ipa = 0x1000;
  57	scb->ipb = ((__u32) reason_code) << 16;
  58	scb->icptcode = ICPT_VALIDITY;
  59	return 1;
  60}
  61
  62/* mark the prefix as unmapped, this will block the VSIE */
  63static void prefix_unmapped(struct vsie_page *vsie_page)
  64{
  65	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
  66}
  67
  68/* mark the prefix as unmapped and wait until the VSIE has been left */
  69static void prefix_unmapped_sync(struct vsie_page *vsie_page)
  70{
  71	prefix_unmapped(vsie_page);
  72	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
  73		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
  74	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
  75		cpu_relax();
  76}
  77
  78/* mark the prefix as mapped, this will allow the VSIE to run */
  79static void prefix_mapped(struct vsie_page *vsie_page)
  80{
  81	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
  82}
  83
  84/* test if the prefix is mapped into the gmap shadow */
  85static int prefix_is_mapped(struct vsie_page *vsie_page)
  86{
  87	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
  88}
  89
  90/* copy the updated intervention request bits into the shadow scb */
  91static void update_intervention_requests(struct vsie_page *vsie_page)
  92{
  93	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
  94	int cpuflags;
  95
  96	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
  97	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
  98	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
  99}
 100
 101/* shadow (filter and validate) the cpuflags  */
 102static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 103{
 104	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 105	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 106	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);
 107
 108	/* we don't allow ESA/390 guests */
 109	if (!(cpuflags & CPUSTAT_ZARCH))
 110		return set_validity_icpt(scb_s, 0x0001U);
 111
 112	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
 113		return set_validity_icpt(scb_s, 0x0001U);
 114	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
 115		return set_validity_icpt(scb_s, 0x0007U);
 116
 117	/* intervention requests will be set later */
 118	newflags = CPUSTAT_ZARCH;
 119	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
 120		newflags |= CPUSTAT_GED;
 121	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
 122		if (cpuflags & CPUSTAT_GED)
 123			return set_validity_icpt(scb_s, 0x0001U);
 124		newflags |= CPUSTAT_GED2;
 125	}
 126	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
 127		newflags |= cpuflags & CPUSTAT_P;
 128	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
 129		newflags |= cpuflags & CPUSTAT_SM;
 130	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
 131		newflags |= cpuflags & CPUSTAT_IBS;
 132	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
 133		newflags |= cpuflags & CPUSTAT_KSS;
 134
 135	atomic_set(&scb_s->cpuflags, newflags);
 136	return 0;
 137}
 138/* Copy to APCB FORMAT1 from APCB FORMAT0 */
 139static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
 140			unsigned long apcb_o, struct kvm_s390_apcb1 *apcb_h)
 141{
 142	struct kvm_s390_apcb0 tmp;
 
 143
 144	if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0)))
 
 
 
 145		return -EFAULT;
 146
 147	apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
 148	apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
 149	apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;
 150
 151	return 0;
 152
 153}
 154
 155/**
 156 * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
 157 * @vcpu: pointer to the virtual CPU
 158 * @apcb_s: pointer to start of apcb in the shadow crycb
 159 * @apcb_o: pointer to start of original apcb in the guest2
 160 * @apcb_h: pointer to start of apcb in the guest1
 161 *
 162 * Returns 0 and -EFAULT on error reading guest apcb
 163 */
 164static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
 165			unsigned long apcb_o, unsigned long *apcb_h)
 166{
 167	if (read_guest_real(vcpu, apcb_o, apcb_s,
 
 
 
 
 168			    sizeof(struct kvm_s390_apcb0)))
 169		return -EFAULT;
 170
 171	bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb0));
 
 172
 173	return 0;
 174}
 175
 176/**
 177 * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
 178 * @vcpu: pointer to the virtual CPU
 179 * @apcb_s: pointer to start of apcb in the shadow crycb
 180 * @apcb_o: pointer to start of original guest apcb
 181 * @apcb_h: pointer to start of apcb in the host
 182 *
 183 * Returns 0 and -EFAULT on error reading guest apcb
 184 */
 185static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
 186			unsigned long apcb_o,
 187			unsigned long *apcb_h)
 188{
 189	if (read_guest_real(vcpu, apcb_o, apcb_s,
 
 
 
 
 190			    sizeof(struct kvm_s390_apcb1)))
 191		return -EFAULT;
 192
 193	bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb1));
 194
 195	return 0;
 196}
 197
 198/**
 199 * setup_apcb - Create a shadow copy of the apcb.
 200 * @vcpu: pointer to the virtual CPU
 201 * @crycb_s: pointer to shadow crycb
 202 * @crycb_o: pointer to original guest crycb
 203 * @crycb_h: pointer to the host crycb
 204 * @fmt_o: format of the original guest crycb.
 205 * @fmt_h: format of the host crycb.
 206 *
 207 * Checks the compatibility between the guest and host crycb and calls the
 208 * appropriate copy function.
 209 *
 210 * Returns 0 on success or an error number if the guest and host crycbs are incompatible.
 211 */
 212static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
 213	       const u32 crycb_o,
 214	       struct kvm_s390_crypto_cb *crycb_h,
 215	       int fmt_o, int fmt_h)
 216{
 217	struct kvm_s390_crypto_cb *crycb;
 218
 219	crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o;
 220
 221	switch (fmt_o) {
 222	case CRYCB_FORMAT2:
 223		if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK))
 224			return -EACCES;
 225		if (fmt_h != CRYCB_FORMAT2)
 226			return -EINVAL;
 227		return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
 228				    (unsigned long) &crycb->apcb1,
 229				    (unsigned long *)&crycb_h->apcb1);
 230	case CRYCB_FORMAT1:
 231		switch (fmt_h) {
 232		case CRYCB_FORMAT2:
 233			return setup_apcb10(vcpu, &crycb_s->apcb1,
 234					    (unsigned long) &crycb->apcb0,
 235					    &crycb_h->apcb1);
 236		case CRYCB_FORMAT1:
 237			return setup_apcb00(vcpu,
 238					    (unsigned long *) &crycb_s->apcb0,
 239					    (unsigned long) &crycb->apcb0,
 240					    (unsigned long *) &crycb_h->apcb0);
 241		}
 242		break;
 243	case CRYCB_FORMAT0:
 244		if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK))
 245			return -EACCES;
 246
 247		switch (fmt_h) {
 248		case CRYCB_FORMAT2:
 249			return setup_apcb10(vcpu, &crycb_s->apcb1,
 250					    (unsigned long) &crycb->apcb0,
 251					    &crycb_h->apcb1);
 252		case CRYCB_FORMAT1:
 253		case CRYCB_FORMAT0:
 254			return setup_apcb00(vcpu,
 255					    (unsigned long *) &crycb_s->apcb0,
 256					    (unsigned long) &crycb->apcb0,
 257					    (unsigned long *) &crycb_h->apcb0);
 258		}
 259	}
 260	return -EINVAL;
 261}
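
/*
 * Format combinations handled by setup_apcb() above, as encoded in the
 * switch statement (every other combination returns -EINVAL):
 *
 *   guest 3 crycb   host crycb      shadow copy
 *   FORMAT2         FORMAT2         setup_apcb11 (apcb1 -> apcb1)
 *   FORMAT1         FORMAT2         setup_apcb10 (apcb0 -> apcb1)
 *   FORMAT1         FORMAT1         setup_apcb00 (apcb0 -> apcb0)
 *   FORMAT0         FORMAT2         setup_apcb10 (apcb0 -> apcb1)
 *   FORMAT0         FORMAT1/0       setup_apcb00 (apcb0 -> apcb0)
 */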
 262
 263/**
 264 * shadow_crycb - Create a shadow copy of the crycb block
 265 * @vcpu: a pointer to the virtual CPU
 266 * @vsie_page: a pointer to internal data used for the vSIE
 267 *
 268 * Create a shadow copy of the crycb block and setup key wrapping, if
 269 * requested for guest 3 and enabled for guest 2.
 270 *
 271 * We accept format-1 or format-2, but we convert format-1 into format-2
 272 * in the shadow CRYCB.
 273 * Using format-2 enables the firmware to choose the right format when
 274 * scheduling the SIE.
 275 * There is nothing to do for format-0.
 276 *
 277 * This function centralizes the issuing of set_validity_icpt() for all
 278 * the subfunctions working on the crycb.
 279 *
 280 * Returns: - 0 if shadowed or nothing to do
 281 *          - > 0 if control has to be given to guest 2
 282 */
 283static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 284{
 285	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 286	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 287	const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
 288	const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
 289	unsigned long *b1, *b2;
 290	u8 ecb3_flags;
 291	u32 ecd_flags;
 292	int apie_h;
 293	int apie_s;
 294	int key_msk = test_kvm_facility(vcpu->kvm, 76);
 295	int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
 296	int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
 297	int ret = 0;
 298
 299	scb_s->crycbd = 0;
 300
 301	apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
 302	apie_s = apie_h & scb_o->eca;
 303	if (!apie_s && (!key_msk || (fmt_o == CRYCB_FORMAT0)))
 304		return 0;
 305
 306	if (!crycb_addr)
 307		return set_validity_icpt(scb_s, 0x0039U);
 308
 309	if (fmt_o == CRYCB_FORMAT1)
 310		if ((crycb_addr & PAGE_MASK) !=
 311		    ((crycb_addr + 128) & PAGE_MASK))
 312			return set_validity_icpt(scb_s, 0x003CU);
 313
 314	if (apie_s) {
 315		ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
 316				 vcpu->kvm->arch.crypto.crycb,
 317				 fmt_o, fmt_h);
 318		if (ret)
 319			goto end;
 320		scb_s->eca |= scb_o->eca & ECA_APIE;
 321	}
 322
 323	/* we may only allow it if enabled for guest 2 */
 324	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
 325		     (ECB3_AES | ECB3_DEA);
 326	ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC;
 327	if (!ecb3_flags && !ecd_flags)
 328		goto end;
 329
 330	/* copy only the wrapping keys */
 331	if (read_guest_real(vcpu, crycb_addr + 72,
 332			    vsie_page->crycb.dea_wrapping_key_mask, 56))
 333		return set_validity_icpt(scb_s, 0x0035U);
 334
 335	scb_s->ecb3 |= ecb3_flags;
 336	scb_s->ecd |= ecd_flags;
 337
 338	/* xor both blocks in one run */
 339	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
 340	b2 = (unsigned long *)
 341			    vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
 342	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
 343	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
 344end:
 345	switch (ret) {
 346	case -EINVAL:
 347		return set_validity_icpt(scb_s, 0x0022U);
 348	case -EFAULT:
 349		return set_validity_icpt(scb_s, 0x0035U);
 350	case -EACCES:
 351		return set_validity_icpt(scb_s, 0x003CU);
 352	}
 353	scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
 354	return 0;
 355}
 356
 357/* shadow (round up/down) the ibc to avoid validity icpt */
 358static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 359{
 360	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 361	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 362	/* READ_ONCE does not work on bitfields - use a temporary variable */
 363	const uint32_t __new_ibc = scb_o->ibc;
 364	const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
 365	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;
 366
 367	scb_s->ibc = 0;
 368	/* ibc installed in g2 and requested for g3 */
 369	if (vcpu->kvm->arch.model.ibc && new_ibc) {
 370		scb_s->ibc = new_ibc;
 371		/* take care of the minimum ibc level of the machine */
 372		if (scb_s->ibc < min_ibc)
 373			scb_s->ibc = min_ibc;
 374		/* take care of the maximum ibc level set for the guest */
 375		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
 376			scb_s->ibc = vcpu->kvm->arch.model.ibc;
 377	}
 378}
 379
 380/* unshadow the scb, copying parameters back to the real scb */
 381static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 382{
 383	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 384	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 385
 386	/* interception */
 387	scb_o->icptcode = scb_s->icptcode;
 388	scb_o->icptstatus = scb_s->icptstatus;
 389	scb_o->ipa = scb_s->ipa;
 390	scb_o->ipb = scb_s->ipb;
 391	scb_o->gbea = scb_s->gbea;
 392
 393	/* timer */
 394	scb_o->cputm = scb_s->cputm;
 395	scb_o->ckc = scb_s->ckc;
 396	scb_o->todpr = scb_s->todpr;
 397
 398	/* guest state */
 399	scb_o->gpsw = scb_s->gpsw;
 400	scb_o->gg14 = scb_s->gg14;
 401	scb_o->gg15 = scb_s->gg15;
 402	memcpy(scb_o->gcr, scb_s->gcr, 128);
 403	scb_o->pp = scb_s->pp;
 404
 405	/* branch prediction */
 406	if (test_kvm_facility(vcpu->kvm, 82)) {
 407		scb_o->fpf &= ~FPF_BPBC;
 408		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
 409	}
 410
 411	/* interrupt intercept */
 412	switch (scb_s->icptcode) {
 413	case ICPT_PROGI:
 414	case ICPT_INSTPROGI:
 415	case ICPT_EXTINT:
 416		memcpy((void *)((u64)scb_o + 0xc0),
 417		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
 418		break;
 419	case ICPT_PARTEXEC:
 420		/* MVPG only */
 421		memcpy((void *)((u64)scb_o + 0xc0),
 422		       (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
 423		break;
 424	}
 425
 426	if (scb_s->ihcpu != 0xffffU)
 427		scb_o->ihcpu = scb_s->ihcpu;
 428}
 429
 430/*
 431 * Setup the shadow scb by copying and checking the relevant parts of the g2
 432 * provided scb.
 433 *
 434 * Returns: - 0 if the scb has been shadowed
 435 *          - > 0 if control has to be given to guest 2
 436 */
 437static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 438{
 439	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 440	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 441	/* READ_ONCE does not work on bitfields - use a temporary variable */
 442	const uint32_t __new_prefix = scb_o->prefix;
 443	const uint32_t new_prefix = READ_ONCE(__new_prefix);
 444	const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
 445	bool had_tx = scb_s->ecb & ECB_TE;
 446	unsigned long new_mso = 0;
 447	int rc;
 448
 449	/* make sure we don't have any leftovers when reusing the scb */
 450	scb_s->icptcode = 0;
 451	scb_s->eca = 0;
 452	scb_s->ecb = 0;
 453	scb_s->ecb2 = 0;
 454	scb_s->ecb3 = 0;
 455	scb_s->ecd = 0;
 456	scb_s->fac = 0;
 457	scb_s->fpf = 0;
 458
 459	rc = prepare_cpuflags(vcpu, vsie_page);
 460	if (rc)
 461		goto out;
 462
 463	/* timer */
 464	scb_s->cputm = scb_o->cputm;
 465	scb_s->ckc = scb_o->ckc;
 466	scb_s->todpr = scb_o->todpr;
 467	scb_s->epoch = scb_o->epoch;
 468
 469	/* guest state */
 470	scb_s->gpsw = scb_o->gpsw;
 471	scb_s->gg14 = scb_o->gg14;
 472	scb_s->gg15 = scb_o->gg15;
 473	memcpy(scb_s->gcr, scb_o->gcr, 128);
 474	scb_s->pp = scb_o->pp;
 475
 476	/* interception / execution handling */
 477	scb_s->gbea = scb_o->gbea;
 478	scb_s->lctl = scb_o->lctl;
 479	scb_s->svcc = scb_o->svcc;
 480	scb_s->ictl = scb_o->ictl;
 481	/*
 482	 * SKEY handling functions can't deal with false setting of PTE invalid
 483	 * bits. Therefore we cannot provide interpretation and would later
 484	 * have to provide own emulation handlers.
 485	 */
 486	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
 487		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
 488
 489	scb_s->icpua = scb_o->icpua;
 490
 491	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
 492		new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
 493	/* if the hva of the prefix changes, we have to remap the prefix */
 494	if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
 495		prefix_unmapped(vsie_page);
 496	/* SIE will do mso/msl validity and exception checks for us */
 497	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
 498	scb_s->mso = new_mso;
 499	scb_s->prefix = new_prefix;
 500
 501	/* We have to definitely flush the tlb if this scb never ran */
 502	if (scb_s->ihcpu != 0xffffU)
 503		scb_s->ihcpu = scb_o->ihcpu;
 504
 505	/* MVPG and Protection Exception Interpretation are always available */
 506	scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
 507	/* Host-protection-interruption introduced with ESOP */
 508	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
 509		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
 510	/* transactional execution */
 511	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
 512		/* remap the prefix if tx is toggled on */
 513		if (!had_tx)
 514			prefix_unmapped(vsie_page);
 515		scb_s->ecb |= ECB_TE;
 516	}
 517	/* branch prediction */
 518	if (test_kvm_facility(vcpu->kvm, 82))
 519		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
 520	/* SIMD */
 521	if (test_kvm_facility(vcpu->kvm, 129)) {
 522		scb_s->eca |= scb_o->eca & ECA_VX;
 523		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
 524	}
 525	/* Run-time-Instrumentation */
 526	if (test_kvm_facility(vcpu->kvm, 64))
 527		scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
 528	/* Instruction Execution Prevention */
 529	if (test_kvm_facility(vcpu->kvm, 130))
 530		scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
 531	/* Guarded Storage */
 532	if (test_kvm_facility(vcpu->kvm, 133)) {
 533		scb_s->ecb |= scb_o->ecb & ECB_GS;
 534		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
 535	}
 536	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
 537		scb_s->eca |= scb_o->eca & ECA_SII;
 538	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
 539		scb_s->eca |= scb_o->eca & ECA_IB;
 540	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
 541		scb_s->eca |= scb_o->eca & ECA_CEI;
 542	/* Epoch Extension */
 543	if (test_kvm_facility(vcpu->kvm, 139))
 544		scb_s->ecd |= scb_o->ecd & ECD_MEF;
 545
 546	/* etoken */
 547	if (test_kvm_facility(vcpu->kvm, 156))
 548		scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;
 549
 550	scb_s->hpid = HPID_VSIE;
 551	scb_s->cpnc = scb_o->cpnc;
 552
 553	prepare_ibc(vcpu, vsie_page);
 554	rc = shadow_crycb(vcpu, vsie_page);
 555out:
 556	if (rc)
 557		unshadow_scb(vcpu, vsie_page);
 558	return rc;
 559}
 560
 561void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
 562				 unsigned long end)
 563{
 564	struct kvm *kvm = gmap->private;
 565	struct vsie_page *cur;
 566	unsigned long prefix;
 567	struct page *page;
 568	int i;
 569
 570	if (!gmap_is_shadow(gmap))
 571		return;
 572	if (start >= 1UL << 31)
 573		/* We are only interested in prefix pages */
 574		return;
 575
 576	/*
 577	 * Only new shadow blocks are added to the list during runtime,
 578	 * therefore we can safely reference them all the time.
 579	 */
 580	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
 581		page = READ_ONCE(kvm->arch.vsie.pages[i]);
 582		if (!page)
 583			continue;
 584		cur = page_to_virt(page);
 585		if (READ_ONCE(cur->gmap) != gmap)
 586			continue;
 587		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
 588		/* with mso/msl, the prefix lies at an offset */
 589		prefix += cur->scb_s.mso;
 590		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
 591			prefix_unmapped_sync(cur);
 592	}
 593}
 594
 595/*
 596 * Map the first prefix page and, if tx is enabled, also the second prefix page.
 597 *
 598 * The prefix will be protected, a gmap notifier will inform about unmaps.
 599 * The shadow scb must not be executed until the prefix is remapped, this is
 600 * guaranteed by properly handling PROG_REQUEST.
 601 *
 602 * Returns: - 0 if successfully mapped or already mapped
 603 *          - > 0 if control has to be given to guest 2
 604 *          - -EAGAIN if the caller can retry immediately
 605 *          - -ENOMEM if out of memory
 606 */
 607static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 608{
 609	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 610	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
 611	int rc;
 612
 613	if (prefix_is_mapped(vsie_page))
 614		return 0;
 615
 616	/* mark it as mapped so we can catch any concurrent unmappers */
 617	prefix_mapped(vsie_page);
 618
 619	/* with mso/msl, the prefix lies at offset *mso* */
 620	prefix += scb_s->mso;
 621
 622	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
 623	if (!rc && (scb_s->ecb & ECB_TE))
 624		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 625					   prefix + PAGE_SIZE);
 626	/*
 627	 * We don't have to mprotect, we will be called for all unshadows.
 628	 * SIE will detect if protection applies and trigger a validity.
 629	 */
 630	if (rc)
 631		prefix_unmapped(vsie_page);
 632	if (rc > 0 || rc == -EFAULT)
 633		rc = set_validity_icpt(scb_s, 0x0037U);
 634	return rc;
 635}
 636
 637/*
 638 * Pin the guest page given by gpa and set hpa to the pinned host address.
 639 * Will always be pinned writable.
 640 *
 641 * Returns: - 0 on success
 642 *          - -EINVAL if the gpa is not valid guest storage
 643 */
 644static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
 645{
 646	struct page *page;
 647
 648	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
 649	if (is_error_page(page))
 650		return -EINVAL;
 651	*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
 652	return 0;
 653}
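
/*
 * Note: the value stored in *hpa above is really the kernel virtual
 * address of the pinned page plus the offset within it; this presumably
 * relies on the kernel's 1:1 mapping of physical memory, so it can be
 * used as a host address in the shadow control block.
 */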
 654
 655/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
 656static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
 657{
 658	kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);
 659	/* mark the page always as dirty for migration */
 660	mark_page_dirty(kvm, gpa_to_gfn(gpa));
 661}
 662
 663/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
 664static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 665{
 666	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 667	hpa_t hpa;
 668
 669	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
 670	if (hpa) {
 671		unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);
 672		vsie_page->sca_gpa = 0;
 673		scb_s->scaol = 0;
 674		scb_s->scaoh = 0;
 675	}
 676
 677	hpa = scb_s->itdba;
 678	if (hpa) {
 679		unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
 680		vsie_page->itdba_gpa = 0;
 681		scb_s->itdba = 0;
 682	}
 683
 684	hpa = scb_s->gvrd;
 685	if (hpa) {
 686		unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa);
 687		vsie_page->gvrd_gpa = 0;
 688		scb_s->gvrd = 0;
 689	}
 690
 691	hpa = scb_s->riccbd;
 692	if (hpa) {
 693		unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa);
 694		vsie_page->riccbd_gpa = 0;
 695		scb_s->riccbd = 0;
 696	}
 697
 698	hpa = scb_s->sdnxo;
 699	if (hpa) {
 700		unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa);
 701		vsie_page->sdnx_gpa = 0;
 702		scb_s->sdnxo = 0;
 703	}
 704}
 705
 706/*
 707 * Instead of shadowing some blocks, we can simply forward them because the
 708 * addresses in the scb are 64 bit long.
 709 *
 710 * This works as long as the data lies in one page. If blocks ever exceed one
 711 * page, we have to fall back to shadowing.
 712 *
 713 * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
 714 * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
 715 *
 716 * Returns: - 0 if all blocks were pinned.
 717 *          - > 0 if control has to be given to guest 2
 718 *          - -ENOMEM if out of memory
 719 */
 720static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 721{
 722	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 723	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 724	hpa_t hpa;
 725	gpa_t gpa;
 726	int rc = 0;
 727
 728	gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
 729	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
 730		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
 731	if (gpa) {
 732		if (gpa < 2 * PAGE_SIZE)
 733			rc = set_validity_icpt(scb_s, 0x0038U);
 734		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
 735			rc = set_validity_icpt(scb_s, 0x0011U);
 736		else if ((gpa & PAGE_MASK) !=
 737			 ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
 738			rc = set_validity_icpt(scb_s, 0x003bU);
 739		if (!rc) {
 740			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 741			if (rc)
 742				rc = set_validity_icpt(scb_s, 0x0034U);
 743		}
 744		if (rc)
 745			goto unpin;
 746		vsie_page->sca_gpa = gpa;
 747		scb_s->scaoh = (u32)((u64)hpa >> 32);
 748		scb_s->scaol = (u32)(u64)hpa;
 749	}
 750
 751	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
 752	if (gpa && (scb_s->ecb & ECB_TE)) {
 753		if (gpa < 2 * PAGE_SIZE) {
 754			rc = set_validity_icpt(scb_s, 0x0080U);
 755			goto unpin;
 756		}
 757		/* 256 bytes cannot cross page boundaries */
 758		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 759		if (rc) {
 760			rc = set_validity_icpt(scb_s, 0x0080U);
 761			goto unpin;
 762		}
 763		vsie_page->itdba_gpa = gpa;
 764		scb_s->itdba = hpa;
 765	}
 766
 767	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
 768	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
 769		if (gpa < 2 * PAGE_SIZE) {
 770			rc = set_validity_icpt(scb_s, 0x1310U);
 771			goto unpin;
 772		}
 773		/*
 774		 * The 512-byte vector registers cannot cross page boundaries;
 775		 * if this block ever gets bigger, we have to shadow it.
 776		 */
 777		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 778		if (rc) {
 779			rc = set_validity_icpt(scb_s, 0x1310U);
 780			goto unpin;
 781		}
 782		vsie_page->gvrd_gpa = gpa;
 783		scb_s->gvrd = hpa;
 784	}
 785
 786	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
 787	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
 788		if (gpa < 2 * PAGE_SIZE) {
 789			rc = set_validity_icpt(scb_s, 0x0043U);
 790			goto unpin;
 791		}
 792		/* 64 bytes cannot cross page boundaries */
 793		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 794		if (rc) {
 795			rc = set_validity_icpt(scb_s, 0x0043U);
 796			goto unpin;
 797		}
 798		/* Validity 0x0044 will be checked by SIE */
 799		vsie_page->riccbd_gpa = gpa;
 800		scb_s->riccbd = hpa;
 801	}
 802	if (((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) ||
 803	    (scb_s->ecd & ECD_ETOKENF)) {
 804		unsigned long sdnxc;
 805
 806		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
 807		sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
 808		if (!gpa || gpa < 2 * PAGE_SIZE) {
 809			rc = set_validity_icpt(scb_s, 0x10b0U);
 810			goto unpin;
 811		}
 812		if (sdnxc < 6 || sdnxc > 12) {
 813			rc = set_validity_icpt(scb_s, 0x10b1U);
 814			goto unpin;
 815		}
 816		if (gpa & ((1 << sdnxc) - 1)) {
 817			rc = set_validity_icpt(scb_s, 0x10b2U);
 818			goto unpin;
 819		}
 820		/* Due to alignment rules (checked above) this cannot
 821		 * cross page boundaries
 822		 */
 823		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 824		if (rc) {
 825			rc = set_validity_icpt(scb_s, 0x10b0U);
 826			goto unpin;
 827		}
 828		vsie_page->sdnx_gpa = gpa;
 829		scb_s->sdnxo = hpa | sdnxc;
 830	}
 831	return 0;
 832unpin:
 833	unpin_blocks(vcpu, vsie_page);
 834	return rc;
 835}
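
/*
 * Summary of the blocks forwarded by pin_blocks(): the SCA, the ITDB
 * (256 bytes), the guest vector registers (512 bytes), the RICCB
 * (64 bytes) and the SDNX (size given by the alignment value sdnxc).
 * Each is pinned writable and, as checked above, must not cross a page
 * boundary.
 */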
 836
 837/* unpin the scb provided by guest 2, marking it as dirty */
 838static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
 839		      gpa_t gpa)
 840{
 841	hpa_t hpa = (hpa_t) vsie_page->scb_o;
 842
 843	if (hpa)
 844		unpin_guest_page(vcpu->kvm, gpa, hpa);
 845	vsie_page->scb_o = NULL;
 846}
 847
 848/*
 849 * Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
 850 *
 851 * Returns: - 0 if the scb was pinned.
 852 *          - > 0 if control has to be given to guest 2
 853 */
 854static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
 855		   gpa_t gpa)
 856{
 857	hpa_t hpa;
 858	int rc;
 859
 860	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 861	if (rc) {
 862		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 863		WARN_ON_ONCE(rc);
 864		return 1;
 865	}
 866	vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
 867	return 0;
 868}
 869
 870/*
 871 * Inject a fault into guest 2.
 872 *
 873 * Returns: - > 0 if control has to be given to guest 2
 874 *            < 0 if an error occurred during injection.
 875 */
 876static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
 877			bool write_flag)
 878{
 879	struct kvm_s390_pgm_info pgm = {
 880		.code = code,
 881		.trans_exc_code =
 882			/* 0-51: virtual address */
 883			(vaddr & 0xfffffffffffff000UL) |
 884			/* 52-53: store / fetch */
 885			(((unsigned int) !write_flag) + 1) << 10,
 886			/* 62-63: asce id (always primary == 0) */
 887		.exc_access_id = 0, /* always primary */
 888		.op_access_id = 0, /* not MVPG */
 889	};
 890	int rc;
 891
 892	if (code == PGM_PROTECTION)
 893		pgm.trans_exc_code |= 0x4UL;
 894
 895	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
 896	return rc ? rc : 1;
 897}
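
/*
 * Layout of the trans_exc_code assembled above (bit numbers in s390
 * big-endian convention): bits 0-51 hold the page-aligned virtual
 * address, bits 52-53 the access type (0x400 for a store, 0x800 for a
 * fetch, as derived from write_flag), bit 61 (0x4) flags protection
 * exceptions and bits 62-63 the asce identification (primary == 0).
 */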
 898
 899/*
 900 * Handle a fault during vsie execution on a gmap shadow.
 901 *
 902 * Returns: - 0 if the fault was resolved
 903 *          - > 0 if control has to be given to guest 2
 904 *          - < 0 if an error occurred
 905 */
 906static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 907{
 908	int rc;
 909
 910	if (current->thread.gmap_int_code == PGM_PROTECTION)
 911		/* we can directly forward all protection exceptions */
 912		return inject_fault(vcpu, PGM_PROTECTION,
 913				    current->thread.gmap_addr, 1);
 914
 915	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 916				   current->thread.gmap_addr);
 917	if (rc > 0) {
 918		rc = inject_fault(vcpu, rc,
 919				  current->thread.gmap_addr,
 920				  current->thread.gmap_write_flag);
 921		if (rc >= 0)
 922			vsie_page->fault_addr = current->thread.gmap_addr;
 923	}
 924	return rc;
 925}
 926
 927/*
 928 * Retry the previous fault that required guest 2 intervention. This avoids
 929 * one superfluous SIE re-entry and direct exit.
 930 *
 931 * Will ignore any errors. The next SIE fault will do proper fault handling.
 932 */
 933static void handle_last_fault(struct kvm_vcpu *vcpu,
 934			      struct vsie_page *vsie_page)
 935{
 936	if (vsie_page->fault_addr)
 937		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 938				      vsie_page->fault_addr);
 939	vsie_page->fault_addr = 0;
 940}
 941
 942static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
 943{
 944	vsie_page->scb_s.icptcode = 0;
 945}
 946
 947/* rewind the psw and clear the vsie icpt, so we can retry execution */
 948static void retry_vsie_icpt(struct vsie_page *vsie_page)
 949{
 950	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 951	int ilen = insn_length(scb_s->ipa >> 8);
 952
 953	/* take care of EXECUTE instructions */
 954	if (scb_s->icptstatus & 1) {
 955		ilen = (scb_s->icptstatus >> 4) & 0x6;
 956		if (!ilen)
 957			ilen = 4;
 958	}
 959	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
 960	clear_vsie_icpt(vsie_page);
 961}
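
/*
 * Note on the ilen computation above: if the instruction was the target
 * of an EXECUTE (icptstatus bit 0x01 set), the length is taken from
 * icptstatus itself ((icptstatus >> 4) & 0x6 yields 0, 2, 4 or 6, with
 * 0 treated as 4); otherwise insn_length() derives it from the opcode
 * in ipa.
 */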
 962
 963/*
 964 * Try to shadow + enable the guest 2 provided facility list.
 965 * Retry instruction execution if enabled for and provided by guest 2.
 966 *
 967 * Returns: - 0 if handled (retry or guest 2 icpt)
 968 *          - > 0 if control has to be given to guest 2
 969 */
 970static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 971{
 972	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 973	__u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;
 974
 975	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
 976		retry_vsie_icpt(vsie_page);
 977		if (read_guest_real(vcpu, fac, &vsie_page->fac,
 978				    sizeof(vsie_page->fac)))
 979			return set_validity_icpt(scb_s, 0x1090U);
 980		scb_s->fac = (__u32)(__u64) &vsie_page->fac;
 981	}
 982	return 0;
 983}
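
/*
 * After handle_stfle() the fac origin in the shadow scb points at the
 * copy of the guest 3 facility list held in the vsie page, so the
 * retried STFLE can presumably be interpreted by SIE from the shadowed
 * list without touching guest 3 memory again.
 */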
 984
 985/*
 986 * Run the vsie on a shadow scb and a shadow gmap, without any further
 987 * sanity checks, handling SIE faults.
 988 *
 989 * Returns: - 0 everything went fine
 990 *          - > 0 if control has to be given to guest 2
 991 *          - < 0 if an error occurred
 992 */
 993static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 994	__releases(vcpu->kvm->srcu)
 995	__acquires(vcpu->kvm->srcu)
 996{
 997	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 998	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 999	int guest_bp_isolation;
1000	int rc = 0;
1001
1002	handle_last_fault(vcpu, vsie_page);
1003
1004	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1005
1006	/* save current guest state of bp isolation override */
1007	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
1008
1009	/*
1010	 * The guest is running with BPBC, so we have to force it on for our
1011	 * nested guest. This is done by enabling BPBC globally, so the BPBC
1012	 * control in the SCB (which the nested guest can modify) is simply
1013	 * ignored.
1014	 */
1015	if (test_kvm_facility(vcpu->kvm, 82) &&
1016	    vcpu->arch.sie_block->fpf & FPF_BPBC)
1017		set_thread_flag(TIF_ISOLATE_BP_GUEST);
1018
1019	local_irq_disable();
1020	guest_enter_irqoff();
1021	local_irq_enable();
1022
1023	/*
1024	 * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
1025	 * and VCPU requests also hinder the vSIE from running and lead
1026	 * to an immediate exit. kvm_s390_vsie_kick() has to be used to
1027	 * also kick the vSIE.
1028	 */
1029	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
1030	barrier();
1031	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
1032		rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
1033	barrier();
1034	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
1035
1036	local_irq_disable();
1037	guest_exit_irqoff();
1038	local_irq_enable();
1039
1040	/* restore guest state for bp isolation override */
1041	if (!guest_bp_isolation)
1042		clear_thread_flag(TIF_ISOLATE_BP_GUEST);
1043
1044	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1045
1046	if (rc == -EINTR) {
1047		VCPU_EVENT(vcpu, 3, "%s", "machine check");
1048		kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
1049		return 0;
1050	}
1051
1052	if (rc > 0)
1053		rc = 0; /* we could still have an icpt */
1054	else if (rc == -EFAULT)
1055		return handle_fault(vcpu, vsie_page);
1056
1057	switch (scb_s->icptcode) {
1058	case ICPT_INST:
1059		if (scb_s->ipa == 0xb2b0)
1060			rc = handle_stfle(vcpu, vsie_page);
1061		break;
1062	case ICPT_STOP:
1063		/* stop not requested by g2 - must have been a kick */
1064		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
1065			clear_vsie_icpt(vsie_page);
1066		break;
1067	case ICPT_VALIDITY:
1068		if ((scb_s->ipa & 0xf000) != 0xf000)
1069			scb_s->ipa += 0x1000;
1070		break;
1071	}
1072	return rc;
1073}
1074
1075static void release_gmap_shadow(struct vsie_page *vsie_page)
1076{
1077	if (vsie_page->gmap)
1078		gmap_put(vsie_page->gmap);
1079	WRITE_ONCE(vsie_page->gmap, NULL);
1080	prefix_unmapped(vsie_page);
1081}
1082
1083static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
1084			       struct vsie_page *vsie_page)
1085{
1086	unsigned long asce;
1087	union ctlreg0 cr0;
1088	struct gmap *gmap;
1089	int edat;
1090
1091	asce = vcpu->arch.sie_block->gcr[1];
1092	cr0.val = vcpu->arch.sie_block->gcr[0];
1093	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
1094	edat += edat && test_kvm_facility(vcpu->kvm, 78);
1095
1096	/*
1097	 * ASCE or EDAT could have changed since last icpt, or the gmap
1098	 * we're holding has been unshadowed. If the gmap is still valid,
1099	 * we can safely reuse it.
1100	 */
1101	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
1102		return 0;
1103
1104	/* release the old shadow - if any, and mark the prefix as unmapped */
1105	release_gmap_shadow(vsie_page);
1106	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
1107	if (IS_ERR(gmap))
1108		return PTR_ERR(gmap);
1109	gmap->private = vcpu->kvm;
1110	WRITE_ONCE(vsie_page->gmap, gmap);
1111	return 0;
1112}
1113
1114/*
1115 * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
1116 */
1117static void register_shadow_scb(struct kvm_vcpu *vcpu,
1118				struct vsie_page *vsie_page)
1119{
1120	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1121
1122	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
1123	/*
1124	 * External calls have to lead to a kick of the vcpu and therefore
1125	 * of the vsie, so simulate the wait state.
1126	 */
1127	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
1128	/*
1129	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
1130	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
1131	 */
1132	preempt_disable();
1133	scb_s->epoch += vcpu->kvm->arch.epoch;
1134
1135	if (scb_s->ecd & ECD_MEF) {
1136		scb_s->epdx += vcpu->kvm->arch.epdx;
1137		if (scb_s->epoch < vcpu->kvm->arch.epoch)
1138			scb_s->epdx += 1;
1139	}
1140
1141	preempt_enable();
1142}
1143
1144/*
1145 * Unregister a shadow scb from a VCPU.
1146 */
1147static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
1148{
1149	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
1150	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
1151}
1152
1153/*
1154 * Run the vsie on a shadowed scb, managing the gmap shadow, handling
1155 * prefix pages and faults.
1156 *
1157 * Returns: - 0 if no errors occurred
1158 *          - > 0 if control has to be given to guest 2
1159 *          - -ENOMEM if out of memory
1160 */
1161static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1162{
1163	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1164	int rc = 0;
1165
1166	while (1) {
1167		rc = acquire_gmap_shadow(vcpu, vsie_page);
1168		if (!rc)
1169			rc = map_prefix(vcpu, vsie_page);
1170		if (!rc) {
1171			gmap_enable(vsie_page->gmap);
1172			update_intervention_requests(vsie_page);
1173			rc = do_vsie_run(vcpu, vsie_page);
1174			gmap_enable(vcpu->arch.gmap);
1175		}
1176		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);
1177
1178		if (rc == -EAGAIN)
1179			rc = 0;
1180		if (rc || scb_s->icptcode || signal_pending(current) ||
1181		    kvm_s390_vcpu_has_irq(vcpu, 0) ||
1182		    kvm_s390_vcpu_sie_inhibited(vcpu))
1183			break;
1184		cond_resched();
1185	}
1186
1187	if (rc == -EFAULT) {
1188		/*
1189		 * Addressing exceptions are always presented as intercepts.
1190		 * As addressing exceptions are suppressing and our guest 3 PSW
1191		 * points at the responsible instruction, we have to
1192		 * forward the PSW and set the ilc. If we can't read the guest 3
1193		 * instruction, we can use an arbitrary ilc. Let's always use
1194		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
1195		 * memory. (we could also fake the shadow so the hardware
1196		 * handles it).
1197		 */
1198		scb_s->icptcode = ICPT_PROGI;
1199		scb_s->iprcc = PGM_ADDRESSING;
1200		scb_s->pgmilc = 4;
1201		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
1202		rc = 1;
1203	}
1204	return rc;
1205}
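
/*
 * The loop in vsie_run() keeps re-entering the vSIE until either an
 * intercept has to be delivered to guest 2 (rc or icptcode set), a
 * signal or interrupt is pending for the host VCPU, or the SIE is
 * inhibited; -EAGAIN from the gmap/prefix handling simply retries.
 */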
1206
1207/*
1208 * Get or create a vsie page for a scb address.
1209 *
1210 * Returns: - address of a vsie page (cached or new one)
1211 *          - NULL if the same scb address is already used by another VCPU
1212 *          - ERR_PTR(-ENOMEM) if out of memory
1213 */
1214static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
1215{
1216	struct vsie_page *vsie_page;
1217	struct page *page;
1218	int nr_vcpus;
1219
1220	rcu_read_lock();
1221	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
1222	rcu_read_unlock();
1223	if (page) {
1224		if (page_ref_inc_return(page) == 2)
1225			return page_to_virt(page);
1226		page_ref_dec(page);
1227	}
1228
1229	/*
1230	 * We want at least #online_vcpus shadows, so every VCPU can execute
1231	 * the VSIE in parallel.
1232	 */
1233	nr_vcpus = atomic_read(&kvm->online_vcpus);
1234
1235	mutex_lock(&kvm->arch.vsie.mutex);
1236	if (kvm->arch.vsie.page_count < nr_vcpus) {
1237		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA);
1238		if (!page) {
1239			mutex_unlock(&kvm->arch.vsie.mutex);
1240			return ERR_PTR(-ENOMEM);
1241		}
1242		page_ref_inc(page);
1243		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
1244		kvm->arch.vsie.page_count++;
1245	} else {
1246		/* reuse an existing entry that belongs to nobody */
1247		while (true) {
1248			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
1249			if (page_ref_inc_return(page) == 2)
1250				break;
1251			page_ref_dec(page);
1252			kvm->arch.vsie.next++;
1253			kvm->arch.vsie.next %= nr_vcpus;
1254		}
1255		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
1256	}
1257	page->index = addr;
1258	/* double use of the same address */
1259	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
1260		page_ref_dec(page);
1261		mutex_unlock(&kvm->arch.vsie.mutex);
1262		return NULL;
1263	}
1264	mutex_unlock(&kvm->arch.vsie.mutex);
1265
1266	vsie_page = page_to_virt(page);
1267	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
1268	release_gmap_shadow(vsie_page);
1269	vsie_page->fault_addr = 0;
1270	vsie_page->scb_s.ihcpu = 0xffffU;
1271	return vsie_page;
1272}
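
/*
 * Reference counting scheme used by get_vsie_page(): a cached page has
 * a refcount of 1 while unused, so page_ref_inc_return() == 2 means we
 * obtained exclusive ownership; any other value means another VCPU is
 * currently using the page and we have to back off.
 */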
1273
1274/* put a vsie page acquired via get_vsie_page */
1275static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
1276{
1277	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);
1278
1279	page_ref_dec(page);
1280}
1281
1282int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
1283{
1284	struct vsie_page *vsie_page;
1285	unsigned long scb_addr;
1286	int rc;
1287
1288	vcpu->stat.instruction_sie++;
1289	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
1290		return -EOPNOTSUPP;
1291	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1292		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1293
1294	BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
1295	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);
1296
1297	/* 512 byte alignment */
1298	if (unlikely(scb_addr & 0x1ffUL))
1299		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
1300
1301	if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
1302	    kvm_s390_vcpu_sie_inhibited(vcpu))
1303		return 0;
1304
1305	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
1306	if (IS_ERR(vsie_page))
1307		return PTR_ERR(vsie_page);
1308	else if (!vsie_page)
1309		/* double use of sie control block - simply do nothing */
1310		return 0;
1311
1312	rc = pin_scb(vcpu, vsie_page, scb_addr);
1313	if (rc)
1314		goto out_put;
1315	rc = shadow_scb(vcpu, vsie_page);
1316	if (rc)
1317		goto out_unpin_scb;
1318	rc = pin_blocks(vcpu, vsie_page);
1319	if (rc)
1320		goto out_unshadow;
1321	register_shadow_scb(vcpu, vsie_page);
1322	rc = vsie_run(vcpu, vsie_page);
1323	unregister_shadow_scb(vcpu);
1324	unpin_blocks(vcpu, vsie_page);
1325out_unshadow:
1326	unshadow_scb(vcpu, vsie_page);
1327out_unpin_scb:
1328	unpin_scb(vcpu, vsie_page, scb_addr);
1329out_put:
1330	put_vsie_page(vcpu->kvm, vsie_page);
1331
1332	return rc < 0 ? rc : 0;
1333}
1334
1335/* Init the vsie data structures. To be called when a vm is initialized. */
1336void kvm_s390_vsie_init(struct kvm *kvm)
1337{
1338	mutex_init(&kvm->arch.vsie.mutex);
1339	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL);
1340}
1341
1342/* Destroy the vsie data structures. To be called when a vm is destroyed. */
1343void kvm_s390_vsie_destroy(struct kvm *kvm)
1344{
1345	struct vsie_page *vsie_page;
1346	struct page *page;
1347	int i;
1348
1349	mutex_lock(&kvm->arch.vsie.mutex);
1350	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
1351		page = kvm->arch.vsie.pages[i];
1352		kvm->arch.vsie.pages[i] = NULL;
1353		vsie_page = page_to_virt(page);
1354		release_gmap_shadow(vsie_page);
1355		/* free the radix tree entry */
1356		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
1357		__free_page(page);
1358	}
1359	kvm->arch.vsie.page_count = 0;
1360	mutex_unlock(&kvm->arch.vsie.mutex);
1361}
1362
1363void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
1364{
1365	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);
1366
1367	/*
1368	 * Even if the VCPU lets go of the shadow sie block reference, it is
1369	 * still valid in the cache. So we can safely kick it.
1370	 */
1371	if (scb) {
1372		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
1373		if (scb->prog0c & PROG_IN_SIE)
1374			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
1375	}
1376}