v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * kvm nested virtualization support for s390x
   4 *
   5 * Copyright IBM Corp. 2016, 2018
   6 *
   7 *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
   8 */
   9#include <linux/vmalloc.h>
  10#include <linux/kvm_host.h>
  11#include <linux/bug.h>
  12#include <linux/list.h>
  13#include <linux/bitmap.h>
  14#include <linux/sched/signal.h>
  15#include <linux/io.h>
  16
  17#include <asm/gmap.h>
  18#include <asm/mmu_context.h>
  19#include <asm/sclp.h>
  20#include <asm/nmi.h>
  21#include <asm/dis.h>
  22#include <asm/facility.h>
  23#include "kvm-s390.h"
  24#include "gaccess.h"
  25
  26struct vsie_page {
  27	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
  28	/*
  29	 * the backup info for machine check. ensure it's at
  30	 * the same offset as that in struct sie_page!
  31	 */
  32	struct mcck_volatile_info mcck_info;    /* 0x0200 */
  33	/*
  34	 * The pinned original scb. Be aware that other VCPUs can modify
  35	 * it while we read from it. Values that are used for conditions or
  36	 * are reused conditionally, should be accessed via READ_ONCE.
  37	 */
  38	struct kvm_s390_sie_block *scb_o;	/* 0x0218 */
  39	/* the shadow gmap in use by the vsie_page */
  40	struct gmap *gmap;			/* 0x0220 */
  41	/* address of the last reported fault to guest2 */
  42	unsigned long fault_addr;		/* 0x0228 */
  43	/* calculated guest addresses of satellite control blocks */
  44	gpa_t sca_gpa;				/* 0x0230 */
  45	gpa_t itdba_gpa;			/* 0x0238 */
  46	gpa_t gvrd_gpa;				/* 0x0240 */
  47	gpa_t riccbd_gpa;			/* 0x0248 */
  48	gpa_t sdnx_gpa;				/* 0x0250 */
  49	__u8 reserved[0x0700 - 0x0258];		/* 0x0258 */
  50	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
  51	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
  52};
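/*
 * Terminology used throughout this file: guest 2 (g2) is the KVM guest
 * that executes the SIE instruction, guest 3 (g3) is the nested guest
 * that g2 wants to run. scb_o above is g2's original, pinned control
 * block; scb_s is the shadow copy that is actually handed to the
 * hardware on behalf of g3.
 */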
  53
  54/* trigger a validity icpt for the given scb */
  55static int set_validity_icpt(struct kvm_s390_sie_block *scb,
  56			     __u16 reason_code)
  57{
  58	scb->ipa = 0x1000;
  59	scb->ipb = ((__u32) reason_code) << 16;
  60	scb->icptcode = ICPT_VALIDITY;
  61	return 1;
  62}
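/*
 * For example, set_validity_icpt(scb, 0x0037U) stores 0x1000 in ipa and
 * 0x00370000 in ipb, so guest 2 observes a validity intercept with
 * reason code 0x37. The return value of 1 follows the convention used
 * throughout this file: > 0 means control has to go back to guest 2.
 */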
  63
  64/* mark the prefix as unmapped, this will block the VSIE */
  65static void prefix_unmapped(struct vsie_page *vsie_page)
  66{
  67	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
  68}
  69
  70/* mark the prefix as unmapped and wait until the VSIE has been left */
  71static void prefix_unmapped_sync(struct vsie_page *vsie_page)
  72{
  73	prefix_unmapped(vsie_page);
  74	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
  75		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
  76	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
  77		cpu_relax();
  78}
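/*
 * Note on the handshake above: PROG_REQUEST in prog20 prevents a
 * (re-)entry into SIE, while PROG_IN_SIE in prog0c is set for as long
 * as the CPU executes inside SIE (see do_vsie_run(), which simulates
 * the same protocol on guest 2's block). Requesting a STOP
 * interception kicks a currently running vSIE out, and we busy-wait
 * until it has left.
 */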
  79
  80/* mark the prefix as mapped, this will allow the VSIE to run */
  81static void prefix_mapped(struct vsie_page *vsie_page)
  82{
  83	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
  84}
  85
  86/* test if the prefix is mapped into the gmap shadow */
  87static int prefix_is_mapped(struct vsie_page *vsie_page)
  88{
  89	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
  90}
  91
  92/* copy the updated intervention request bits into the shadow scb */
  93static void update_intervention_requests(struct vsie_page *vsie_page)
  94{
  95	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
  96	int cpuflags;
  97
  98	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
  99	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
 100	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
 101}
 102
 103/* shadow (filter and validate) the cpuflags  */
 104static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 105{
 106	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 107	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 108	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);
 109
 110	/* we don't allow ESA/390 guests */
 111	if (!(cpuflags & CPUSTAT_ZARCH))
 112		return set_validity_icpt(scb_s, 0x0001U);
 113
 114	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
 115		return set_validity_icpt(scb_s, 0x0001U);
 116	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
 117		return set_validity_icpt(scb_s, 0x0007U);
 118
 119	/* intervention requests will be set later */
 120	newflags = CPUSTAT_ZARCH;
 121	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
 122		newflags |= CPUSTAT_GED;
 123	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
 124		if (cpuflags & CPUSTAT_GED)
 125			return set_validity_icpt(scb_s, 0x0001U);
 126		newflags |= CPUSTAT_GED2;
 127	}
 128	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
 129		newflags |= cpuflags & CPUSTAT_P;
 130	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
 131		newflags |= cpuflags & CPUSTAT_SM;
 132	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
 133		newflags |= cpuflags & CPUSTAT_IBS;
 134	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
 135		newflags |= cpuflags & CPUSTAT_KSS;
 136
 137	atomic_set(&scb_s->cpuflags, newflags);
 138	return 0;
 139}
 140/* Copy to APCB FORMAT1 from APCB FORMAT0 */
 141static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
 142			unsigned long crycb_gpa, struct kvm_s390_apcb1 *apcb_h)
 143{
 144	struct kvm_s390_apcb0 tmp;
 145	unsigned long apcb_gpa;
 146
 147	apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);
 148
 149	if (read_guest_real(vcpu, apcb_gpa, &tmp,
 150			    sizeof(struct kvm_s390_apcb0)))
 151		return -EFAULT;
 152
 153	apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
 154	apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
 155	apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;
 156
 157	return 0;
 158
 159}
 160
 161/**
 162 * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
 163 * @vcpu: pointer to the virtual CPU
 164 * @apcb_s: pointer to start of apcb in the shadow crycb
 165 * @crycb_gpa: guest physical address to start of original guest crycb
 166 * @apcb_h: pointer to start of apcb in the guest1
 167 *
  168 * Returns 0 on success and -EFAULT on error reading the guest apcb
 169 */
 170static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
 171			unsigned long crycb_gpa, unsigned long *apcb_h)
 172{
 173	unsigned long apcb_gpa;
 174
 175	apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);
 176
 177	if (read_guest_real(vcpu, apcb_gpa, apcb_s,
 178			    sizeof(struct kvm_s390_apcb0)))
 179		return -EFAULT;
 180
 181	bitmap_and(apcb_s, apcb_s, apcb_h,
 182		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb0));
 183
 184	return 0;
 185}
 186
 187/**
 188 * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
 189 * @vcpu: pointer to the virtual CPU
 190 * @apcb_s: pointer to start of apcb in the shadow crycb
 191 * @crycb_gpa: guest physical address to start of original guest crycb
 192 * @apcb_h: pointer to start of apcb in the host
 193 *
  194 * Returns 0 on success and -EFAULT on error reading the guest apcb
 195 */
 196static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
 197			unsigned long crycb_gpa,
 198			unsigned long *apcb_h)
 199{
 200	unsigned long apcb_gpa;
 201
 202	apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb1);
 203
 204	if (read_guest_real(vcpu, apcb_gpa, apcb_s,
 205			    sizeof(struct kvm_s390_apcb1)))
 206		return -EFAULT;
 207
 208	bitmap_and(apcb_s, apcb_s, apcb_h,
 209		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1));
 210
 211	return 0;
 212}
 213
 214/**
 215 * setup_apcb - Create a shadow copy of the apcb.
 216 * @vcpu: pointer to the virtual CPU
 217 * @crycb_s: pointer to shadow crycb
 218 * @crycb_gpa: guest physical address of original guest crycb
 219 * @crycb_h: pointer to the host crycb
 220 * @fmt_o: format of the original guest crycb.
 221 * @fmt_h: format of the host crycb.
 222 *
 223 * Checks the compatibility between the guest and host crycb and calls the
 224 * appropriate copy function.
 225 *
 226 * Return 0 or an error number if the guest and host crycb are incompatible.
 227 */
 228static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
 229	       const u32 crycb_gpa,
 230	       struct kvm_s390_crypto_cb *crycb_h,
 231	       int fmt_o, int fmt_h)
 232{
 233	switch (fmt_o) {
 234	case CRYCB_FORMAT2:
 235		if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 256) & PAGE_MASK))
 236			return -EACCES;
 237		if (fmt_h != CRYCB_FORMAT2)
 238			return -EINVAL;
 239		return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
 240				    crycb_gpa,
 241				    (unsigned long *)&crycb_h->apcb1);
 242	case CRYCB_FORMAT1:
 243		switch (fmt_h) {
 244		case CRYCB_FORMAT2:
 245			return setup_apcb10(vcpu, &crycb_s->apcb1,
 246					    crycb_gpa,
 247					    &crycb_h->apcb1);
 248		case CRYCB_FORMAT1:
 249			return setup_apcb00(vcpu,
 250					    (unsigned long *) &crycb_s->apcb0,
 251					    crycb_gpa,
 252					    (unsigned long *) &crycb_h->apcb0);
 253		}
 254		break;
 255	case CRYCB_FORMAT0:
 256		if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 32) & PAGE_MASK))
 257			return -EACCES;
 258
 259		switch (fmt_h) {
 260		case CRYCB_FORMAT2:
 261			return setup_apcb10(vcpu, &crycb_s->apcb1,
 262					    crycb_gpa,
 263					    &crycb_h->apcb1);
 264		case CRYCB_FORMAT1:
 265		case CRYCB_FORMAT0:
 266			return setup_apcb00(vcpu,
 267					    (unsigned long *) &crycb_s->apcb0,
 268					    crycb_gpa,
 269					    (unsigned long *) &crycb_h->apcb0);
 270		}
 271	}
 272	return -EINVAL;
 273}
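/*
 * The (gpa & PAGE_MASK) != ((gpa + len) & PAGE_MASK) tests above are
 * the usual idiom for "does this block cross a page boundary": if the
 * start and the end of the block fall into different pages, the block
 * cannot be reached through a single pinned page and we bail out with
 * -EACCES.
 */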
 274
 275/**
 276 * shadow_crycb - Create a shadow copy of the crycb block
 277 * @vcpu: a pointer to the virtual CPU
  278 * @vsie_page: a pointer to internal data used for the vSIE
 279 *
 280 * Create a shadow copy of the crycb block and setup key wrapping, if
 281 * requested for guest 3 and enabled for guest 2.
 282 *
 283 * We accept format-1 or format-2, but we convert format-1 into format-2
 284 * in the shadow CRYCB.
 285 * Using format-2 enables the firmware to choose the right format when
 286 * scheduling the SIE.
 287 * There is nothing to do for format-0.
 288 *
  289 * This function centralizes the issuing of set_validity_icpt() for all
 290 * the subfunctions working on the crycb.
 291 *
 292 * Returns: - 0 if shadowed or nothing to do
 293 *          - > 0 if control has to be given to guest 2
 294 */
 295static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 296{
 297	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 298	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 299	const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
 300	const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
 301	unsigned long *b1, *b2;
 302	u8 ecb3_flags;
 303	u32 ecd_flags;
 304	int apie_h;
 305	int apie_s;
 306	int key_msk = test_kvm_facility(vcpu->kvm, 76);
 307	int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
 308	int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
 309	int ret = 0;
 310
 311	scb_s->crycbd = 0;
 312
 313	apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
 314	apie_s = apie_h & scb_o->eca;
 315	if (!apie_s && (!key_msk || (fmt_o == CRYCB_FORMAT0)))
 316		return 0;
 317
 318	if (!crycb_addr)
 319		return set_validity_icpt(scb_s, 0x0039U);
 320
 321	if (fmt_o == CRYCB_FORMAT1)
 322		if ((crycb_addr & PAGE_MASK) !=
 323		    ((crycb_addr + 128) & PAGE_MASK))
 324			return set_validity_icpt(scb_s, 0x003CU);
 325
 326	if (apie_s) {
 327		ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
 328				 vcpu->kvm->arch.crypto.crycb,
 329				 fmt_o, fmt_h);
 330		if (ret)
 331			goto end;
 332		scb_s->eca |= scb_o->eca & ECA_APIE;
 333	}
 334
 335	/* we may only allow it if enabled for guest 2 */
 336	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
 337		     (ECB3_AES | ECB3_DEA);
 338	ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd &
 339		     (ECD_ECC | ECD_HMAC);
 340	if (!ecb3_flags && !ecd_flags)
 341		goto end;
 342
 343	/* copy only the wrapping keys */
 344	if (read_guest_real(vcpu, crycb_addr + 72,
 345			    vsie_page->crycb.dea_wrapping_key_mask, 56))
 346		return set_validity_icpt(scb_s, 0x0035U);
 347
 348	scb_s->ecb3 |= ecb3_flags;
 349	scb_s->ecd |= ecd_flags;
 350
 351	/* xor both blocks in one run */
 352	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
 353	b2 = (unsigned long *)
 354			    vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
 355	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
 356	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
 357end:
 358	switch (ret) {
 359	case -EINVAL:
 360		return set_validity_icpt(scb_s, 0x0022U);
 361	case -EFAULT:
 362		return set_validity_icpt(scb_s, 0x0035U);
 363	case -EACCES:
 364		return set_validity_icpt(scb_s, 0x003CU);
 365	}
 366	scb_s->crycbd = (u32)virt_to_phys(&vsie_page->crycb) | CRYCB_FORMAT2;
 367	return 0;
 368}
 369
 370/* shadow (round up/down) the ibc to avoid validity icpt */
 371static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 372{
 373	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 374	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 375	/* READ_ONCE does not work on bitfields - use a temporary variable */
 376	const uint32_t __new_ibc = scb_o->ibc;
 377	const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
 378	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;
 379
 380	scb_s->ibc = 0;
 381	/* ibc installed in g2 and requested for g3 */
 382	if (vcpu->kvm->arch.model.ibc && new_ibc) {
 383		scb_s->ibc = new_ibc;
  384		/* take care of the minimum ibc level of the machine */
 385		if (scb_s->ibc < min_ibc)
 386			scb_s->ibc = min_ibc;
 387		/* take care of the maximum ibc level set for the guest */
 388		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
 389			scb_s->ibc = vcpu->kvm->arch.model.ibc;
 390	}
 391}
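/*
 * In effect this clamps the requested ibc into [min_ibc, model.ibc].
 * With e.g. min_ibc = 0x220 and model.ibc = 0x230, a g3 request of
 * 0x210 is raised to 0x220 and a request of 0x240 is lowered to 0x230.
 */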
 392
 393/* unshadow the scb, copying parameters back to the real scb */
 394static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 395{
 396	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 397	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 398
 399	/* interception */
 400	scb_o->icptcode = scb_s->icptcode;
 401	scb_o->icptstatus = scb_s->icptstatus;
 402	scb_o->ipa = scb_s->ipa;
 403	scb_o->ipb = scb_s->ipb;
 404	scb_o->gbea = scb_s->gbea;
 405
 406	/* timer */
 407	scb_o->cputm = scb_s->cputm;
 408	scb_o->ckc = scb_s->ckc;
 409	scb_o->todpr = scb_s->todpr;
 410
 411	/* guest state */
 412	scb_o->gpsw = scb_s->gpsw;
 413	scb_o->gg14 = scb_s->gg14;
 414	scb_o->gg15 = scb_s->gg15;
 415	memcpy(scb_o->gcr, scb_s->gcr, 128);
 416	scb_o->pp = scb_s->pp;
 417
 418	/* branch prediction */
 419	if (test_kvm_facility(vcpu->kvm, 82)) {
 420		scb_o->fpf &= ~FPF_BPBC;
 421		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
 422	}
 423
 424	/* interrupt intercept */
 425	switch (scb_s->icptcode) {
 426	case ICPT_PROGI:
 427	case ICPT_INSTPROGI:
 428	case ICPT_EXTINT:
 429		memcpy((void *)((u64)scb_o + 0xc0),
 430		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
 431		break;
 432	}
 433
 434	if (scb_s->ihcpu != 0xffffU)
 435		scb_o->ihcpu = scb_s->ihcpu;
 436}
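/*
 * Note: the memcpy above transfers the interruption parameters stored
 * in the SIE block at offsets 0xc0 - 0xf0 (e.g. mcic, which
 * vsie_handle_mvpg() uses as the PEI block) back to guest 2, so g2 can
 * inspect the details of the intercepted program/external interrupt.
 */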
 437
 438/*
 439 * Setup the shadow scb by copying and checking the relevant parts of the g2
 440 * provided scb.
 441 *
 442 * Returns: - 0 if the scb has been shadowed
 443 *          - > 0 if control has to be given to guest 2
 444 */
 445static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 446{
 447	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 448	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 449	/* READ_ONCE does not work on bitfields - use a temporary variable */
 450	const uint32_t __new_prefix = scb_o->prefix;
 451	const uint32_t new_prefix = READ_ONCE(__new_prefix);
 452	const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
 453	bool had_tx = scb_s->ecb & ECB_TE;
 454	unsigned long new_mso = 0;
 455	int rc;
 456
 457	/* make sure we don't have any leftovers when reusing the scb */
 458	scb_s->icptcode = 0;
 459	scb_s->eca = 0;
 460	scb_s->ecb = 0;
 461	scb_s->ecb2 = 0;
 462	scb_s->ecb3 = 0;
 463	scb_s->ecd = 0;
 464	scb_s->fac = 0;
 465	scb_s->fpf = 0;
 466
 467	rc = prepare_cpuflags(vcpu, vsie_page);
 468	if (rc)
 469		goto out;
 470
 471	/* timer */
 472	scb_s->cputm = scb_o->cputm;
 473	scb_s->ckc = scb_o->ckc;
 474	scb_s->todpr = scb_o->todpr;
 475	scb_s->epoch = scb_o->epoch;
 476
 477	/* guest state */
 478	scb_s->gpsw = scb_o->gpsw;
 479	scb_s->gg14 = scb_o->gg14;
 480	scb_s->gg15 = scb_o->gg15;
 481	memcpy(scb_s->gcr, scb_o->gcr, 128);
 482	scb_s->pp = scb_o->pp;
 483
 484	/* interception / execution handling */
 485	scb_s->gbea = scb_o->gbea;
 486	scb_s->lctl = scb_o->lctl;
 487	scb_s->svcc = scb_o->svcc;
 488	scb_s->ictl = scb_o->ictl;
 489	/*
 490	 * SKEY handling functions can't deal with false setting of PTE invalid
 491	 * bits. Therefore we cannot provide interpretation and would later
 492	 * have to provide own emulation handlers.
  493	 * have to provide our own emulation handlers.
 494	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
 495		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
 496
 497	scb_s->icpua = scb_o->icpua;
 498
 499	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
 500		new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
 501	/* if the hva of the prefix changes, we have to remap the prefix */
 502	if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
 503		prefix_unmapped(vsie_page);
 504	 /* SIE will do mso/msl validity and exception checks for us */
 505	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
 506	scb_s->mso = new_mso;
 507	scb_s->prefix = new_prefix;
 508
 509	/* We have to definitely flush the tlb if this scb never ran */
 510	if (scb_s->ihcpu != 0xffffU)
 511		scb_s->ihcpu = scb_o->ihcpu;
 512
 513	/* MVPG and Protection Exception Interpretation are always available */
 514	scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
 515	/* Host-protection-interruption introduced with ESOP */
 516	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
 517		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
 518	/*
 519	 * CPU Topology
 520	 * This facility only uses the utility field of the SCA and none of
 521	 * the cpu entries that are problematic with the other interpretation
 522	 * facilities so we can pass it through
 523	 */
 524	if (test_kvm_facility(vcpu->kvm, 11))
 525		scb_s->ecb |= scb_o->ecb & ECB_PTF;
 526	/* transactional execution */
 527	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
  528		/* remap the prefix if tx is toggled on */
 529		if (!had_tx)
 530			prefix_unmapped(vsie_page);
 531		scb_s->ecb |= ECB_TE;
 532	}
 533	/* specification exception interpretation */
 534	scb_s->ecb |= scb_o->ecb & ECB_SPECI;
 535	/* branch prediction */
 536	if (test_kvm_facility(vcpu->kvm, 82))
 537		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
 538	/* SIMD */
 539	if (test_kvm_facility(vcpu->kvm, 129)) {
 540		scb_s->eca |= scb_o->eca & ECA_VX;
 541		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
 542	}
 543	/* Run-time-Instrumentation */
 544	if (test_kvm_facility(vcpu->kvm, 64))
 545		scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
 546	/* Instruction Execution Prevention */
 547	if (test_kvm_facility(vcpu->kvm, 130))
 548		scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
 549	/* Guarded Storage */
 550	if (test_kvm_facility(vcpu->kvm, 133)) {
 551		scb_s->ecb |= scb_o->ecb & ECB_GS;
 552		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
 553	}
 554	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
 555		scb_s->eca |= scb_o->eca & ECA_SII;
 556	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
 557		scb_s->eca |= scb_o->eca & ECA_IB;
 558	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
 559		scb_s->eca |= scb_o->eca & ECA_CEI;
 560	/* Epoch Extension */
 561	if (test_kvm_facility(vcpu->kvm, 139)) {
 562		scb_s->ecd |= scb_o->ecd & ECD_MEF;
 563		scb_s->epdx = scb_o->epdx;
 564	}
 565
 566	/* etoken */
 567	if (test_kvm_facility(vcpu->kvm, 156))
 568		scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;
 569
 570	scb_s->hpid = HPID_VSIE;
 571	scb_s->cpnc = scb_o->cpnc;
 572
 573	prepare_ibc(vcpu, vsie_page);
 574	rc = shadow_crycb(vcpu, vsie_page);
 575out:
 576	if (rc)
 577		unshadow_scb(vcpu, vsie_page);
 578	return rc;
 579}
 580
 581void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
 582				 unsigned long end)
 583{
 584	struct kvm *kvm = gmap->private;
 585	struct vsie_page *cur;
 586	unsigned long prefix;
 587	struct page *page;
 588	int i;
 589
 590	if (!gmap_is_shadow(gmap))
 591		return;
 592	/*
 593	 * Only new shadow blocks are added to the list during runtime,
 594	 * therefore we can safely reference them all the time.
 595	 */
 596	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
 597		page = READ_ONCE(kvm->arch.vsie.pages[i]);
 598		if (!page)
 599			continue;
 600		cur = page_to_virt(page);
 601		if (READ_ONCE(cur->gmap) != gmap)
 602			continue;
 603		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
 604		/* with mso/msl, the prefix lies at an offset */
 605		prefix += cur->scb_s.mso;
 606		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
 607			prefix_unmapped_sync(cur);
 608	}
 609}
 610
 611/*
 612 * Map the first prefix page and if tx is enabled also the second prefix page.
 613 *
 614 * The prefix will be protected, a gmap notifier will inform about unmaps.
 615 * The shadow scb must not be executed until the prefix is remapped, this is
 616 * guaranteed by properly handling PROG_REQUEST.
 617 *
  618 * Returns: - 0 if successfully mapped or already mapped
 619 *          - > 0 if control has to be given to guest 2
 620 *          - -EAGAIN if the caller can retry immediately
 621 *          - -ENOMEM if out of memory
 622 */
 623static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 624{
 625	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 626	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
 627	int rc;
 628
 629	if (prefix_is_mapped(vsie_page))
 630		return 0;
 631
 632	/* mark it as mapped so we can catch any concurrent unmappers */
 633	prefix_mapped(vsie_page);
 634
 635	/* with mso/msl, the prefix lies at offset *mso* */
 636	prefix += scb_s->mso;
 637
 638	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
 639	if (!rc && (scb_s->ecb & ECB_TE))
 640		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 641					   prefix + PAGE_SIZE, NULL);
 642	/*
 643	 * We don't have to mprotect, we will be called for all unshadows.
 644	 * SIE will detect if protection applies and trigger a validity.
 645	 */
 646	if (rc)
 647		prefix_unmapped(vsie_page);
 648	if (rc > 0 || rc == -EFAULT)
 649		rc = set_validity_icpt(scb_s, 0x0037U);
 650	return rc;
 651}
 652
 653/*
 654 * Pin the guest page given by gpa and set hpa to the pinned host address.
 655 * Will always be pinned writable.
 656 *
 657 * Returns: - 0 on success
 658 *          - -EINVAL if the gpa is not valid guest storage
 659 */
 660static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
 661{
 662	struct page *page;
 663
 664	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
 665	if (!page)
 666		return -EINVAL;
 667	*hpa = (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK);
 668	return 0;
 669}
 670
 671/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
 672static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
 673{
 674	kvm_release_page_dirty(pfn_to_page(hpa >> PAGE_SHIFT));
 675	/* mark the page always as dirty for migration */
 676	mark_page_dirty(kvm, gpa_to_gfn(gpa));
 677}
 678
 679/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
 680static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 681{
 682	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 683	hpa_t hpa;
 684
 685	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
 686	if (hpa) {
 687		unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);
 688		vsie_page->sca_gpa = 0;
 689		scb_s->scaol = 0;
 690		scb_s->scaoh = 0;
 691	}
 692
 693	hpa = scb_s->itdba;
 694	if (hpa) {
 695		unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
 696		vsie_page->itdba_gpa = 0;
 697		scb_s->itdba = 0;
 698	}
 699
 700	hpa = scb_s->gvrd;
 701	if (hpa) {
 702		unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa);
 703		vsie_page->gvrd_gpa = 0;
 704		scb_s->gvrd = 0;
 705	}
 706
 707	hpa = scb_s->riccbd;
 708	if (hpa) {
 709		unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa);
 710		vsie_page->riccbd_gpa = 0;
 711		scb_s->riccbd = 0;
 712	}
 713
 714	hpa = scb_s->sdnxo;
 715	if (hpa) {
 716		unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa);
 717		vsie_page->sdnx_gpa = 0;
 718		scb_s->sdnxo = 0;
 719	}
 720}
 721
 722/*
 723 * Instead of shadowing some blocks, we can simply forward them because the
 724 * addresses in the scb are 64 bit long.
 725 *
 726 * This works as long as the data lies in one page. If blocks ever exceed one
 727 * page, we have to fall back to shadowing.
 728 *
 729 * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
 730 * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
 731 *
 732 * Returns: - 0 if all blocks were pinned.
 733 *          - > 0 if control has to be given to guest 2
 734 *          - -ENOMEM if out of memory
 735 */
 736static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 737{
 738	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 739	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 740	hpa_t hpa;
 741	gpa_t gpa;
 742	int rc = 0;
 743
 744	gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
 745	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
 746		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
 747	if (gpa) {
 748		if (gpa < 2 * PAGE_SIZE)
 749			rc = set_validity_icpt(scb_s, 0x0038U);
 750		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
 751			rc = set_validity_icpt(scb_s, 0x0011U);
 752		else if ((gpa & PAGE_MASK) !=
 753			 ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
 754			rc = set_validity_icpt(scb_s, 0x003bU);
 755		if (!rc) {
 756			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 757			if (rc)
 758				rc = set_validity_icpt(scb_s, 0x0034U);
 759		}
 760		if (rc)
 761			goto unpin;
 762		vsie_page->sca_gpa = gpa;
 763		scb_s->scaoh = (u32)((u64)hpa >> 32);
 764		scb_s->scaol = (u32)(u64)hpa;
 765	}
 766
 767	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
 768	if (gpa && (scb_s->ecb & ECB_TE)) {
 769		if (gpa < 2 * PAGE_SIZE) {
 770			rc = set_validity_icpt(scb_s, 0x0080U);
 771			goto unpin;
 772		}
 773		/* 256 bytes cannot cross page boundaries */
 774		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 775		if (rc) {
 776			rc = set_validity_icpt(scb_s, 0x0080U);
 777			goto unpin;
 778		}
 779		vsie_page->itdba_gpa = gpa;
 780		scb_s->itdba = hpa;
 781	}
 782
 783	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
 784	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
 785		if (gpa < 2 * PAGE_SIZE) {
 786			rc = set_validity_icpt(scb_s, 0x1310U);
 787			goto unpin;
 788		}
 789		/*
  790		 * The 512 byte vector register save area cannot cross page
  791		 * boundaries; if this block gets bigger, we have to shadow it.
 792		 */
 793		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 794		if (rc) {
 795			rc = set_validity_icpt(scb_s, 0x1310U);
 796			goto unpin;
 797		}
 798		vsie_page->gvrd_gpa = gpa;
 799		scb_s->gvrd = hpa;
 800	}
 801
 802	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
 803	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
 804		if (gpa < 2 * PAGE_SIZE) {
 805			rc = set_validity_icpt(scb_s, 0x0043U);
 806			goto unpin;
 807		}
 808		/* 64 bytes cannot cross page boundaries */
 809		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 810		if (rc) {
 811			rc = set_validity_icpt(scb_s, 0x0043U);
 812			goto unpin;
 813		}
 814		/* Validity 0x0044 will be checked by SIE */
 815		vsie_page->riccbd_gpa = gpa;
 816		scb_s->riccbd = hpa;
 817	}
 818	if (((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) ||
 819	    (scb_s->ecd & ECD_ETOKENF)) {
 820		unsigned long sdnxc;
 821
 822		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
 823		sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
 824		if (!gpa || gpa < 2 * PAGE_SIZE) {
 825			rc = set_validity_icpt(scb_s, 0x10b0U);
 826			goto unpin;
 827		}
 828		if (sdnxc < 6 || sdnxc > 12) {
 829			rc = set_validity_icpt(scb_s, 0x10b1U);
 830			goto unpin;
 831		}
 832		if (gpa & ((1 << sdnxc) - 1)) {
 833			rc = set_validity_icpt(scb_s, 0x10b2U);
 834			goto unpin;
 835		}
 836		/* Due to alignment rules (checked above) this cannot
 837		 * cross page boundaries
 838		 */
 839		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 840		if (rc) {
 841			rc = set_validity_icpt(scb_s, 0x10b0U);
 842			goto unpin;
 843		}
 844		vsie_page->sdnx_gpa = gpa;
 845		scb_s->sdnxo = hpa | sdnxc;
 846	}
 847	return 0;
 848unpin:
 849	unpin_blocks(vcpu, vsie_page);
 850	return rc;
 851}
 852
 853/* unpin the scb provided by guest 2, marking it as dirty */
 854static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
 855		      gpa_t gpa)
 856{
 857	hpa_t hpa = virt_to_phys(vsie_page->scb_o);
 858
 859	if (hpa)
 860		unpin_guest_page(vcpu->kvm, gpa, hpa);
 861	vsie_page->scb_o = NULL;
 862}
 863
 864/*
 865 * Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
 866 *
 867 * Returns: - 0 if the scb was pinned.
 868 *          - > 0 if control has to be given to guest 2
 869 */
 870static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
 871		   gpa_t gpa)
 872{
 873	hpa_t hpa;
 874	int rc;
 875
 876	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 877	if (rc) {
 878		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 879		WARN_ON_ONCE(rc);
 880		return 1;
 881	}
 882	vsie_page->scb_o = phys_to_virt(hpa);
 883	return 0;
 884}
 885
 886/*
 887 * Inject a fault into guest 2.
 888 *
 889 * Returns: - > 0 if control has to be given to guest 2
  890 *          - < 0 if an error occurred during injection.
 891 */
 892static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
 893			bool write_flag)
 894{
 895	struct kvm_s390_pgm_info pgm = {
 896		.code = code,
 897		.trans_exc_code =
 898			/* 0-51: virtual address */
 899			(vaddr & 0xfffffffffffff000UL) |
 900			/* 52-53: store / fetch */
 901			(((unsigned int) !write_flag) + 1) << 10,
 902			/* 62-63: asce id (always primary == 0) */
 903		.exc_access_id = 0, /* always primary */
 904		.op_access_id = 0, /* not MVPG */
 905	};
 906	int rc;
 907
 908	if (code == PGM_PROTECTION)
 909		pgm.trans_exc_code |= 0x4UL;
 910
 911	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
 912	return rc ? rc : 1;
 913}
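/*
 * Reminder on the bit numbers above: s390 counts bits from the MSB,
 * so "bits 52-53" of the 64-bit trans_exc_code carry the weights
 * 2^11 and 2^10, which is why the store/fetch indication is shifted
 * left by 10, and "bits 62-63" are the two least significant bits.
 */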
 914
 915/*
 916 * Handle a fault during vsie execution on a gmap shadow.
 917 *
 918 * Returns: - 0 if the fault was resolved
 919 *          - > 0 if control has to be given to guest 2
 920 *          - < 0 if an error occurred
 921 */
 922static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 923{
 924	int rc;
 925
 926	if ((current->thread.gmap_int_code & PGM_INT_CODE_MASK) == PGM_PROTECTION)
 927		/* we can directly forward all protection exceptions */
 928		return inject_fault(vcpu, PGM_PROTECTION,
 929				    current->thread.gmap_teid.addr * PAGE_SIZE, 1);
 930
 931	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 932				   current->thread.gmap_teid.addr * PAGE_SIZE, NULL);
 933	if (rc > 0) {
 934		rc = inject_fault(vcpu, rc,
 935				  current->thread.gmap_teid.addr * PAGE_SIZE,
 936				  kvm_s390_cur_gmap_fault_is_write());
 937		if (rc >= 0)
 938			vsie_page->fault_addr = current->thread.gmap_teid.addr * PAGE_SIZE;
 939	}
 940	return rc;
 941}
 942
 943/*
 944 * Retry the previous fault that required guest 2 intervention. This avoids
 945 * one superfluous SIE re-entry and direct exit.
 946 *
 947 * Will ignore any errors. The next SIE fault will do proper fault handling.
 948 */
 949static void handle_last_fault(struct kvm_vcpu *vcpu,
 950			      struct vsie_page *vsie_page)
 951{
 952	if (vsie_page->fault_addr)
 953		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 954				      vsie_page->fault_addr, NULL);
 955	vsie_page->fault_addr = 0;
 956}
 957
 958static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
 959{
 960	vsie_page->scb_s.icptcode = 0;
 961}
 962
 963/* rewind the psw and clear the vsie icpt, so we can retry execution */
 964static void retry_vsie_icpt(struct vsie_page *vsie_page)
 965{
 966	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 967	int ilen = insn_length(scb_s->ipa >> 8);
 968
 969	/* take care of EXECUTE instructions */
 970	if (scb_s->icptstatus & 1) {
 971		ilen = (scb_s->icptstatus >> 4) & 0x6;
 972		if (!ilen)
 973			ilen = 4;
 974	}
 975	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
 976	clear_vsie_icpt(vsie_page);
 977}
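/*
 * When the intercepted instruction was the target of an EXECUTE
 * (flagged in the lowest bit of icptstatus), the PSW points past the
 * EXECUTE itself, so the rewind length above is taken from icptstatus
 * (with 4 as fallback) instead of being derived from the target opcode
 * in ipa.
 */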
 978
 979/*
 980 * Try to shadow + enable the guest 2 provided facility list.
 981 * Retry instruction execution if enabled for and provided by guest 2.
 982 *
 983 * Returns: - 0 if handled (retry or guest 2 icpt)
 984 *          - > 0 if control has to be given to guest 2
 985 */
 986static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 987{
 988	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 989	__u32 fac = READ_ONCE(vsie_page->scb_o->fac);
 990
 991	/*
 992	 * Alternate-STFLE-Interpretive-Execution facilities are not supported
 993	 * -> format-0 flcb
 994	 */
 995	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
 996		retry_vsie_icpt(vsie_page);
 997		/*
 998		 * The facility list origin (FLO) is in bits 1 - 28 of the FLD
 999		 * so we need to mask here before reading.
1000		 */
1001		fac = fac & 0x7ffffff8U;
1002		/*
1003		 * format-0 -> size of nested guest's facility list == guest's size
1004		 * guest's size == host's size, since STFLE is interpretatively executed
1005		 * using a format-0 for the guest, too.
1006		 */
1007		if (read_guest_real(vcpu, fac, &vsie_page->fac,
1008				    stfle_size() * sizeof(u64)))
1009			return set_validity_icpt(scb_s, 0x1090U);
1010		scb_s->fac = (u32)virt_to_phys(&vsie_page->fac);
1011	}
1012	return 0;
1013}
1014
1015/*
1016 * Get a register for a nested guest.
1017 * @vcpu the vcpu of the guest
1018 * @vsie_page the vsie_page for the nested guest
1019 * @reg the register number, the upper 4 bits are ignored.
1020 * returns: the value of the register.
1021 */
1022static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
1023{
1024	/* no need to validate the parameter and/or perform error handling */
1025	reg &= 0xf;
1026	switch (reg) {
1027	case 15:
1028		return vsie_page->scb_s.gg15;
1029	case 14:
1030		return vsie_page->scb_s.gg14;
1031	default:
1032		return vcpu->run->s.regs.gprs[reg];
1033	}
1034}
1035
1036static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1037{
1038	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1039	unsigned long pei_dest, pei_src, src, dest, mask, prefix;
1040	u64 *pei_block = &vsie_page->scb_o->mcic;
1041	int edat, rc_dest, rc_src;
1042	union ctlreg0 cr0;
1043
1044	cr0.val = vcpu->arch.sie_block->gcr[0];
1045	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
1046	mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
1047	prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
1048
1049	dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
1050	dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
1051	src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
1052	src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;
1053
1054	rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest);
1055	rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src);
1056	/*
1057	 * Either everything went well, or something non-critical went wrong
1058	 * e.g. because of a race. In either case, simply retry.
1059	 */
1060	if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
1061		retry_vsie_icpt(vsie_page);
1062		return -EAGAIN;
1063	}
1064	/* Something more serious went wrong, propagate the error */
1065	if (rc_dest < 0)
1066		return rc_dest;
1067	if (rc_src < 0)
1068		return rc_src;
1069
1070	/* The only possible suppressing exception: just deliver it */
1071	if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
1072		clear_vsie_icpt(vsie_page);
1073		rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
1074		WARN_ON_ONCE(rc_dest);
1075		return 1;
1076	}
1077
1078	/*
1079	 * Forward the PEI intercept to the guest if it was a page fault, or
1080	 * also for segment and region table faults if EDAT applies.
1081	 */
1082	if (edat) {
1083		rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
1084		rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
1085	} else {
1086		rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
1087		rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
1088	}
1089	if (!rc_dest && !rc_src) {
1090		pei_block[0] = pei_dest;
1091		pei_block[1] = pei_src;
1092		return 1;
1093	}
1094
1095	retry_vsie_icpt(vsie_page);
1096
1097	/*
1098	 * The host has edat, and the guest does not, or it was an ASCE type
1099	 * exception. The host needs to inject the appropriate DAT interrupts
1100	 * into the guest.
1101	 */
1102	if (rc_dest)
1103		return inject_fault(vcpu, rc_dest, dest, 1);
1104	return inject_fault(vcpu, rc_src, src, 0);
1105}
1106
1107/*
1108 * Run the vsie on a shadow scb and a shadow gmap, without any further
1109 * sanity checks, handling SIE faults.
1110 *
1111 * Returns: - 0 everything went fine
1112 *          - > 0 if control has to be given to guest 2
1113 *          - < 0 if an error occurred
1114 */
1115static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1116	__releases(vcpu->kvm->srcu)
1117	__acquires(vcpu->kvm->srcu)
1118{
1119	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1120	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
1121	int guest_bp_isolation;
1122	int rc = 0;
1123
1124	handle_last_fault(vcpu, vsie_page);
1125
1126	kvm_vcpu_srcu_read_unlock(vcpu);
1127
1128	/* save current guest state of bp isolation override */
1129	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
1130
1131	/*
1132	 * The guest is running with BPBC, so we have to force it on for our
1133	 * nested guest. This is done by enabling BPBC globally, so the BPBC
1134	 * control in the SCB (which the nested guest can modify) is simply
1135	 * ignored.
1136	 */
1137	if (test_kvm_facility(vcpu->kvm, 82) &&
1138	    vcpu->arch.sie_block->fpf & FPF_BPBC)
1139		set_thread_flag(TIF_ISOLATE_BP_GUEST);
1140
1141	local_irq_disable();
1142	guest_enter_irqoff();
1143	local_irq_enable();
1144
1145	/*
1146	 * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
1147	 * and VCPU requests also hinder the vSIE from running and lead
1148	 * to an immediate exit. kvm_s390_vsie_kick() has to be used to
1149	 * also kick the vSIE.
1150	 */
1151	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
1152	current->thread.gmap_int_code = 0;
1153	barrier();
1154	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
1155		rc = sie64a(scb_s, vcpu->run->s.regs.gprs, vsie_page->gmap->asce);
1156	barrier();
1157	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
1158
1159	local_irq_disable();
1160	guest_exit_irqoff();
1161	local_irq_enable();
1162
1163	/* restore guest state for bp isolation override */
1164	if (!guest_bp_isolation)
1165		clear_thread_flag(TIF_ISOLATE_BP_GUEST);
1166
1167	kvm_vcpu_srcu_read_lock(vcpu);
1168
1169	if (rc == -EINTR) {
1170		VCPU_EVENT(vcpu, 3, "%s", "machine check");
1171		kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
1172		return 0;
1173	}
1174
1175	if (rc > 0)
1176		rc = 0; /* we could still have an icpt */
1177	else if (current->thread.gmap_int_code)
1178		return handle_fault(vcpu, vsie_page);
1179
1180	switch (scb_s->icptcode) {
1181	case ICPT_INST:
1182		if (scb_s->ipa == 0xb2b0)
1183			rc = handle_stfle(vcpu, vsie_page);
1184		break;
1185	case ICPT_STOP:
1186		/* stop not requested by g2 - must have been a kick */
1187		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
1188			clear_vsie_icpt(vsie_page);
1189		break;
1190	case ICPT_VALIDITY:
1191		if ((scb_s->ipa & 0xf000) != 0xf000)
1192			scb_s->ipa += 0x1000;
1193		break;
1194	case ICPT_PARTEXEC:
1195		if (scb_s->ipa == 0xb254)
1196			rc = vsie_handle_mvpg(vcpu, vsie_page);
1197		break;
1198	}
1199	return rc;
1200}
1201
1202static void release_gmap_shadow(struct vsie_page *vsie_page)
1203{
1204	if (vsie_page->gmap)
1205		gmap_put(vsie_page->gmap);
1206	WRITE_ONCE(vsie_page->gmap, NULL);
1207	prefix_unmapped(vsie_page);
1208}
1209
1210static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
1211			       struct vsie_page *vsie_page)
1212{
1213	unsigned long asce;
1214	union ctlreg0 cr0;
1215	struct gmap *gmap;
1216	int edat;
1217
1218	asce = vcpu->arch.sie_block->gcr[1];
1219	cr0.val = vcpu->arch.sie_block->gcr[0];
1220	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
1221	edat += edat && test_kvm_facility(vcpu->kvm, 78);
1222
1223	/*
1224	 * ASCE or EDAT could have changed since last icpt, or the gmap
1225	 * we're holding has been unshadowed. If the gmap is still valid,
1226	 * we can safely reuse it.
1227	 */
1228	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat)) {
1229		vcpu->kvm->stat.gmap_shadow_reuse++;
1230		return 0;
1231	}
1232
1233	/* release the old shadow - if any, and mark the prefix as unmapped */
1234	release_gmap_shadow(vsie_page);
1235	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
1236	if (IS_ERR(gmap))
1237		return PTR_ERR(gmap);
1238	vcpu->kvm->stat.gmap_shadow_create++;
1239	WRITE_ONCE(vsie_page->gmap, gmap);
1240	return 0;
1241}
1242
1243/*
1244 * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
1245 */
1246static void register_shadow_scb(struct kvm_vcpu *vcpu,
1247				struct vsie_page *vsie_page)
1248{
1249	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1250
1251	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
1252	/*
1253	 * External calls have to lead to a kick of the vcpu and
1254	 * therefore the vsie -> Simulate Wait state.
1255	 */
1256	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
1257	/*
1258	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
1259	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
1260	 */
1261	preempt_disable();
1262	scb_s->epoch += vcpu->kvm->arch.epoch;
1263
1264	if (scb_s->ecd & ECD_MEF) {
1265		scb_s->epdx += vcpu->kvm->arch.epdx;
1266		if (scb_s->epoch < vcpu->kvm->arch.epoch)
1267			scb_s->epdx += 1;
1268	}
1269
1270	preempt_enable();
1271}
1272
1273/*
1274 * Unregister a shadow scb from a VCPU.
1275 */
1276static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
1277{
1278	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
1279	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
1280}
1281
1282/*
1283 * Run the vsie on a shadowed scb, managing the gmap shadow, handling
1284 * prefix pages and faults.
1285 *
1286 * Returns: - 0 if no errors occurred
1287 *          - > 0 if control has to be given to guest 2
1288 *          - -ENOMEM if out of memory
1289 */
1290static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1291{
1292	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1293	int rc = 0;
1294
1295	while (1) {
1296		rc = acquire_gmap_shadow(vcpu, vsie_page);
1297		if (!rc)
1298			rc = map_prefix(vcpu, vsie_page);
1299		if (!rc) {
1300			update_intervention_requests(vsie_page);
1301			rc = do_vsie_run(vcpu, vsie_page);
1302		}
1303		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);
1304
1305		if (rc == -EAGAIN)
1306			rc = 0;
1307
1308		/*
1309		 * Exit the loop if the guest needs to process the intercept
1310		 */
1311		if (rc || scb_s->icptcode)
1312			break;
1313
1314		/*
1315		 * Exit the loop if the host needs to process an intercept,
1316		 * but rewind the PSW to re-enter SIE once that's completed
1317		 * instead of passing a "no action" intercept to the guest.
1318		 */
1319		if (signal_pending(current) ||
1320		    kvm_s390_vcpu_has_irq(vcpu, 0) ||
1321		    kvm_s390_vcpu_sie_inhibited(vcpu)) {
1322			kvm_s390_rewind_psw(vcpu, 4);
1323			break;
1324		}
1325		cond_resched();
1326	}
1327
1328	if (rc == -EFAULT) {
1329		/*
 1330		 * Addressing exceptions are always presented as intercepts.
1331		 * As addressing exceptions are suppressing and our guest 3 PSW
1332		 * points at the responsible instruction, we have to
1333		 * forward the PSW and set the ilc. If we can't read guest 3
1334		 * instruction, we can use an arbitrary ilc. Let's always use
1335		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
1336		 * memory. (we could also fake the shadow so the hardware
1337		 * handles it).
1338		 */
1339		scb_s->icptcode = ICPT_PROGI;
1340		scb_s->iprcc = PGM_ADDRESSING;
1341		scb_s->pgmilc = 4;
1342		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
1343		rc = 1;
1344	}
1345	return rc;
1346}
1347
1348/*
1349 * Get or create a vsie page for a scb address.
1350 *
1351 * Returns: - address of a vsie page (cached or new one)
1352 *          - NULL if the same scb address is already used by another VCPU
1353 *          - ERR_PTR(-ENOMEM) if out of memory
1354 */
1355static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
1356{
1357	struct vsie_page *vsie_page;
1358	struct page *page;
1359	int nr_vcpus;
1360
1361	rcu_read_lock();
1362	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
1363	rcu_read_unlock();
1364	if (page) {
1365		if (page_ref_inc_return(page) == 2) {
1366			if (page->index == addr)
1367				return page_to_virt(page);
1368			/*
1369			 * We raced with someone reusing + putting this vsie
1370			 * page before we grabbed it.
1371			 */
1372		}
1373		page_ref_dec(page);
1374	}
1375
1376	/*
1377	 * We want at least #online_vcpus shadows, so every VCPU can execute
1378	 * the VSIE in parallel.
1379	 */
1380	nr_vcpus = atomic_read(&kvm->online_vcpus);
1381
1382	mutex_lock(&kvm->arch.vsie.mutex);
1383	if (kvm->arch.vsie.page_count < nr_vcpus) {
1384		page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
1385		if (!page) {
1386			mutex_unlock(&kvm->arch.vsie.mutex);
1387			return ERR_PTR(-ENOMEM);
1388		}
1389		page_ref_inc(page);
1390		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
1391		kvm->arch.vsie.page_count++;
1392	} else {
1393		/* reuse an existing entry that belongs to nobody */
1394		while (true) {
1395			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
1396			if (page_ref_inc_return(page) == 2)
1397				break;
1398			page_ref_dec(page);
1399			kvm->arch.vsie.next++;
1400			kvm->arch.vsie.next %= nr_vcpus;
1401		}
1402		if (page->index != ULONG_MAX)
1403			radix_tree_delete(&kvm->arch.vsie.addr_to_page,
1404					  page->index >> 9);
1405	}
1406	/* Mark it as invalid until it resides in the tree. */
1407	page->index = ULONG_MAX;
1408
1409	/* Double use of the same address or allocation failure. */
1410	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
1411		page_ref_dec(page);
1412		mutex_unlock(&kvm->arch.vsie.mutex);
1413		return NULL;
1414	}
1415	page->index = addr;
1416	mutex_unlock(&kvm->arch.vsie.mutex);
1417
1418	vsie_page = page_to_virt(page);
1419	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
1420	release_gmap_shadow(vsie_page);
1421	vsie_page->fault_addr = 0;
1422	vsie_page->scb_s.ihcpu = 0xffffU;
1423	return vsie_page;
1424}
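/*
 * Note: the radix tree used above is keyed by addr >> 9. The scb
 * provided by guest 2 must be 512 byte aligned (enforced in
 * kvm_s390_handle_vsie()), so the lower 9 bits of addr are always zero
 * and can be shifted out without losing information.
 */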
1425
1426/* put a vsie page acquired via get_vsie_page */
1427static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
1428{
1429	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);
1430
1431	page_ref_dec(page);
1432}
1433
1434int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
1435{
1436	struct vsie_page *vsie_page;
1437	unsigned long scb_addr;
1438	int rc;
1439
1440	vcpu->stat.instruction_sie++;
1441	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
1442		return -EOPNOTSUPP;
1443	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1444		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1445
1446	BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
1447	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);
1448
1449	/* 512 byte alignment */
1450	if (unlikely(scb_addr & 0x1ffUL))
1451		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
1452
1453	if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
1454	    kvm_s390_vcpu_sie_inhibited(vcpu)) {
1455		kvm_s390_rewind_psw(vcpu, 4);
1456		return 0;
1457	}
1458
1459	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
1460	if (IS_ERR(vsie_page))
1461		return PTR_ERR(vsie_page);
1462	else if (!vsie_page)
1463		/* double use of sie control block - simply do nothing */
1464		return 0;
1465
1466	rc = pin_scb(vcpu, vsie_page, scb_addr);
1467	if (rc)
1468		goto out_put;
1469	rc = shadow_scb(vcpu, vsie_page);
1470	if (rc)
1471		goto out_unpin_scb;
1472	rc = pin_blocks(vcpu, vsie_page);
1473	if (rc)
1474		goto out_unshadow;
1475	register_shadow_scb(vcpu, vsie_page);
1476	rc = vsie_run(vcpu, vsie_page);
1477	unregister_shadow_scb(vcpu);
1478	unpin_blocks(vcpu, vsie_page);
1479out_unshadow:
1480	unshadow_scb(vcpu, vsie_page);
1481out_unpin_scb:
1482	unpin_scb(vcpu, vsie_page, scb_addr);
1483out_put:
1484	put_vsie_page(vcpu->kvm, vsie_page);
1485
1486	return rc < 0 ? rc : 0;
1487}
1488
1489/* Init the vsie data structures. To be called when a vm is initialized. */
1490void kvm_s390_vsie_init(struct kvm *kvm)
1491{
1492	mutex_init(&kvm->arch.vsie.mutex);
1493	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL_ACCOUNT);
1494}
1495
1496/* Destroy the vsie data structures. To be called when a vm is destroyed. */
1497void kvm_s390_vsie_destroy(struct kvm *kvm)
1498{
1499	struct vsie_page *vsie_page;
1500	struct page *page;
1501	int i;
1502
1503	mutex_lock(&kvm->arch.vsie.mutex);
1504	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
1505		page = kvm->arch.vsie.pages[i];
1506		kvm->arch.vsie.pages[i] = NULL;
1507		vsie_page = page_to_virt(page);
1508		release_gmap_shadow(vsie_page);
1509		/* free the radix tree entry */
1510		if (page->index != ULONG_MAX)
1511			radix_tree_delete(&kvm->arch.vsie.addr_to_page,
1512					  page->index >> 9);
1513		__free_page(page);
1514	}
1515	kvm->arch.vsie.page_count = 0;
1516	mutex_unlock(&kvm->arch.vsie.mutex);
1517}
1518
1519void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
1520{
1521	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);
1522
1523	/*
1524	 * Even if the VCPU lets go of the shadow sie block reference, it is
1525	 * still valid in the cache. So we can safely kick it.
1526	 */
1527	if (scb) {
1528		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
1529		if (scb->prog0c & PROG_IN_SIE)
1530			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
1531	}
1532}
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * kvm nested virtualization support for s390x
   4 *
   5 * Copyright IBM Corp. 2016, 2018
   6 *
   7 *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
   8 */
   9#include <linux/vmalloc.h>
  10#include <linux/kvm_host.h>
  11#include <linux/bug.h>
  12#include <linux/list.h>
  13#include <linux/bitmap.h>
  14#include <linux/sched/signal.h>
 
  15
  16#include <asm/gmap.h>
  17#include <asm/mmu_context.h>
  18#include <asm/sclp.h>
  19#include <asm/nmi.h>
  20#include <asm/dis.h>
 
  21#include "kvm-s390.h"
  22#include "gaccess.h"
  23
  24struct vsie_page {
  25	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
  26	/*
  27	 * the backup info for machine check. ensure it's at
  28	 * the same offset as that in struct sie_page!
  29	 */
  30	struct mcck_volatile_info mcck_info;    /* 0x0200 */
  31	/*
  32	 * The pinned original scb. Be aware that other VCPUs can modify
  33	 * it while we read from it. Values that are used for conditions or
  34	 * are reused conditionally, should be accessed via READ_ONCE.
  35	 */
  36	struct kvm_s390_sie_block *scb_o;	/* 0x0218 */
  37	/* the shadow gmap in use by the vsie_page */
  38	struct gmap *gmap;			/* 0x0220 */
  39	/* address of the last reported fault to guest2 */
  40	unsigned long fault_addr;		/* 0x0228 */
  41	/* calculated guest addresses of satellite control blocks */
  42	gpa_t sca_gpa;				/* 0x0230 */
  43	gpa_t itdba_gpa;			/* 0x0238 */
  44	gpa_t gvrd_gpa;				/* 0x0240 */
  45	gpa_t riccbd_gpa;			/* 0x0248 */
  46	gpa_t sdnx_gpa;				/* 0x0250 */
  47	__u8 reserved[0x0700 - 0x0258];		/* 0x0258 */
  48	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
  49	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
  50};
  51
  52/* trigger a validity icpt for the given scb */
  53static int set_validity_icpt(struct kvm_s390_sie_block *scb,
  54			     __u16 reason_code)
  55{
  56	scb->ipa = 0x1000;
  57	scb->ipb = ((__u32) reason_code) << 16;
  58	scb->icptcode = ICPT_VALIDITY;
  59	return 1;
  60}
  61
  62/* mark the prefix as unmapped, this will block the VSIE */
  63static void prefix_unmapped(struct vsie_page *vsie_page)
  64{
  65	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
  66}
  67
  68/* mark the prefix as unmapped and wait until the VSIE has been left */
  69static void prefix_unmapped_sync(struct vsie_page *vsie_page)
  70{
  71	prefix_unmapped(vsie_page);
  72	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
  73		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
  74	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
  75		cpu_relax();
  76}
  77
  78/* mark the prefix as mapped, this will allow the VSIE to run */
  79static void prefix_mapped(struct vsie_page *vsie_page)
  80{
  81	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
  82}
  83
  84/* test if the prefix is mapped into the gmap shadow */
  85static int prefix_is_mapped(struct vsie_page *vsie_page)
  86{
  87	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
  88}
  89
  90/* copy the updated intervention request bits into the shadow scb */
  91static void update_intervention_requests(struct vsie_page *vsie_page)
  92{
  93	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
  94	int cpuflags;
  95
  96	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
  97	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
  98	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
  99}
 100
 101/* shadow (filter and validate) the cpuflags  */
 102static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 103{
 104	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 105	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 106	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);
 107
 108	/* we don't allow ESA/390 guests */
 109	if (!(cpuflags & CPUSTAT_ZARCH))
 110		return set_validity_icpt(scb_s, 0x0001U);
 111
 112	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
 113		return set_validity_icpt(scb_s, 0x0001U);
 114	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
 115		return set_validity_icpt(scb_s, 0x0007U);
 116
 117	/* intervention requests will be set later */
 118	newflags = CPUSTAT_ZARCH;
 119	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
 120		newflags |= CPUSTAT_GED;
 121	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
 122		if (cpuflags & CPUSTAT_GED)
 123			return set_validity_icpt(scb_s, 0x0001U);
 124		newflags |= CPUSTAT_GED2;
 125	}
 126	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
 127		newflags |= cpuflags & CPUSTAT_P;
 128	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
 129		newflags |= cpuflags & CPUSTAT_SM;
 130	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
 131		newflags |= cpuflags & CPUSTAT_IBS;
 132	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
 133		newflags |= cpuflags & CPUSTAT_KSS;
 134
 135	atomic_set(&scb_s->cpuflags, newflags);
 136	return 0;
 137}
 138/* Copy to APCB FORMAT1 from APCB FORMAT0 */
 139static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
 140			unsigned long apcb_o, struct kvm_s390_apcb1 *apcb_h)
 141{
 142	struct kvm_s390_apcb0 tmp;
 
 143
 144	if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0)))
 
 
 
 145		return -EFAULT;
 146
 147	apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
 148	apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
 149	apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;
 150
 151	return 0;
 152
 153}
 154
 155/**
 156 * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
 157 * @vcpu: pointer to the virtual CPU
 158 * @apcb_s: pointer to start of apcb in the shadow crycb
 159 * @apcb_o: pointer to start of original apcb in the guest2
 160 * @apcb_h: pointer to start of apcb in the guest1
 161 *
 162 * Returns 0 and -EFAULT on error reading guest apcb
 163 */
 164static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
 165			unsigned long apcb_o, unsigned long *apcb_h)
 166{
 167	if (read_guest_real(vcpu, apcb_o, apcb_s,
 168			    sizeof(struct kvm_s390_apcb0)))
 169		return -EFAULT;
 170
 171	bitmap_and(apcb_s, apcb_s, apcb_h, BITS_PER_BYTE * sizeof(struct kvm_s390_apcb0));
 172
 173	return 0;
 174}
 175
 176/**
 177 * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
 178 * @vcpu: pointer to the virtual CPU
 179 * @apcb_s: pointer to start of apcb in the shadow crycb
 180 * @apcb_o: pointer to start of original guest apcb
 181 * @apcb_h: pointer to start of apcb in the host
 182 *
 183 * Returns 0 on success; -EFAULT on error reading the guest apcb
 184 */
 185static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
 186			unsigned long apcb_o,
 187			unsigned long *apcb_h)
 188{
 189	if (read_guest_real(vcpu, apcb_o, apcb_s,
 190			    sizeof(struct kvm_s390_apcb1)))
 191		return -EFAULT;
 192
 193	bitmap_and(apcb_s, apcb_s, apcb_h, BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1));
 194
 195	return 0;
 196}
 197
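/*
 * Illustration (hypothetical userspace sketch): bitmap_and() takes its
 * length in bits, not bytes, which is why the two calls above scale
 * sizeof() by BITS_PER_BYTE. Passing the byte count would mask only a
 * fraction of the APCB. The same "shadow = guest & host" filtering over
 * plain 64-bit words:
 */
#include <stddef.h>
#include <stdint.h>

static void ex_apcb_and(uint64_t *shadow, const uint64_t *guest,
			const uint64_t *host, size_t nbits)
{
	size_t i;

	/* AND one 64-bit word at a time; nbits is assumed word aligned */
	for (i = 0; i < nbits / 64; i++)
		shadow[i] = guest[i] & host[i];
}
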
 198/**
 199 * setup_apcb - Create a shadow copy of the apcb.
 200 * @vcpu: pointer to the virtual CPU
 201 * @crycb_s: pointer to shadow crycb
 202 * @crycb_o: pointer to original guest crycb
 203 * @crycb_h: pointer to the host crycb
 204 * @fmt_o: format of the original guest crycb.
 205 * @fmt_h: format of the host crycb.
 206 *
 207 * Checks the compatibility between the guest and host crycb and calls the
 208 * appropriate copy function.
 209 *
 210 * Returns 0 on success or an error number if the guest and host crycb are incompatible.
 211 */
 212static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
 213	       const u32 crycb_o,
 214	       struct kvm_s390_crypto_cb *crycb_h,
 215	       int fmt_o, int fmt_h)
 216{
 217	struct kvm_s390_crypto_cb *crycb;
 218
 219	crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o;
 220
 221	switch (fmt_o) {
 222	case CRYCB_FORMAT2:
 223		if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK))
 224			return -EACCES;
 225		if (fmt_h != CRYCB_FORMAT2)
 226			return -EINVAL;
 227		return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
 228				    (unsigned long) &crycb->apcb1,
 229				    (unsigned long *)&crycb_h->apcb1);
 230	case CRYCB_FORMAT1:
 231		switch (fmt_h) {
 232		case CRYCB_FORMAT2:
 233			return setup_apcb10(vcpu, &crycb_s->apcb1,
 234					    (unsigned long) &crycb->apcb0,
 235					    &crycb_h->apcb1);
 236		case CRYCB_FORMAT1:
 237			return setup_apcb00(vcpu,
 238					    (unsigned long *) &crycb_s->apcb0,
 239					    (unsigned long) &crycb->apcb0,
 240					    (unsigned long *) &crycb_h->apcb0);
 241		}
 242		break;
 243	case CRYCB_FORMAT0:
 244		if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK))
 245			return -EACCES;
 246
 247		switch (fmt_h) {
 248		case CRYCB_FORMAT2:
 249			return setup_apcb10(vcpu, &crycb_s->apcb1,
 250					    (unsigned long) &crycb->apcb0,
 251					    &crycb_h->apcb1);
 252		case CRYCB_FORMAT1:
 253		case CRYCB_FORMAT0:
 254			return setup_apcb00(vcpu,
 255					    (unsigned long *) &crycb_s->apcb0,
 256					    (unsigned long) &crycb->apcb0,
 257					    (unsigned long *) &crycb_h->apcb0);
 258		}
 259	}
 260	return -EINVAL;
 261}
 262
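/*
 * Illustration (hypothetical userspace sketch): the -EACCES checks above
 * reject control blocks that straddle a host page, because pinning maps
 * exactly one page. The idiom compares the page frame of the first byte
 * with that of the last byte of interest:
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_PAGE_SIZE 4096UL
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

static bool ex_crosses_page(uint64_t gpa, uint64_t len)
{
	return (gpa & EX_PAGE_MASK) != ((gpa + len - 1) & EX_PAGE_MASK);
}
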
 263/**
 264 * shadow_crycb - Create a shadow copy of the crycb block
 265 * @vcpu: a pointer to the virtual CPU
 266 * @vsie_page: a pointer to internal data used for the vSIE
 267 *
 268 * Create a shadow copy of the crycb block and setup key wrapping, if
 269 * requested for guest 3 and enabled for guest 2.
 270 *
 271 * We accept format-1 or format-2, but we convert format-1 into format-2
 272 * in the shadow CRYCB.
 273 * Using format-2 enables the firmware to choose the right format when
 274 * scheduling the SIE.
 275 * There is nothing to do for format-0.
 276 *
 277 * This function centralizes the issuing of set_validity_icpt() for all
 278 * the subfunctions working on the crycb.
 279 *
 280 * Returns: - 0 if shadowed or nothing to do
 281 *          - > 0 if control has to be given to guest 2
 282 */
 283static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 284{
 285	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 286	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 287	const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
 288	const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
 289	unsigned long *b1, *b2;
 290	u8 ecb3_flags;
 291	u32 ecd_flags;
 292	int apie_h;
 293	int apie_s;
 294	int key_msk = test_kvm_facility(vcpu->kvm, 76);
 295	int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
 296	int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
 297	int ret = 0;
 298
 299	scb_s->crycbd = 0;
 300
 301	apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
 302	apie_s = apie_h & scb_o->eca;
 303	if (!apie_s && (!key_msk || (fmt_o == CRYCB_FORMAT0)))
 304		return 0;
 305
 306	if (!crycb_addr)
 307		return set_validity_icpt(scb_s, 0x0039U);
 308
 309	if (fmt_o == CRYCB_FORMAT1)
 310		if ((crycb_addr & PAGE_MASK) !=
 311		    ((crycb_addr + 128) & PAGE_MASK))
 312			return set_validity_icpt(scb_s, 0x003CU);
 313
 314	if (apie_s) {
 315		ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
 316				 vcpu->kvm->arch.crypto.crycb,
 317				 fmt_o, fmt_h);
 318		if (ret)
 319			goto end;
 320		scb_s->eca |= scb_o->eca & ECA_APIE;
 321	}
 322
 323	/* we may only allow it if enabled for guest 2 */
 324	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
 325		     (ECB3_AES | ECB3_DEA);
 326	ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC;
 327	if (!ecb3_flags && !ecd_flags)
 328		goto end;
 329
 330	/* copy only the wrapping keys */
 331	if (read_guest_real(vcpu, crycb_addr + 72,
 332			    vsie_page->crycb.dea_wrapping_key_mask, 56))
 333		return set_validity_icpt(scb_s, 0x0035U);
 334
 335	scb_s->ecb3 |= ecb3_flags;
 336	scb_s->ecd |= ecd_flags;
 337
 338	/* xor both blocks in one run */
 339	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
 340	b2 = (unsigned long *)
 341			    vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
 342	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
 343	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
 344end:
 345	switch (ret) {
 346	case -EINVAL:
 347		return set_validity_icpt(scb_s, 0x0022U);
 348	case -EFAULT:
 349		return set_validity_icpt(scb_s, 0x0035U);
 350	case -EACCES:
 351		return set_validity_icpt(scb_s, 0x003CU);
 352	}
 353	scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
 354	return 0;
 355}
 356
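/*
 * Illustration (hypothetical userspace sketch): crycbd packs the crycb
 * origin and the format identifier into one 32-bit word. The origin is
 * at least 8-byte aligned, so its low three bits are free for the format
 * and it can be recovered with the 0x7ffffff8 mask seen above:
 */
#include <stdint.h>

#define EX_CRYCB_FORMAT_MASK 0x00000003U

static uint32_t ex_crycbd_pack(uint32_t origin, uint32_t fmt)
{
	return (origin & 0x7ffffff8U) | (fmt & EX_CRYCB_FORMAT_MASK);
}

static uint32_t ex_crycbd_origin(uint32_t crycbd)
{
	return crycbd & 0x7ffffff8U;
}
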
 357/* shadow (round up/down) the ibc to avoid validity icpt */
 358static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 359{
 360	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 361	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 362	/* READ_ONCE does not work on bitfields - use a temporary variable */
 363	const uint32_t __new_ibc = scb_o->ibc;
 364	const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
 365	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;
 366
 367	scb_s->ibc = 0;
 368	/* ibc installed in g2 and requested for g3 */
 369	if (vcpu->kvm->arch.model.ibc && new_ibc) {
 370		scb_s->ibc = new_ibc;
 371		/* take care of the minimum ibc level of the machine */
 372		if (scb_s->ibc < min_ibc)
 373			scb_s->ibc = min_ibc;
 374		/* take care of the maximum ibc level set for the guest */
 375		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
 376			scb_s->ibc = vcpu->kvm->arch.model.ibc;
 377	}
 378}
 379
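/*
 * Illustration (hypothetical userspace sketch): the shadowing above is a
 * plain clamp of the requested ibc into the range from the machine's
 * minimum level to the maximum level set for guest 2:
 */
#include <stdint.h>

static uint16_t ex_clamp_ibc(uint16_t requested, uint16_t lo, uint16_t hi)
{
	if (requested < lo)	/* lowest level the machine supports */
		return lo;
	if (requested > hi)	/* highest level configured for the guest */
		return hi;
	return requested;
}
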
 380/* unshadow the scb, copying parameters back to the real scb */
 381static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 382{
 383	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 384	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 385
 386	/* interception */
 387	scb_o->icptcode = scb_s->icptcode;
 388	scb_o->icptstatus = scb_s->icptstatus;
 389	scb_o->ipa = scb_s->ipa;
 390	scb_o->ipb = scb_s->ipb;
 391	scb_o->gbea = scb_s->gbea;
 392
 393	/* timer */
 394	scb_o->cputm = scb_s->cputm;
 395	scb_o->ckc = scb_s->ckc;
 396	scb_o->todpr = scb_s->todpr;
 397
 398	/* guest state */
 399	scb_o->gpsw = scb_s->gpsw;
 400	scb_o->gg14 = scb_s->gg14;
 401	scb_o->gg15 = scb_s->gg15;
 402	memcpy(scb_o->gcr, scb_s->gcr, 128);
 403	scb_o->pp = scb_s->pp;
 404
 405	/* branch prediction */
 406	if (test_kvm_facility(vcpu->kvm, 82)) {
 407		scb_o->fpf &= ~FPF_BPBC;
 408		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
 409	}
 410
 411	/* interrupt intercept */
 412	switch (scb_s->icptcode) {
 413	case ICPT_PROGI:
 414	case ICPT_INSTPROGI:
 415	case ICPT_EXTINT:
 416		memcpy((void *)((u64)scb_o + 0xc0),
 417		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
 418		break;
 419	case ICPT_PARTEXEC:
 420		/* MVPG only */
 421		memcpy((void *)((u64)scb_o + 0xc0),
 422		       (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
 423		break;
 424	}
 425
 426	if (scb_s->ihcpu != 0xffffU)
 427		scb_o->ihcpu = scb_s->ihcpu;
 428}
 429
 430/*
 431 * Setup the shadow scb by copying and checking the relevant parts of the g2
 432 * provided scb.
 433 *
 434 * Returns: - 0 if the scb has been shadowed
 435 *          - > 0 if control has to be given to guest 2
 436 */
 437static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 438{
 439	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 440	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 441	/* READ_ONCE does not work on bitfields - use a temporary variable */
 442	const uint32_t __new_prefix = scb_o->prefix;
 443	const uint32_t new_prefix = READ_ONCE(__new_prefix);
 444	const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
 445	bool had_tx = scb_s->ecb & ECB_TE;
 446	unsigned long new_mso = 0;
 447	int rc;
 448
 449	/* make sure we don't have any leftovers when reusing the scb */
 450	scb_s->icptcode = 0;
 451	scb_s->eca = 0;
 452	scb_s->ecb = 0;
 453	scb_s->ecb2 = 0;
 454	scb_s->ecb3 = 0;
 455	scb_s->ecd = 0;
 456	scb_s->fac = 0;
 457	scb_s->fpf = 0;
 458
 459	rc = prepare_cpuflags(vcpu, vsie_page);
 460	if (rc)
 461		goto out;
 462
 463	/* timer */
 464	scb_s->cputm = scb_o->cputm;
 465	scb_s->ckc = scb_o->ckc;
 466	scb_s->todpr = scb_o->todpr;
 467	scb_s->epoch = scb_o->epoch;
 468
 469	/* guest state */
 470	scb_s->gpsw = scb_o->gpsw;
 471	scb_s->gg14 = scb_o->gg14;
 472	scb_s->gg15 = scb_o->gg15;
 473	memcpy(scb_s->gcr, scb_o->gcr, 128);
 474	scb_s->pp = scb_o->pp;
 475
 476	/* interception / execution handling */
 477	scb_s->gbea = scb_o->gbea;
 478	scb_s->lctl = scb_o->lctl;
 479	scb_s->svcc = scb_o->svcc;
 480	scb_s->ictl = scb_o->ictl;
 481	/*
 482	 * SKEY handling functions can't deal with false setting of PTE invalid
 483	 * bits. Therefore we cannot provide interpretation and would later
 484	 * have to provide our own emulation handlers.
 485	 */
 486	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
 487		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
 488
 489	scb_s->icpua = scb_o->icpua;
 490
 491	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
 492		new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
 493	/* if the hva of the prefix changes, we have to remap the prefix */
 494	if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
 495		prefix_unmapped(vsie_page);
 496	 /* SIE will do mso/msl validity and exception checks for us */
 497	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
 498	scb_s->mso = new_mso;
 499	scb_s->prefix = new_prefix;
 500
 501	/* We definitely have to flush the tlb if this scb never ran */
 502	if (scb_s->ihcpu != 0xffffU)
 503		scb_s->ihcpu = scb_o->ihcpu;
 504
 505	/* MVPG and Protection Exception Interpretation are always available */
 506	scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
 507	/* Host-protection-interruption introduced with ESOP */
 508	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
 509		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
 510	/* transactional execution */
 511	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
 512		/* remap the prefix if tx is toggled on */
 513		if (!had_tx)
 514			prefix_unmapped(vsie_page);
 515		scb_s->ecb |= ECB_TE;
 516	}
 517	/* branch prediction */
 518	if (test_kvm_facility(vcpu->kvm, 82))
 519		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
 520	/* SIMD */
 521	if (test_kvm_facility(vcpu->kvm, 129)) {
 522		scb_s->eca |= scb_o->eca & ECA_VX;
 523		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
 524	}
 525	/* Run-time-Instrumentation */
 526	if (test_kvm_facility(vcpu->kvm, 64))
 527		scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
 528	/* Instruction Execution Prevention */
 529	if (test_kvm_facility(vcpu->kvm, 130))
 530		scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
 531	/* Guarded Storage */
 532	if (test_kvm_facility(vcpu->kvm, 133)) {
 533		scb_s->ecb |= scb_o->ecb & ECB_GS;
 534		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
 535	}
 536	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
 537		scb_s->eca |= scb_o->eca & ECA_SII;
 538	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
 539		scb_s->eca |= scb_o->eca & ECA_IB;
 540	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
 541		scb_s->eca |= scb_o->eca & ECA_CEI;
 542	/* Epoch Extension */
 543	if (test_kvm_facility(vcpu->kvm, 139))
 544		scb_s->ecd |= scb_o->ecd & ECD_MEF;
 545
 546	/* etoken */
 547	if (test_kvm_facility(vcpu->kvm, 156))
 548		scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;
 549
 550	scb_s->hpid = HPID_VSIE;
 551
 552	prepare_ibc(vcpu, vsie_page);
 553	rc = shadow_crycb(vcpu, vsie_page);
 554out:
 555	if (rc)
 556		unshadow_scb(vcpu, vsie_page);
 557	return rc;
 558}
 559
 560void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
 561				 unsigned long end)
 562{
 563	struct kvm *kvm = gmap->private;
 564	struct vsie_page *cur;
 565	unsigned long prefix;
 566	struct page *page;
 567	int i;
 568
 569	if (!gmap_is_shadow(gmap))
 570		return;
 571	if (start >= 1UL << 31)
 572		/* We are only interested in prefix pages */
 573		return;
 574
 575	/*
 576	 * Only new shadow blocks are added to the list during runtime,
 577	 * therefore we can safely reference them all the time.
 578	 */
 579	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
 580		page = READ_ONCE(kvm->arch.vsie.pages[i]);
 581		if (!page)
 582			continue;
 583		cur = page_to_virt(page);
 584		if (READ_ONCE(cur->gmap) != gmap)
 585			continue;
 586		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
 587		/* with mso/msl, the prefix lies at an offset */
 588		prefix += cur->scb_s.mso;
 589		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
 590			prefix_unmapped_sync(cur);
 591	}
 592}
 593
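/*
 * Illustration (hypothetical userspace sketch): the notifier check above
 * is the standard closed-interval overlap test. Two ranges [a0, a1] and
 * [b0, b1] intersect iff a0 <= b1 && b0 <= a1, applied here to the
 * two-page prefix area:
 */
#include <stdbool.h>

#define EX_PAGE_SIZE 4096UL

static bool ex_touches_prefix(unsigned long start, unsigned long end,
			      unsigned long prefix)
{
	return prefix <= end && start <= prefix + 2 * EX_PAGE_SIZE - 1;
}
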
 594/*
 595 * Map the first prefix page and if tx is enabled also the second prefix page.
 596 *
 597 * The prefix will be protected; a gmap notifier will inform about unmaps.
 598 * The shadow scb must not be executed until the prefix is remapped; this is
 599 * guaranteed by properly handling PROG_REQUEST.
 600 *
 601 * Returns: - 0 if successfully mapped or already mapped
 602 *          - > 0 if control has to be given to guest 2
 603 *          - -EAGAIN if the caller can retry immediately
 604 *          - -ENOMEM if out of memory
 605 */
 606static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 607{
 608	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 609	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
 610	int rc;
 611
 612	if (prefix_is_mapped(vsie_page))
 613		return 0;
 614
 615	/* mark it as mapped so we can catch any concurrent unmappers */
 616	prefix_mapped(vsie_page);
 617
 618	/* with mso/msl, the prefix lies at offset *mso* */
 619	prefix += scb_s->mso;
 620
 621	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
 622	if (!rc && (scb_s->ecb & ECB_TE))
 623		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 624					   prefix + PAGE_SIZE);
 625	/*
 626	 * We don't have to mprotect, we will be called for all unshadows.
 627	 * SIE will detect if protection applies and trigger a validity.
 628	 */
 629	if (rc)
 630		prefix_unmapped(vsie_page);
 631	if (rc > 0 || rc == -EFAULT)
 632		rc = set_validity_icpt(scb_s, 0x0037U);
 633	return rc;
 634}
 635
 636/*
 637 * Pin the guest page given by gpa and set hpa to the pinned host address.
 638 * Will always be pinned writable.
 639 *
 640 * Returns: - 0 on success
 641 *          - -EINVAL if the gpa is not valid guest storage
 642 */
 643static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
 644{
 645	struct page *page;
 646
 647	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
 648	if (is_error_page(page))
 649		return -EINVAL;
 650	*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
 651	return 0;
 652}
 653
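/*
 * Illustration (hypothetical userspace sketch): pinning resolves a whole
 * guest page, so the host address returned above is the mapped page base
 * plus the offset the gpa already had within its page:
 */
#include <stdint.h>

#define EX_PAGE_SIZE 4096UL

static uint64_t ex_hpa(uint64_t page_base, uint64_t gpa)
{
	return page_base + (gpa & (EX_PAGE_SIZE - 1));
}
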
 654/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
 655static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
 656{
 657	kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);
 658	/* mark the page always as dirty for migration */
 659	mark_page_dirty(kvm, gpa_to_gfn(gpa));
 660}
 661
 662/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
 663static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 664{
 665	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 666	hpa_t hpa;
 667
 668	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
 669	if (hpa) {
 670		unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);
 671		vsie_page->sca_gpa = 0;
 672		scb_s->scaol = 0;
 673		scb_s->scaoh = 0;
 674	}
 675
 676	hpa = scb_s->itdba;
 677	if (hpa) {
 678		unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
 679		vsie_page->itdba_gpa = 0;
 680		scb_s->itdba = 0;
 681	}
 682
 683	hpa = scb_s->gvrd;
 684	if (hpa) {
 685		unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa);
 686		vsie_page->gvrd_gpa = 0;
 687		scb_s->gvrd = 0;
 688	}
 689
 690	hpa = scb_s->riccbd;
 691	if (hpa) {
 692		unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa);
 693		vsie_page->riccbd_gpa = 0;
 694		scb_s->riccbd = 0;
 695	}
 696
 697	hpa = scb_s->sdnxo;
 698	if (hpa) {
 699		unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa);
 700		vsie_page->sdnx_gpa = 0;
 701		scb_s->sdnxo = 0;
 702	}
 703}
 704
 705/*
 706 * Instead of shadowing some blocks, we can simply forward them because the
 707 * addresses in the scb are 64 bit long.
 708 *
 709 * This works as long as the data lies in one page. If blocks ever exceed one
 710 * page, we have to fall back to shadowing.
 711 *
 712 * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
 713 * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
 714 *
 715 * Returns: - 0 if all blocks were pinned.
 716 *          - > 0 if control has to be given to guest 2
 717 *          - -ENOMEM if out of memory
 718 */
 719static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 720{
 721	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 722	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 723	hpa_t hpa;
 724	gpa_t gpa;
 725	int rc = 0;
 726
 727	gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
 728	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
 729		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
 730	if (gpa) {
 731		if (gpa < 2 * PAGE_SIZE)
 732			rc = set_validity_icpt(scb_s, 0x0038U);
 733		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
 734			rc = set_validity_icpt(scb_s, 0x0011U);
 735		else if ((gpa & PAGE_MASK) !=
 736			 ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
 737			rc = set_validity_icpt(scb_s, 0x003bU);
 738		if (!rc) {
 739			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 740			if (rc)
 741				rc = set_validity_icpt(scb_s, 0x0034U);
 742		}
 743		if (rc)
 744			goto unpin;
 745		vsie_page->sca_gpa = gpa;
 746		scb_s->scaoh = (u32)((u64)hpa >> 32);
 747		scb_s->scaol = (u32)(u64)hpa;
 748	}
 749
 750	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
 751	if (gpa && (scb_s->ecb & ECB_TE)) {
 752		if (gpa < 2 * PAGE_SIZE) {
 753			rc = set_validity_icpt(scb_s, 0x0080U);
 754			goto unpin;
 755		}
 756		/* 256 bytes cannot cross page boundaries */
 757		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 758		if (rc) {
 759			rc = set_validity_icpt(scb_s, 0x0080U);
 760			goto unpin;
 761		}
 762		vsie_page->itdba_gpa = gpa;
 763		scb_s->itdba = hpa;
 764	}
 765
 766	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
 767	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
 768		if (gpa < 2 * PAGE_SIZE) {
 769			rc = set_validity_icpt(scb_s, 0x1310U);
 770			goto unpin;
 771		}
 772		/*
 773		 * 512 bytes of vector registers cannot cross page boundaries;
 774		 * if this block ever gets bigger, we have to shadow it.
 775		 */
 776		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 777		if (rc) {
 778			rc = set_validity_icpt(scb_s, 0x1310U);
 779			goto unpin;
 780		}
 781		vsie_page->gvrd_gpa = gpa;
 782		scb_s->gvrd = hpa;
 783	}
 784
 785	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
 786	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
 787		if (gpa < 2 * PAGE_SIZE) {
 788			rc = set_validity_icpt(scb_s, 0x0043U);
 789			goto unpin;
 790		}
 791		/* 64 bytes cannot cross page boundaries */
 792		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 793		if (rc) {
 794			rc = set_validity_icpt(scb_s, 0x0043U);
 795			goto unpin;
 796		}
 797		/* Validity 0x0044 will be checked by SIE */
 798		vsie_page->riccbd_gpa = gpa;
 799		scb_s->riccbd = hpa;
 800	}
 801	if (((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) ||
 802	    (scb_s->ecd & ECD_ETOKENF)) {
 803		unsigned long sdnxc;
 804
 805		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
 806		sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
 807		if (!gpa || gpa < 2 * PAGE_SIZE) {
 808			rc = set_validity_icpt(scb_s, 0x10b0U);
 809			goto unpin;
 810		}
 811		if (sdnxc < 6 || sdnxc > 12) {
 812			rc = set_validity_icpt(scb_s, 0x10b1U);
 813			goto unpin;
 814		}
 815		if (gpa & ((1 << sdnxc) - 1)) {
 816			rc = set_validity_icpt(scb_s, 0x10b2U);
 817			goto unpin;
 818		}
 819		/* Due to alignment rules (checked above) this cannot
 820		 * cross page boundaries
 821		 */
 822		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 823		if (rc) {
 824			rc = set_validity_icpt(scb_s, 0x10b0U);
 825			goto unpin;
 826		}
 827		vsie_page->sdnx_gpa = gpa;
 828		scb_s->sdnxo = hpa | sdnxc;
 829	}
 830	return 0;
 831unpin:
 832	unpin_blocks(vcpu, vsie_page);
 833	return rc;
 834}
 835
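/*
 * Illustration (hypothetical userspace sketch): the SDNX checks above use
 * the usual power-of-two alignment test. An address is aligned to 1 << c
 * bytes iff its low c bits are all zero:
 */
#include <stdbool.h>
#include <stdint.h>

static bool ex_aligned(uint64_t gpa, unsigned int c)
{
	return (gpa & ((1UL << c) - 1)) == 0;
}
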
 836/* unpin the scb provided by guest 2, marking it as dirty */
 837static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
 838		      gpa_t gpa)
 839{
 840	hpa_t hpa = (hpa_t) vsie_page->scb_o;
 841
 842	if (hpa)
 843		unpin_guest_page(vcpu->kvm, gpa, hpa);
 844	vsie_page->scb_o = NULL;
 845}
 846
 847/*
 848 * Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
 849 *
 850 * Returns: - 0 if the scb was pinned.
 851 *          - > 0 if control has to be given to guest 2
 852 */
 853static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
 854		   gpa_t gpa)
 855{
 856	hpa_t hpa;
 857	int rc;
 858
 859	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 860	if (rc) {
 861		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 862		WARN_ON_ONCE(rc);
 863		return 1;
 864	}
 865	vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
 866	return 0;
 867}
 868
 869/*
 870 * Inject a fault into guest 2.
 871 *
 872 * Returns: - > 0 if control has to be given to guest 2
 873 *            < 0 if an error occurred during injection.
 874 */
 875static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
 876			bool write_flag)
 877{
 878	struct kvm_s390_pgm_info pgm = {
 879		.code = code,
 880		.trans_exc_code =
 881			/* 0-51: virtual address */
 882			(vaddr & 0xfffffffffffff000UL) |
 883			/* 52-53: store / fetch */
 884			(((unsigned int) !write_flag) + 1) << 10,
 885			/* 62-63: asce id (always primary == 0) */
 886		.exc_access_id = 0, /* always primary */
 887		.op_access_id = 0, /* not MVPG */
 888	};
 889	int rc;
 890
 891	if (code == PGM_PROTECTION)
 892		pgm.trans_exc_code |= 0x4UL;
 893
 894	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
 895	return rc ? rc : 1;
 896}
 897
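/*
 * Illustration (hypothetical userspace sketch): the translation-exception
 * code built above packs the failing page address together with the access
 * indication. In IBM bit numbering (bit 0 is the MSB), a value placed in
 * bits 52-53 of a doubleword has weight 1 << 10:
 */
#include <stdbool.h>
#include <stdint.h>

static uint64_t ex_trans_exc_code(uint64_t vaddr, bool is_write, bool prot)
{
	uint64_t tec = vaddr & 0xfffffffffffff000UL;	/* bits 0-51 */

	tec |= (uint64_t)(is_write ? 1 : 2) << 10;	/* store = 1, fetch = 2 */
	if (prot)
		tec |= 0x4UL;	/* protection, as set for PGM_PROTECTION above */
	return tec;
}
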
 898/*
 899 * Handle a fault during vsie execution on a gmap shadow.
 900 *
 901 * Returns: - 0 if the fault was resolved
 902 *          - > 0 if control has to be given to guest 2
 903 *          - < 0 if an error occurred
 904 */
 905static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 906{
 907	int rc;
 908
 909	if (current->thread.gmap_int_code == PGM_PROTECTION)
 910		/* we can directly forward all protection exceptions */
 911		return inject_fault(vcpu, PGM_PROTECTION,
 912				    current->thread.gmap_addr, 1);
 913
 914	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 915				   current->thread.gmap_addr);
 916	if (rc > 0) {
 917		rc = inject_fault(vcpu, rc,
 918				  current->thread.gmap_addr,
 919				  current->thread.gmap_write_flag);
 920		if (rc >= 0)
 921			vsie_page->fault_addr = current->thread.gmap_addr;
 922	}
 923	return rc;
 924}
 925
 926/*
 927 * Retry the previous fault that required guest 2 intervention. This avoids
 928 * one superfluous SIE re-entry and direct exit.
 929 *
 930 * Will ignore any errors. The next SIE fault will do proper fault handling.
 931 */
 932static void handle_last_fault(struct kvm_vcpu *vcpu,
 933			      struct vsie_page *vsie_page)
 934{
 935	if (vsie_page->fault_addr)
 936		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 937				      vsie_page->fault_addr);
 938	vsie_page->fault_addr = 0;
 939}
 940
 941static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
 942{
 943	vsie_page->scb_s.icptcode = 0;
 944}
 945
 946/* rewind the psw and clear the vsie icpt, so we can retry execution */
 947static void retry_vsie_icpt(struct vsie_page *vsie_page)
 948{
 949	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 950	int ilen = insn_length(scb_s->ipa >> 8);
 951
 952	/* take care of EXECUTE instructions */
 953	if (scb_s->icptstatus & 1) {
 954		ilen = (scb_s->icptstatus >> 4) & 0x6;
 955		if (!ilen)
 956			ilen = 4;
 957	}
 958	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
 959	clear_vsie_icpt(vsie_page);
 960}
 961
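/*
 * Illustration (hypothetical userspace sketch): on s390 the instruction
 * length follows from the two most significant bits of the first opcode
 * byte (00 means 2 bytes, 01 and 10 mean 4, 11 means 6), which is what
 * insn_length(scb_s->ipa >> 8) computes before the PSW is rewound above:
 */
static int ex_insn_length(unsigned char opcode)
{
	switch (opcode >> 6) {
	case 0:
		return 2;
	case 1:
	case 2:
		return 4;
	default:
		return 6;
	}
}
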
 962/*
 963 * Try to shadow + enable the guest 2 provided facility list.
 964 * Retry instruction execution if enabled for and provided by guest 2.
 965 *
 966 * Returns: - 0 if handled (retry or guest 2 icpt)
 967 *          - > 0 if control has to be given to guest 2
 968 */
 969static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 970{
 971	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 972	__u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;
 973
 974	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
 975		retry_vsie_icpt(vsie_page);
 976		if (read_guest_real(vcpu, fac, &vsie_page->fac,
 977				    sizeof(vsie_page->fac)))
 978			return set_validity_icpt(scb_s, 0x1090U);
 979		scb_s->fac = (__u32)(__u64) &vsie_page->fac;
 980	}
 981	return 0;
 982}
 983
 984/*
 985 * Run the vsie on a shadow scb and a shadow gmap, without any further
 986 * sanity checks, handling SIE faults.
 987 *
 988 * Returns: - 0 everything went fine
 989 *          - > 0 if control has to be given to guest 2
 990 *          - < 0 if an error occurred
 991 */
 992static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 993	__releases(vcpu->kvm->srcu)
 994	__acquires(vcpu->kvm->srcu)
 995{
 996	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 997	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 998	int guest_bp_isolation;
 999	int rc = 0;
1000
1001	handle_last_fault(vcpu, vsie_page);
1002
1003	if (need_resched())
1004		schedule();
1005	if (test_cpu_flag(CIF_MCCK_PENDING))
1006		s390_handle_mcck();
1007
1008	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1009
1010	/* save current guest state of bp isolation override */
1011	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
1012
1013	/*
1014	 * The guest is running with BPBC, so we have to force it on for our
1015	 * nested guest. This is done by enabling BPBC globally, so the BPBC
1016	 * control in the SCB (which the nested guest can modify) is simply
1017	 * ignored.
1018	 */
1019	if (test_kvm_facility(vcpu->kvm, 82) &&
1020	    vcpu->arch.sie_block->fpf & FPF_BPBC)
1021		set_thread_flag(TIF_ISOLATE_BP_GUEST);
1022
1023	local_irq_disable();
1024	guest_enter_irqoff();
1025	local_irq_enable();
1026
1027	/*
1028	 * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
1029	 * and VCPU requests also hinder the vSIE from running and lead
1030	 * to an immediate exit. kvm_s390_vsie_kick() has to be used to
1031	 * also kick the vSIE.
1032	 */
1033	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
1034	barrier();
1035	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
1036		rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
1037	barrier();
1038	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
1039
1040	local_irq_disable();
1041	guest_exit_irqoff();
1042	local_irq_enable();
1043
1044	/* restore guest state for bp isolation override */
1045	if (!guest_bp_isolation)
1046		clear_thread_flag(TIF_ISOLATE_BP_GUEST);
1047
1048	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1049
1050	if (rc == -EINTR) {
1051		VCPU_EVENT(vcpu, 3, "%s", "machine check");
1052		kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
1053		return 0;
1054	}
1055
1056	if (rc > 0)
1057		rc = 0; /* we could still have an icpt */
1058	else if (rc == -EFAULT)
1059		return handle_fault(vcpu, vsie_page);
1060
1061	switch (scb_s->icptcode) {
1062	case ICPT_INST:
1063		if (scb_s->ipa == 0xb2b0)
1064			rc = handle_stfle(vcpu, vsie_page);
1065		break;
1066	case ICPT_STOP:
1067		/* stop not requested by g2 - must have been a kick */
1068		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
1069			clear_vsie_icpt(vsie_page);
1070		break;
1071	case ICPT_VALIDITY:
1072		if ((scb_s->ipa & 0xf000) != 0xf000)
1073			scb_s->ipa += 0x1000;
1074		break;
1075	}
1076	return rc;
1077}
1078
1079static void release_gmap_shadow(struct vsie_page *vsie_page)
1080{
1081	if (vsie_page->gmap)
1082		gmap_put(vsie_page->gmap);
1083	WRITE_ONCE(vsie_page->gmap, NULL);
1084	prefix_unmapped(vsie_page);
1085}
1086
1087static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
1088			       struct vsie_page *vsie_page)
1089{
1090	unsigned long asce;
1091	union ctlreg0 cr0;
1092	struct gmap *gmap;
1093	int edat;
1094
1095	asce = vcpu->arch.sie_block->gcr[1];
1096	cr0.val = vcpu->arch.sie_block->gcr[0];
1097	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
1098	edat += edat && test_kvm_facility(vcpu->kvm, 78);
1099
1100	/*
1101	 * ASCE or EDAT could have changed since last icpt, or the gmap
1102	 * we're holding has been unshadowed. If the gmap is still valid,
1103	 * we can safely reuse it.
1104	 */
1105	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
1106		return 0;
1107
1108	/* release the old shadow - if any, and mark the prefix as unmapped */
1109	release_gmap_shadow(vsie_page);
1110	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
1111	if (IS_ERR(gmap))
1112		return PTR_ERR(gmap);
1113	gmap->private = vcpu->kvm;
1114	WRITE_ONCE(vsie_page->gmap, gmap);
1115	return 0;
1116}
1117
1118/*
1119 * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
1120 */
1121static void register_shadow_scb(struct kvm_vcpu *vcpu,
1122				struct vsie_page *vsie_page)
1123{
1124	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1125
1126	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
1127	/*
1128	 * External calls have to lead to a kick of the vcpu and
1129	 * therefore the vsie -> Simulate Wait state.
1130	 */
1131	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
1132	/*
1133	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
1134	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
1135	 */
1136	preempt_disable();
1137	scb_s->epoch += vcpu->kvm->arch.epoch;
1138
1139	if (scb_s->ecd & ECD_MEF) {
1140		scb_s->epdx += vcpu->kvm->arch.epdx;
1141		if (scb_s->epoch < vcpu->kvm->arch.epoch)
1142			scb_s->epdx += 1;
1143	}
1144
1145	preempt_enable();
1146}
1147
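/*
 * Illustration (hypothetical userspace sketch): the epoch adjustment above
 * is a 128-bit addition split over epoch (low half) and epdx (high half).
 * Wraparound of the low 64 bits shows up as the sum being smaller than an
 * addend:
 */
#include <stdint.h>

static void ex_add_epoch(uint64_t *epoch, uint64_t *epdx,
			 uint64_t g2_epoch, uint64_t g2_epdx)
{
	*epoch += g2_epoch;
	*epdx += g2_epdx;
	if (*epoch < g2_epoch)	/* low half wrapped, carry into epdx */
		*epdx += 1;
}
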
1148/*
1149 * Unregister a shadow scb from a VCPU.
1150 */
1151static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
1152{
1153	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
1154	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
1155}
1156
1157/*
1158 * Run the vsie on a shadowed scb, managing the gmap shadow, handling
1159 * prefix pages and faults.
1160 *
1161 * Returns: - 0 if no errors occurred
1162 *          - > 0 if control has to be given to guest 2
1163 *          - -ENOMEM if out of memory
1164 */
1165static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1166{
1167	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1168	int rc = 0;
1169
1170	while (1) {
1171		rc = acquire_gmap_shadow(vcpu, vsie_page);
1172		if (!rc)
1173			rc = map_prefix(vcpu, vsie_page);
1174		if (!rc) {
1175			gmap_enable(vsie_page->gmap);
1176			update_intervention_requests(vsie_page);
1177			rc = do_vsie_run(vcpu, vsie_page);
1178			gmap_enable(vcpu->arch.gmap);
1179		}
1180		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);
1181
1182		if (rc == -EAGAIN)
1183			rc = 0;
1184		if (rc || scb_s->icptcode || signal_pending(current) ||
1185		    kvm_s390_vcpu_has_irq(vcpu, 0) ||
1186		    kvm_s390_vcpu_sie_inhibited(vcpu))
1187			break;
1188	}
1189
1190	if (rc == -EFAULT) {
1191		/*
1192		 * Addressing exceptions are always presented as intercepts.
1193		 * As addressing exceptions are suppressing and our guest 3 PSW
1194		 * points at the responsible instruction, we have to
1195		 * forward the PSW and set the ilc. If we can't read the guest 3
1196		 * instruction, we can use an arbitrary ilc. Let's always use
1197		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
1198		 * memory. (we could also fake the shadow so the hardware
1199		 * handles it).
1200		 */
1201		scb_s->icptcode = ICPT_PROGI;
1202		scb_s->iprcc = PGM_ADDRESSING;
1203		scb_s->pgmilc = 4;
1204		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
1205	}
1206	return rc;
1207}
1208
1209/*
1210 * Get or create a vsie page for a scb address.
1211 *
1212 * Returns: - address of a vsie page (cached or new one)
1213 *          - NULL if the same scb address is already used by another VCPU
1214 *          - ERR_PTR(-ENOMEM) if out of memory
1215 */
1216static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
1217{
1218	struct vsie_page *vsie_page;
1219	struct page *page;
1220	int nr_vcpus;
1221
1222	rcu_read_lock();
1223	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
1224	rcu_read_unlock();
1225	if (page) {
1226		if (page_ref_inc_return(page) == 2)
1227			return page_to_virt(page);
1228		page_ref_dec(page);
1229	}
1230
1231	/*
1232	 * We want at least #online_vcpus shadows, so every VCPU can execute
1233	 * the VSIE in parallel.
1234	 */
1235	nr_vcpus = atomic_read(&kvm->online_vcpus);
1236
1237	mutex_lock(&kvm->arch.vsie.mutex);
1238	if (kvm->arch.vsie.page_count < nr_vcpus) {
1239		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA);
1240		if (!page) {
1241			mutex_unlock(&kvm->arch.vsie.mutex);
1242			return ERR_PTR(-ENOMEM);
1243		}
1244		page_ref_inc(page);
1245		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
1246		kvm->arch.vsie.page_count++;
1247	} else {
1248		/* reuse an existing entry that belongs to nobody */
1249		while (true) {
1250			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
1251			if (page_ref_inc_return(page) == 2)
1252				break;
1253			page_ref_dec(page);
1254			kvm->arch.vsie.next++;
1255			kvm->arch.vsie.next %= nr_vcpus;
1256		}
1257		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
1258	}
1259	page->index = addr;
1260	/* double use of the same address */
1261	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
1262		page_ref_dec(page);
1263		mutex_unlock(&kvm->arch.vsie.mutex);
1264		return NULL;
1265	}
1266	mutex_unlock(&kvm->arch.vsie.mutex);
1267
1268	vsie_page = page_to_virt(page);
1269	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
1270	release_gmap_shadow(vsie_page);
1271	vsie_page->fault_addr = 0;
1272	vsie_page->scb_s.ihcpu = 0xffffU;
1273	return vsie_page;
1274}
1275
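/*
 * Illustration (hypothetical userspace sketch): a cached vsie page with
 * refcount 1 is held by the cache alone, so seeing exactly 2 after the
 * increment claims exclusive use. Any other value means a racing VCPU
 * already owns the page and the caller must back off:
 */
#include <stdatomic.h>
#include <stdbool.h>

struct ex_cached_page {
	atomic_int refcount;	/* 1 == cached and currently unused */
};

static bool ex_try_claim(struct ex_cached_page *p)
{
	if (atomic_fetch_add(&p->refcount, 1) + 1 == 2)
		return true;	/* we are now the only user */
	atomic_fetch_sub(&p->refcount, 1);	/* lost the race, undo */
	return false;
}
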
1276/* put a vsie page acquired via get_vsie_page */
1277static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
1278{
1279	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);
1280
1281	page_ref_dec(page);
1282}
1283
1284int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
1285{
1286	struct vsie_page *vsie_page;
1287	unsigned long scb_addr;
1288	int rc;
1289
1290	vcpu->stat.instruction_sie++;
1291	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
1292		return -EOPNOTSUPP;
1293	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1294		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1295
1296	BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
1297	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);
1298
1299	/* 512 byte alignment */
1300	if (unlikely(scb_addr & 0x1ffUL))
1301		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
1302
1303	if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
1304	    kvm_s390_vcpu_sie_inhibited(vcpu))
1305		return 0;
1306
1307	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
1308	if (IS_ERR(vsie_page))
1309		return PTR_ERR(vsie_page);
1310	else if (!vsie_page)
1311		/* double use of sie control block - simply do nothing */
1312		return 0;
1313
1314	rc = pin_scb(vcpu, vsie_page, scb_addr);
1315	if (rc)
1316		goto out_put;
1317	rc = shadow_scb(vcpu, vsie_page);
1318	if (rc)
1319		goto out_unpin_scb;
1320	rc = pin_blocks(vcpu, vsie_page);
1321	if (rc)
1322		goto out_unshadow;
1323	register_shadow_scb(vcpu, vsie_page);
1324	rc = vsie_run(vcpu, vsie_page);
1325	unregister_shadow_scb(vcpu);
1326	unpin_blocks(vcpu, vsie_page);
1327out_unshadow:
1328	unshadow_scb(vcpu, vsie_page);
1329out_unpin_scb:
1330	unpin_scb(vcpu, vsie_page, scb_addr);
1331out_put:
1332	put_vsie_page(vcpu->kvm, vsie_page);
1333
1334	return rc < 0 ? rc : 0;
1335}
1336
1337/* Init the vsie data structures. To be called when a vm is initialized. */
1338void kvm_s390_vsie_init(struct kvm *kvm)
1339{
1340	mutex_init(&kvm->arch.vsie.mutex);
1341	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL);
1342}
1343
1344/* Destroy the vsie data structures. To be called when a vm is destroyed. */
1345void kvm_s390_vsie_destroy(struct kvm *kvm)
1346{
1347	struct vsie_page *vsie_page;
1348	struct page *page;
1349	int i;
1350
1351	mutex_lock(&kvm->arch.vsie.mutex);
1352	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
1353		page = kvm->arch.vsie.pages[i];
1354		kvm->arch.vsie.pages[i] = NULL;
1355		vsie_page = page_to_virt(page);
1356		release_gmap_shadow(vsie_page);
1357		/* free the radix tree entry */
1358		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
1359		__free_page(page);
1360	}
1361	kvm->arch.vsie.page_count = 0;
1362	mutex_unlock(&kvm->arch.vsie.mutex);
1363}
1364
1365void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
1366{
1367	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);
1368
1369	/*
1370	 * Even if the VCPU lets go of the shadow sie block reference, it is
1371	 * still valid in the cache. So we can safely kick it.
1372	 */
1373	if (scb) {
1374		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
1375		if (scb->prog0c & PROG_IN_SIE)
1376			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
1377	}
1378}