v4.10.11
 
   1/*
   2 * kvm nested virtualization support for s390x
   3 *
   4 * Copyright IBM Corp. 2016
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License (version 2 only)
   8 * as published by the Free Software Foundation.
   9 *
  10 *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
  11 */
  12#include <linux/vmalloc.h>
  13#include <linux/kvm_host.h>
  14#include <linux/bug.h>
  15#include <linux/list.h>
  16#include <linux/bitmap.h>
  17#include <asm/gmap.h>
  18#include <asm/mmu_context.h>
  19#include <asm/sclp.h>
  20#include <asm/nmi.h>
  21#include <asm/dis.h>
  22#include "kvm-s390.h"
  23#include "gaccess.h"
  24
  25struct vsie_page {
  26	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
   27	/* the pinned original scb */
   28	struct kvm_s390_sie_block *scb_o;	/* 0x0200 */
  29	/* the shadow gmap in use by the vsie_page */
  30	struct gmap *gmap;			/* 0x0208 */
  31	/* address of the last reported fault to guest2 */
  32	unsigned long fault_addr;		/* 0x0210 */
  33	__u8 reserved[0x0700 - 0x0218];		/* 0x0218 */
  34	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
  35	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
  36} __packed;
  37
  38/* trigger a validity icpt for the given scb */
  39static int set_validity_icpt(struct kvm_s390_sie_block *scb,
  40			     __u16 reason_code)
  41{
  42	scb->ipa = 0x1000;
  43	scb->ipb = ((__u32) reason_code) << 16;
  44	scb->icptcode = ICPT_VALIDITY;
  45	return 1;
  46}
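/*
 * Illustrative example: set_validity_icpt(scb, 0x0037U) leaves
 * ipa == 0x1000 and ipb == 0x00370000 (the reason code in the upper
 * halfword of ipb) and returns 1, so the caller hands control back
 * to guest 2.
 */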
  47
  48/* mark the prefix as unmapped, this will block the VSIE */
  49static void prefix_unmapped(struct vsie_page *vsie_page)
  50{
  51	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
  52}
  53
  54/* mark the prefix as unmapped and wait until the VSIE has been left */
  55static void prefix_unmapped_sync(struct vsie_page *vsie_page)
  56{
  57	prefix_unmapped(vsie_page);
  58	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
  59		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
  60	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
  61		cpu_relax();
  62}
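/*
 * Note: prog20/PROG_REQUEST and prog0c/PROG_IN_SIE form a handshake
 * with the SIE entry/exit path: PROG_REQUEST blocks (re)entry into
 * the VSIE, the STOP interrupt request kicks a VCPU that is currently
 * in SIE, and we busy-wait until PROG_IN_SIE is cleared on exit.
 */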
  63
  64/* mark the prefix as mapped, this will allow the VSIE to run */
  65static void prefix_mapped(struct vsie_page *vsie_page)
  66{
  67	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
  68}
  69
  70/* test if the prefix is mapped into the gmap shadow */
  71static int prefix_is_mapped(struct vsie_page *vsie_page)
  72{
  73	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
  74}
  75
  76/* copy the updated intervention request bits into the shadow scb */
  77static void update_intervention_requests(struct vsie_page *vsie_page)
  78{
  79	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
  80	int cpuflags;
  81
  82	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
  83	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
  84	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
  85}
  86
  87/* shadow (filter and validate) the cpuflags  */
  88static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
  89{
  90	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
  91	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
  92	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);
  93
  94	/* we don't allow ESA/390 guests */
  95	if (!(cpuflags & CPUSTAT_ZARCH))
  96		return set_validity_icpt(scb_s, 0x0001U);
  97
  98	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
  99		return set_validity_icpt(scb_s, 0x0001U);
 100	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
 101		return set_validity_icpt(scb_s, 0x0007U);
 102
 103	/* intervention requests will be set later */
 104	newflags = CPUSTAT_ZARCH;
 105	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
 106		newflags |= CPUSTAT_GED;
 107	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
 108		if (cpuflags & CPUSTAT_GED)
 109			return set_validity_icpt(scb_s, 0x0001U);
 110		newflags |= CPUSTAT_GED2;
 111	}
 112	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
 113		newflags |= cpuflags & CPUSTAT_P;
 114	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
 115		newflags |= cpuflags & CPUSTAT_SM;
 116	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
 117		newflags |= cpuflags & CPUSTAT_IBS;
 118
 119	atomic_set(&scb_s->cpuflags, newflags);
 120	return 0;
 121}
 122
 123/*
 124 * Create a shadow copy of the crycb block and setup key wrapping, if
 125 * requested for guest 3 and enabled for guest 2.
 126 *
  127 * We only accept format-1 (no AP in g2), but convert it into format-2.
 128 * There is nothing to do for format-0.
 129 *
 130 * Returns: - 0 if shadowed or nothing to do
 131 *          - > 0 if control has to be given to guest 2
 132 */
 133static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 134{
 135	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 136	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 137	u32 crycb_addr = scb_o->crycbd & 0x7ffffff8U;
 138	unsigned long *b1, *b2;
 139	u8 ecb3_flags;
 140
 141	scb_s->crycbd = 0;
 142	if (!(scb_o->crycbd & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1))
 143		return 0;
 144	/* format-1 is supported with message-security-assist extension 3 */
 145	if (!test_kvm_facility(vcpu->kvm, 76))
 146		return 0;
 147	/* we may only allow it if enabled for guest 2 */
 148	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
 149		     (ECB3_AES | ECB3_DEA);
 150	if (!ecb3_flags)
 151		return 0;
 152
 153	if ((crycb_addr & PAGE_MASK) != ((crycb_addr + 128) & PAGE_MASK))
 154		return set_validity_icpt(scb_s, 0x003CU);
 155	else if (!crycb_addr)
 156		return set_validity_icpt(scb_s, 0x0039U);
 157
 158	/* copy only the wrapping keys */
 159	if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
 160		return set_validity_icpt(scb_s, 0x0035U);
 161
 162	scb_s->ecb3 |= ecb3_flags;
 163	scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT1 |
 164			CRYCB_FORMAT2;
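	/*
	 * Note: crycbd only holds a 31-bit origin, so the shadow crycb
	 * inside the vsie_page must reside below 2 GB; the GFP_DMA
	 * allocation in get_vsie_page() guarantees that on s390.
	 */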
 165
 166	/* xor both blocks in one run */
 167	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
 168	b2 = (unsigned long *)
 169			    vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
 170	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
 171	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
 172	return 0;
 173}
 174
 175/* shadow (round up/down) the ibc to avoid validity icpt */
 176static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 177{
 178	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 179	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 180	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;
 181
 182	scb_s->ibc = 0;
 183	/* ibc installed in g2 and requested for g3 */
 184	if (vcpu->kvm->arch.model.ibc && (scb_o->ibc & 0x0fffU)) {
 185		scb_s->ibc = scb_o->ibc & 0x0fffU;
  186		/* take care of the minimum ibc level of the machine */
 187		if (scb_s->ibc < min_ibc)
 188			scb_s->ibc = min_ibc;
 189		/* take care of the maximum ibc level set for the guest */
 190		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
 191			scb_s->ibc = vcpu->kvm->arch.model.ibc;
 192	}
 193}
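/*
 * Illustrative example: with a machine minimum ibc of 0x0a02 (from
 * sclp.ibc) and a guest 2 model ibc of 0x0b05, a guest 3 request of
 * 0x0901 is raised to 0x0a02 and a request of 0x0c00 is capped to
 * 0x0b05.
 */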
 194
 195/* unshadow the scb, copying parameters back to the real scb */
 196static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 197{
 198	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 199	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 200
 201	/* interception */
 202	scb_o->icptcode = scb_s->icptcode;
 203	scb_o->icptstatus = scb_s->icptstatus;
 204	scb_o->ipa = scb_s->ipa;
 205	scb_o->ipb = scb_s->ipb;
 206	scb_o->gbea = scb_s->gbea;
 207
 208	/* timer */
 209	scb_o->cputm = scb_s->cputm;
 210	scb_o->ckc = scb_s->ckc;
 211	scb_o->todpr = scb_s->todpr;
 212
 213	/* guest state */
 214	scb_o->gpsw = scb_s->gpsw;
 215	scb_o->gg14 = scb_s->gg14;
 216	scb_o->gg15 = scb_s->gg15;
 217	memcpy(scb_o->gcr, scb_s->gcr, 128);
 218	scb_o->pp = scb_s->pp;
 219
 220	/* interrupt intercept */
 221	switch (scb_s->icptcode) {
 222	case ICPT_PROGI:
 223	case ICPT_INSTPROGI:
 224	case ICPT_EXTINT:
 225		memcpy((void *)((u64)scb_o + 0xc0),
 226		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
 227		break;
 228	case ICPT_PARTEXEC:
 229		/* MVPG only */
 230		memcpy((void *)((u64)scb_o + 0xc0),
 231		       (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
 232		break;
 233	}
 234
 235	if (scb_s->ihcpu != 0xffffU)
 236		scb_o->ihcpu = scb_s->ihcpu;
 237}
 238
 239/*
  240 * Set up the shadow scb by copying and checking the relevant parts of the g2
 241 * provided scb.
 242 *
 243 * Returns: - 0 if the scb has been shadowed
 244 *          - > 0 if control has to be given to guest 2
 245 */
 246static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 247{
 248	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 249	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 250	bool had_tx = scb_s->ecb & 0x10U;
 251	unsigned long new_mso = 0;
 252	int rc;
 253
 254	/* make sure we don't have any leftovers when reusing the scb */
 255	scb_s->icptcode = 0;
 256	scb_s->eca = 0;
 257	scb_s->ecb = 0;
 258	scb_s->ecb2 = 0;
 259	scb_s->ecb3 = 0;
 260	scb_s->ecd = 0;
 261	scb_s->fac = 0;
 262
 263	rc = prepare_cpuflags(vcpu, vsie_page);
 264	if (rc)
 265		goto out;
 266
 267	/* timer */
 268	scb_s->cputm = scb_o->cputm;
 269	scb_s->ckc = scb_o->ckc;
 270	scb_s->todpr = scb_o->todpr;
 271	scb_s->epoch = scb_o->epoch;
 272
 273	/* guest state */
 274	scb_s->gpsw = scb_o->gpsw;
 275	scb_s->gg14 = scb_o->gg14;
 276	scb_s->gg15 = scb_o->gg15;
 277	memcpy(scb_s->gcr, scb_o->gcr, 128);
 278	scb_s->pp = scb_o->pp;
 279
 280	/* interception / execution handling */
 281	scb_s->gbea = scb_o->gbea;
 282	scb_s->lctl = scb_o->lctl;
 283	scb_s->svcc = scb_o->svcc;
 284	scb_s->ictl = scb_o->ictl;
 285	/*
 286	 * SKEY handling functions can't deal with false setting of PTE invalid
 287	 * bits. Therefore we cannot provide interpretation and would later
 288	 * have to provide own emulation handlers.
 289	 */
 290	scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
 291	scb_s->icpua = scb_o->icpua;
 292
 293	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
 294		new_mso = scb_o->mso & 0xfffffffffff00000UL;
 295	/* if the hva of the prefix changes, we have to remap the prefix */
 296	if (scb_s->mso != new_mso || scb_s->prefix != scb_o->prefix)
 297		prefix_unmapped(vsie_page);
 298	 /* SIE will do mso/msl validity and exception checks for us */
 299	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
 300	scb_s->mso = new_mso;
 301	scb_s->prefix = scb_o->prefix;
 302
  303	/* We definitely have to flush the TLB if this scb never ran */
 304	if (scb_s->ihcpu != 0xffffU)
 305		scb_s->ihcpu = scb_o->ihcpu;
 306
 307	/* MVPG and Protection Exception Interpretation are always available */
 308	scb_s->eca |= scb_o->eca & 0x01002000U;
 309	/* Host-protection-interruption introduced with ESOP */
 310	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
 311		scb_s->ecb |= scb_o->ecb & 0x02U;
 312	/* transactional execution */
 313	if (test_kvm_facility(vcpu->kvm, 73)) {
  314		/* remap the prefix if tx is toggled on */
 315		if ((scb_o->ecb & 0x10U) && !had_tx)
 316			prefix_unmapped(vsie_page);
 317		scb_s->ecb |= scb_o->ecb & 0x10U;
 318	}
 319	/* SIMD */
 320	if (test_kvm_facility(vcpu->kvm, 129)) {
 321		scb_s->eca |= scb_o->eca & 0x00020000U;
 322		scb_s->ecd |= scb_o->ecd & 0x20000000U;
 323	}
 324	/* Run-time-Instrumentation */
 325	if (test_kvm_facility(vcpu->kvm, 64))
 326		scb_s->ecb3 |= scb_o->ecb3 & 0x01U;
 327	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
 328		scb_s->eca |= scb_o->eca & 0x00000001U;
 329	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
 330		scb_s->eca |= scb_o->eca & 0x40000000U;
 331	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
 332		scb_s->eca |= scb_o->eca & 0x80000000U;
 333
 334	prepare_ibc(vcpu, vsie_page);
 335	rc = shadow_crycb(vcpu, vsie_page);
 336out:
 337	if (rc)
 338		unshadow_scb(vcpu, vsie_page);
 339	return rc;
 340}
 341
 342void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
 343				 unsigned long end)
 344{
 345	struct kvm *kvm = gmap->private;
 346	struct vsie_page *cur;
 347	unsigned long prefix;
 348	struct page *page;
 349	int i;
 350
 351	if (!gmap_is_shadow(gmap))
 352		return;
 353	if (start >= 1UL << 31)
 354		/* We are only interested in prefix pages */
 355		return;
 356
 357	/*
 358	 * Only new shadow blocks are added to the list during runtime,
 359	 * therefore we can safely reference them all the time.
 360	 */
 361	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
 362		page = READ_ONCE(kvm->arch.vsie.pages[i]);
 363		if (!page)
 364			continue;
 365		cur = page_to_virt(page);
 366		if (READ_ONCE(cur->gmap) != gmap)
 367			continue;
 368		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
 369		/* with mso/msl, the prefix lies at an offset */
 370		prefix += cur->scb_s.mso;
 371		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
 372			prefix_unmapped_sync(cur);
 373	}
 374}
 375
 376/*
 377 * Map the first prefix page and if tx is enabled also the second prefix page.
 378 *
 379 * The prefix will be protected, a gmap notifier will inform about unmaps.
 380 * The shadow scb must not be executed until the prefix is remapped, this is
 381 * guaranteed by properly handling PROG_REQUEST.
 382 *
  383 * Returns: - 0 if successfully mapped or already mapped
 384 *          - > 0 if control has to be given to guest 2
 385 *          - -EAGAIN if the caller can retry immediately
 386 *          - -ENOMEM if out of memory
 387 */
 388static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 389{
 390	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 391	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
 392	int rc;
 393
 394	if (prefix_is_mapped(vsie_page))
 395		return 0;
 396
 397	/* mark it as mapped so we can catch any concurrent unmappers */
 398	prefix_mapped(vsie_page);
 399
 400	/* with mso/msl, the prefix lies at offset *mso* */
 401	prefix += scb_s->mso;
 402
 403	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
 404	if (!rc && (scb_s->ecb & 0x10U))
 405		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 406					   prefix + PAGE_SIZE);
 407	/*
 408	 * We don't have to mprotect, we will be called for all unshadows.
 409	 * SIE will detect if protection applies and trigger a validity.
 410	 */
 411	if (rc)
 412		prefix_unmapped(vsie_page);
 413	if (rc > 0 || rc == -EFAULT)
 414		rc = set_validity_icpt(scb_s, 0x0037U);
 415	return rc;
 416}
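/*
 * Note: with tx enabled, the mapped prefix area spans two pages; this
 * matches the 2 * PAGE_SIZE window that kvm_s390_vsie_gmap_notifier()
 * above checks before forcing a remap.
 */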
 417
 418/*
 419 * Pin the guest page given by gpa and set hpa to the pinned host address.
 420 * Will always be pinned writable.
 421 *
 422 * Returns: - 0 on success
 423 *          - -EINVAL if the gpa is not valid guest storage
 424 *          - -ENOMEM if out of memory
 425 */
 426static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
 427{
 428	struct page *page;
 429	hva_t hva;
 430	int rc;
 431
 432	hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
 433	if (kvm_is_error_hva(hva))
 434		return -EINVAL;
 435	rc = get_user_pages_fast(hva, 1, 1, &page);
 436	if (rc < 0)
 437		return rc;
 438	else if (rc != 1)
 439		return -ENOMEM;
 440	*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
 441	return 0;
 442}
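/*
 * Note: get_user_pages_fast() returns the number of pages pinned (at
 * most 1 here) or a negative error; a return of 0 means nothing could
 * be pinned, which is mapped to -ENOMEM above.
 */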
 443
 444/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
 445static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
 446{
 447	struct page *page;
 448
 449	page = virt_to_page(hpa);
 450	set_page_dirty_lock(page);
 451	put_page(page);
 452	/* mark the page always as dirty for migration */
 453	mark_page_dirty(kvm, gpa_to_gfn(gpa));
 454}
 455
 456/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
 457static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 458{
 459	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 460	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 461	hpa_t hpa;
 462	gpa_t gpa;
 463
 464	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
 465	if (hpa) {
 466		gpa = scb_o->scaol & ~0xfUL;
 467		if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
 468			gpa |= (u64) scb_o->scaoh << 32;
 469		unpin_guest_page(vcpu->kvm, gpa, hpa);
 470		scb_s->scaol = 0;
 471		scb_s->scaoh = 0;
 472	}
 473
 474	hpa = scb_s->itdba;
 475	if (hpa) {
 476		gpa = scb_o->itdba & ~0xffUL;
 477		unpin_guest_page(vcpu->kvm, gpa, hpa);
 478		scb_s->itdba = 0;
 479	}
 480
 481	hpa = scb_s->gvrd;
 482	if (hpa) {
 483		gpa = scb_o->gvrd & ~0x1ffUL;
 484		unpin_guest_page(vcpu->kvm, gpa, hpa);
 485		scb_s->gvrd = 0;
 486	}
 487
 488	hpa = scb_s->riccbd;
 489	if (hpa) {
 490		gpa = scb_o->riccbd & ~0x3fUL;
 491		unpin_guest_page(vcpu->kvm, gpa, hpa);
 492		scb_s->riccbd = 0;
 493	}
 494}
 495
 496/*
 497 * Instead of shadowing some blocks, we can simply forward them because the
 498 * addresses in the scb are 64 bit long.
 499 *
 500 * This works as long as the data lies in one page. If blocks ever exceed one
 501 * page, we have to fall back to shadowing.
 502 *
 503 * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
 504 * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
 505 *
 506 * Returns: - 0 if all blocks were pinned.
 507 *          - > 0 if control has to be given to guest 2
 508 *          - -ENOMEM if out of memory
 509 */
 510static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 511{
 512	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 513	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 514	hpa_t hpa;
 515	gpa_t gpa;
 516	int rc = 0;
 517
 518	gpa = scb_o->scaol & ~0xfUL;
 519	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
 520		gpa |= (u64) scb_o->scaoh << 32;
 521	if (gpa) {
 522		if (!(gpa & ~0x1fffUL))
 523			rc = set_validity_icpt(scb_s, 0x0038U);
 524		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
 525			rc = set_validity_icpt(scb_s, 0x0011U);
 526		else if ((gpa & PAGE_MASK) !=
 527			 ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
 528			rc = set_validity_icpt(scb_s, 0x003bU);
 529		if (!rc) {
 530			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 531			if (rc == -EINVAL)
 532				rc = set_validity_icpt(scb_s, 0x0034U);
 533		}
 534		if (rc)
 535			goto unpin;
 536		scb_s->scaoh = (u32)((u64)hpa >> 32);
 537		scb_s->scaol = (u32)(u64)hpa;
 538	}
 539
 540	gpa = scb_o->itdba & ~0xffUL;
 541	if (gpa && (scb_s->ecb & 0x10U)) {
 542		if (!(gpa & ~0x1fffU)) {
 543			rc = set_validity_icpt(scb_s, 0x0080U);
 544			goto unpin;
 545		}
 546		/* 256 bytes cannot cross page boundaries */
 547		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 548		if (rc == -EINVAL)
 549			rc = set_validity_icpt(scb_s, 0x0080U);
 550		if (rc)
 551			goto unpin;
 552		scb_s->itdba = hpa;
 553	}
 554
 555	gpa = scb_o->gvrd & ~0x1ffUL;
 556	if (gpa && (scb_s->eca & 0x00020000U) &&
 557	    !(scb_s->ecd & 0x20000000U)) {
 558		if (!(gpa & ~0x1fffUL)) {
 559			rc = set_validity_icpt(scb_s, 0x1310U);
 560			goto unpin;
 561		}
 562		/*
 563		 * 512 bytes vector registers cannot cross page boundaries
 564		 * if this block gets bigger, we have to shadow it.
 565		 */
 566		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 567		if (rc == -EINVAL)
 568			rc = set_validity_icpt(scb_s, 0x1310U);
 569		if (rc)
 570			goto unpin;
 571		scb_s->gvrd = hpa;
 572	}
 573
 574	gpa = scb_o->riccbd & ~0x3fUL;
 575	if (gpa && (scb_s->ecb3 & 0x01U)) {
 576		if (!(gpa & ~0x1fffUL)) {
 577			rc = set_validity_icpt(scb_s, 0x0043U);
 578			goto unpin;
 579		}
 580		/* 64 bytes cannot cross page boundaries */
 581		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 582		if (rc == -EINVAL)
 583			rc = set_validity_icpt(scb_s, 0x0043U);
 584		/* Validity 0x0044 will be checked by SIE */
 585		if (rc)
 586			goto unpin;
 587		scb_s->riccbd = hpa;
 588	}
 589	return 0;
 590unpin:
 591	unpin_blocks(vcpu, vsie_page);
 592	return rc;
 593}
 594
 595/* unpin the scb provided by guest 2, marking it as dirty */
 596static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
 597		      gpa_t gpa)
 598{
 599	hpa_t hpa = (hpa_t) vsie_page->scb_o;
 600
 601	if (hpa)
 602		unpin_guest_page(vcpu->kvm, gpa, hpa);
 603	vsie_page->scb_o = NULL;
 604}
 605
 606/*
  607 * Pin the scb provided by guest 2 at gpa, storing its host address in
  608 * vsie_page->scb_o.
 608 *
 609 * Returns: - 0 if the scb was pinned.
 610 *          - > 0 if control has to be given to guest 2
 611 *          - -ENOMEM if out of memory
 612 */
 613static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
 614		   gpa_t gpa)
 615{
 616	hpa_t hpa;
 617	int rc;
 618
 619	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 620	if (rc == -EINVAL) {
 621		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 622		if (!rc)
 623			rc = 1;
 624	}
 625	if (!rc)
 626		vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
 627	return rc;
 628}
 629
 630/*
 631 * Inject a fault into guest 2.
 632 *
 633 * Returns: - > 0 if control has to be given to guest 2
 634 *            < 0 if an error occurred during injection.
 635 */
 636static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
 637			bool write_flag)
 638{
 639	struct kvm_s390_pgm_info pgm = {
 640		.code = code,
 641		.trans_exc_code =
 642			/* 0-51: virtual address */
 643			(vaddr & 0xfffffffffffff000UL) |
 644			/* 52-53: store / fetch */
 645			(((unsigned int) !write_flag) + 1) << 10,
  646			/* 62-63: asce id (always primary == 0) */
 647		.exc_access_id = 0, /* always primary */
 648		.op_access_id = 0, /* not MVPG */
 649	};
 650	int rc;
 651
 652	if (code == PGM_PROTECTION)
 653		pgm.trans_exc_code |= 0x4UL;
 654
 655	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
 656	return rc ? rc : 1;
 657}
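/*
 * Illustrative example: a failing fetch (write_flag == 0) at vaddr
 * 0x12345678 yields trans_exc_code == 0x12345800, i.e. the page address
 * 0x12345000 in bits 0-51 plus 2 << 10 ("fetch") in bits 52-53; a store
 * would use 1 << 10 instead.
 */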
 658
 659/*
 660 * Handle a fault during vsie execution on a gmap shadow.
 661 *
 662 * Returns: - 0 if the fault was resolved
 663 *          - > 0 if control has to be given to guest 2
 664 *          - < 0 if an error occurred
 665 */
 666static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 667{
 668	int rc;
 669
 670	if (current->thread.gmap_int_code == PGM_PROTECTION)
 671		/* we can directly forward all protection exceptions */
 672		return inject_fault(vcpu, PGM_PROTECTION,
 673				    current->thread.gmap_addr, 1);
 674
 675	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 676				   current->thread.gmap_addr);
 677	if (rc > 0) {
 678		rc = inject_fault(vcpu, rc,
 679				  current->thread.gmap_addr,
 680				  current->thread.gmap_write_flag);
 681		if (rc >= 0)
 682			vsie_page->fault_addr = current->thread.gmap_addr;
 683	}
 684	return rc;
 685}
 686
 687/*
 688 * Retry the previous fault that required guest 2 intervention. This avoids
 689 * one superfluous SIE re-entry and direct exit.
 690 *
 691 * Will ignore any errors. The next SIE fault will do proper fault handling.
 692 */
 693static void handle_last_fault(struct kvm_vcpu *vcpu,
 694			      struct vsie_page *vsie_page)
 695{
 696	if (vsie_page->fault_addr)
 697		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 698				      vsie_page->fault_addr);
 699	vsie_page->fault_addr = 0;
 700}
 701
 702static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
 703{
 704	vsie_page->scb_s.icptcode = 0;
 705}
 706
 707/* rewind the psw and clear the vsie icpt, so we can retry execution */
 708static void retry_vsie_icpt(struct vsie_page *vsie_page)
 709{
 710	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 711	int ilen = insn_length(scb_s->ipa >> 8);
 712
 713	/* take care of EXECUTE instructions */
 714	if (scb_s->icptstatus & 1) {
 715		ilen = (scb_s->icptstatus >> 4) & 0x6;
 716		if (!ilen)
 717			ilen = 4;
 718	}
 719	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
 720	clear_vsie_icpt(vsie_page);
 721}
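/*
 * Note: if the intercepted instruction was the target of an
 * EXECUTE-type instruction (icptstatus bit 0 set), the length taken
 * from icptstatus is that of the EXECUTE itself (0 encodes 4 bytes),
 * so the PSW is rewound to the EXECUTE and not to its target.
 */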
 722
 723/*
 724 * Try to shadow + enable the guest 2 provided facility list.
 725 * Retry instruction execution if enabled for and provided by guest 2.
 726 *
 727 * Returns: - 0 if handled (retry or guest 2 icpt)
 728 *          - > 0 if control has to be given to guest 2
 729 */
 730static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 731{
 732	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 733	__u32 fac = vsie_page->scb_o->fac & 0x7ffffff8U;
 734
 735	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
 736		retry_vsie_icpt(vsie_page);
 737		if (read_guest_real(vcpu, fac, &vsie_page->fac,
 738				    sizeof(vsie_page->fac)))
 739			return set_validity_icpt(scb_s, 0x1090U);
 740		scb_s->fac = (__u32)(__u64) &vsie_page->fac;
 741	}
 742	return 0;
 743}
 744
 745/*
 746 * Run the vsie on a shadow scb and a shadow gmap, without any further
 747 * sanity checks, handling SIE faults.
 748 *
 749 * Returns: - 0 everything went fine
 750 *          - > 0 if control has to be given to guest 2
 751 *          - < 0 if an error occurred
 752 */
 753static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 754{
 755	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 756	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 757	int rc;
 758
 759	handle_last_fault(vcpu, vsie_page);
 760
 761	if (need_resched())
 762		schedule();
 763	if (test_cpu_flag(CIF_MCCK_PENDING))
 764		s390_handle_mcck();
 765
 766	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 767	local_irq_disable();
 768	guest_enter_irqoff();
 769	local_irq_enable();
 770
 771	rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
 772
 773	local_irq_disable();
 774	guest_exit_irqoff();
 775	local_irq_enable();
 776	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 777
 778	if (rc > 0)
 779		rc = 0; /* we could still have an icpt */
 780	else if (rc == -EFAULT)
 781		return handle_fault(vcpu, vsie_page);
 782
 783	switch (scb_s->icptcode) {
 784	case ICPT_INST:
 785		if (scb_s->ipa == 0xb2b0)
 786			rc = handle_stfle(vcpu, vsie_page);
 787		break;
 788	case ICPT_STOP:
 789		/* stop not requested by g2 - must have been a kick */
 790		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
 791			clear_vsie_icpt(vsie_page);
 792		break;
 793	case ICPT_VALIDITY:
 794		if ((scb_s->ipa & 0xf000) != 0xf000)
 795			scb_s->ipa += 0x1000;
 796		break;
 797	}
 798	return rc;
 799}
 800
 801static void release_gmap_shadow(struct vsie_page *vsie_page)
 802{
 803	if (vsie_page->gmap)
 804		gmap_put(vsie_page->gmap);
 805	WRITE_ONCE(vsie_page->gmap, NULL);
 806	prefix_unmapped(vsie_page);
 807}
 808
 809static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
 810			       struct vsie_page *vsie_page)
 811{
 812	unsigned long asce;
 813	union ctlreg0 cr0;
 814	struct gmap *gmap;
 815	int edat;
 816
 817	asce = vcpu->arch.sie_block->gcr[1];
 818	cr0.val = vcpu->arch.sie_block->gcr[0];
 819	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
 820	edat += edat && test_kvm_facility(vcpu->kvm, 78);
 821
 822	/*
 823	 * ASCE or EDAT could have changed since last icpt, or the gmap
 824	 * we're holding has been unshadowed. If the gmap is still valid,
 825	 * we can safely reuse it.
 826	 */
 827	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
 828		return 0;
 829
 830	/* release the old shadow - if any, and mark the prefix as unmapped */
 831	release_gmap_shadow(vsie_page);
 832	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
 833	if (IS_ERR(gmap))
 834		return PTR_ERR(gmap);
 835	gmap->private = vcpu->kvm;
 836	WRITE_ONCE(vsie_page->gmap, gmap);
 837	return 0;
 838}
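/*
 * Note: "edat" above encodes the usable DAT-enhancement level (0, 1
 * with facility 8, 2 with facilities 8 and 78); together with the g3
 * asce it forms the key that gmap_shadow_valid() checks for reuse.
 */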
 839
 840/*
 841 * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
 842 */
 843static void register_shadow_scb(struct kvm_vcpu *vcpu,
 844				struct vsie_page *vsie_page)
 845{
 846	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 847
 848	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
 849	/*
  850	 * External calls have to lead to a kick of the vcpu and
  851	 * therefore out of the vsie -> simulate the wait state.
 852	 */
 853	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 854	/*
 855	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
 856	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
 857	 */
 858	preempt_disable();
 859	scb_s->epoch += vcpu->kvm->arch.epoch;
 860	preempt_enable();
 861}
 862
 863/*
 864 * Unregister a shadow scb from a VCPU.
 865 */
 866static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
 867{
 868	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 869	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
 870}
 871
 872/*
 873 * Run the vsie on a shadowed scb, managing the gmap shadow, handling
 874 * prefix pages and faults.
 875 *
 876 * Returns: - 0 if no errors occurred
 877 *          - > 0 if control has to be given to guest 2
 878 *          - -ENOMEM if out of memory
 879 */
 880static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 881{
 882	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 883	int rc = 0;
 884
 885	while (1) {
 886		rc = acquire_gmap_shadow(vcpu, vsie_page);
 887		if (!rc)
 888			rc = map_prefix(vcpu, vsie_page);
 889		if (!rc) {
 890			gmap_enable(vsie_page->gmap);
 891			update_intervention_requests(vsie_page);
 892			rc = do_vsie_run(vcpu, vsie_page);
 893			gmap_enable(vcpu->arch.gmap);
 894		}
 895		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);
 896
 897		if (rc == -EAGAIN)
 898			rc = 0;
 899		if (rc || scb_s->icptcode || signal_pending(current) ||
 900		    kvm_s390_vcpu_has_irq(vcpu, 0))
 901			break;
  902	}
 903
 904	if (rc == -EFAULT) {
 905		/*
  906		 * Addressing exceptions are always presented as intercepts.
 907		 * As addressing exceptions are suppressing and our guest 3 PSW
 908		 * points at the responsible instruction, we have to
 909		 * forward the PSW and set the ilc. If we can't read guest 3
 910		 * instruction, we can use an arbitrary ilc. Let's always use
 911		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
 912		 * memory. (we could also fake the shadow so the hardware
 913		 * handles it).
 914		 */
 915		scb_s->icptcode = ICPT_PROGI;
 916		scb_s->iprcc = PGM_ADDRESSING;
 917		scb_s->pgmilc = 4;
 918		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
 919	}
 920	return rc;
 921}
 922
 923/*
 924 * Get or create a vsie page for a scb address.
 925 *
 926 * Returns: - address of a vsie page (cached or new one)
 927 *          - NULL if the same scb address is already used by another VCPU
 928 *          - ERR_PTR(-ENOMEM) if out of memory
 929 */
 930static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
 931{
 932	struct vsie_page *vsie_page;
 933	struct page *page;
 934	int nr_vcpus;
 935
 936	rcu_read_lock();
 937	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
 938	rcu_read_unlock();
 939	if (page) {
 940		if (page_ref_inc_return(page) == 2)
 941			return page_to_virt(page);
 942		page_ref_dec(page);
 943	}
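	/*
	 * Note: a cached but currently unused vsie page has a reference
	 * count of 1, so page_ref_inc_return() == 2 atomically claims it;
	 * any other value means another VCPU holds it and we back off.
	 */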
 944
 945	/*
 946	 * We want at least #online_vcpus shadows, so every VCPU can execute
 947	 * the VSIE in parallel.
 948	 */
 949	nr_vcpus = atomic_read(&kvm->online_vcpus);
 950
 951	mutex_lock(&kvm->arch.vsie.mutex);
 952	if (kvm->arch.vsie.page_count < nr_vcpus) {
 953		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA);
 954		if (!page) {
 955			mutex_unlock(&kvm->arch.vsie.mutex);
 956			return ERR_PTR(-ENOMEM);
 957		}
 958		page_ref_inc(page);
 959		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
 960		kvm->arch.vsie.page_count++;
 961	} else {
 962		/* reuse an existing entry that belongs to nobody */
 963		while (true) {
 964			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
 965			if (page_ref_inc_return(page) == 2)
 966				break;
 967			page_ref_dec(page);
 968			kvm->arch.vsie.next++;
 969			kvm->arch.vsie.next %= nr_vcpus;
 970		}
 971		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
 972	}
 973	page->index = addr;
 974	/* double use of the same address */
 975	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
 976		page_ref_dec(page);
 977		mutex_unlock(&kvm->arch.vsie.mutex);
 978		return NULL;
 979	}
 980	mutex_unlock(&kvm->arch.vsie.mutex);
 981
 982	vsie_page = page_to_virt(page);
 983	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
 984	release_gmap_shadow(vsie_page);
 985	vsie_page->fault_addr = 0;
 986	vsie_page->scb_s.ihcpu = 0xffffU;
 987	return vsie_page;
 988}
 989
 990/* put a vsie page acquired via get_vsie_page */
 991static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
 992{
 993	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);
 994
 995	page_ref_dec(page);
 996}
 997
 998int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
 999{
1000	struct vsie_page *vsie_page;
1001	unsigned long scb_addr;
1002	int rc;
1003
1004	vcpu->stat.instruction_sie++;
1005	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
1006		return -EOPNOTSUPP;
1007	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1008		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1009
1010	BUILD_BUG_ON(sizeof(struct vsie_page) != 4096);
1011	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);
1012
1013	/* 512 byte alignment */
1014	if (unlikely(scb_addr & 0x1ffUL))
1015		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
1016
1017	if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0))
1018		return 0;
1019
1020	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
1021	if (IS_ERR(vsie_page))
1022		return PTR_ERR(vsie_page);
1023	else if (!vsie_page)
1024		/* double use of sie control block - simply do nothing */
1025		return 0;
1026
1027	rc = pin_scb(vcpu, vsie_page, scb_addr);
1028	if (rc)
1029		goto out_put;
1030	rc = shadow_scb(vcpu, vsie_page);
1031	if (rc)
1032		goto out_unpin_scb;
1033	rc = pin_blocks(vcpu, vsie_page);
1034	if (rc)
1035		goto out_unshadow;
1036	register_shadow_scb(vcpu, vsie_page);
1037	rc = vsie_run(vcpu, vsie_page);
1038	unregister_shadow_scb(vcpu);
1039	unpin_blocks(vcpu, vsie_page);
1040out_unshadow:
1041	unshadow_scb(vcpu, vsie_page);
1042out_unpin_scb:
1043	unpin_scb(vcpu, vsie_page, scb_addr);
1044out_put:
1045	put_vsie_page(vcpu->kvm, vsie_page);
1046
1047	return rc < 0 ? rc : 0;
1048}
1049
1050/* Init the vsie data structures. To be called when a vm is initialized. */
1051void kvm_s390_vsie_init(struct kvm *kvm)
1052{
1053	mutex_init(&kvm->arch.vsie.mutex);
1054	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL);
1055}
1056
1057/* Destroy the vsie data structures. To be called when a vm is destroyed. */
1058void kvm_s390_vsie_destroy(struct kvm *kvm)
1059{
1060	struct vsie_page *vsie_page;
1061	struct page *page;
1062	int i;
1063
1064	mutex_lock(&kvm->arch.vsie.mutex);
1065	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
1066		page = kvm->arch.vsie.pages[i];
1067		kvm->arch.vsie.pages[i] = NULL;
1068		vsie_page = page_to_virt(page);
1069		release_gmap_shadow(vsie_page);
1070		/* free the radix tree entry */
1071		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
1072		__free_page(page);
1073	}
1074	kvm->arch.vsie.page_count = 0;
1075	mutex_unlock(&kvm->arch.vsie.mutex);
1076}
1077
1078void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
1079{
1080	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);
1081
1082	/*
1083	 * Even if the VCPU lets go of the shadow sie block reference, it is
1084	 * still valid in the cache. So we can safely kick it.
1085	 */
1086	if (scb) {
1087		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
1088		if (scb->prog0c & PROG_IN_SIE)
1089			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
1090	}
1091}
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * kvm nested virtualization support for s390x
   4 *
   5 * Copyright IBM Corp. 2016, 2018
   6 *
   7 *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
   8 */
   9#include <linux/vmalloc.h>
  10#include <linux/kvm_host.h>
  11#include <linux/bug.h>
  12#include <linux/list.h>
  13#include <linux/bitmap.h>
  14#include <linux/sched/signal.h>
  15
  16#include <asm/gmap.h>
  17#include <asm/mmu_context.h>
  18#include <asm/sclp.h>
  19#include <asm/nmi.h>
  20#include <asm/dis.h>
  21#include "kvm-s390.h"
  22#include "gaccess.h"
  23
  24struct vsie_page {
  25	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
  26	/*
  27	 * the backup info for machine check. ensure it's at
  28	 * the same offset as that in struct sie_page!
  29	 */
  30	struct mcck_volatile_info mcck_info;    /* 0x0200 */
  31	/*
  32	 * The pinned original scb. Be aware that other VCPUs can modify
  33	 * it while we read from it. Values that are used for conditions or
  34	 * are reused conditionally, should be accessed via READ_ONCE.
  35	 */
  36	struct kvm_s390_sie_block *scb_o;	/* 0x0218 */
  37	/* the shadow gmap in use by the vsie_page */
  38	struct gmap *gmap;			/* 0x0220 */
  39	/* address of the last reported fault to guest2 */
  40	unsigned long fault_addr;		/* 0x0228 */
  41	/* calculated guest addresses of satellite control blocks */
  42	gpa_t sca_gpa;				/* 0x0230 */
  43	gpa_t itdba_gpa;			/* 0x0238 */
  44	gpa_t gvrd_gpa;				/* 0x0240 */
  45	gpa_t riccbd_gpa;			/* 0x0248 */
  46	gpa_t sdnx_gpa;				/* 0x0250 */
  47	__u8 reserved[0x0700 - 0x0258];		/* 0x0258 */
  48	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
  49	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
  50};
  51
  52/* trigger a validity icpt for the given scb */
  53static int set_validity_icpt(struct kvm_s390_sie_block *scb,
  54			     __u16 reason_code)
  55{
  56	scb->ipa = 0x1000;
  57	scb->ipb = ((__u32) reason_code) << 16;
  58	scb->icptcode = ICPT_VALIDITY;
  59	return 1;
  60}
  61
  62/* mark the prefix as unmapped, this will block the VSIE */
  63static void prefix_unmapped(struct vsie_page *vsie_page)
  64{
  65	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
  66}
  67
  68/* mark the prefix as unmapped and wait until the VSIE has been left */
  69static void prefix_unmapped_sync(struct vsie_page *vsie_page)
  70{
  71	prefix_unmapped(vsie_page);
  72	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
  73		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
  74	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
  75		cpu_relax();
  76}
  77
  78/* mark the prefix as mapped, this will allow the VSIE to run */
  79static void prefix_mapped(struct vsie_page *vsie_page)
  80{
  81	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
  82}
  83
  84/* test if the prefix is mapped into the gmap shadow */
  85static int prefix_is_mapped(struct vsie_page *vsie_page)
  86{
  87	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
  88}
  89
  90/* copy the updated intervention request bits into the shadow scb */
  91static void update_intervention_requests(struct vsie_page *vsie_page)
  92{
  93	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
  94	int cpuflags;
  95
  96	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
  97	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
  98	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
  99}
 100
 101/* shadow (filter and validate) the cpuflags  */
 102static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 103{
 104	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 105	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 106	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);
 107
 108	/* we don't allow ESA/390 guests */
 109	if (!(cpuflags & CPUSTAT_ZARCH))
 110		return set_validity_icpt(scb_s, 0x0001U);
 111
 112	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
 113		return set_validity_icpt(scb_s, 0x0001U);
 114	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
 115		return set_validity_icpt(scb_s, 0x0007U);
 116
 117	/* intervention requests will be set later */
 118	newflags = CPUSTAT_ZARCH;
 119	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
 120		newflags |= CPUSTAT_GED;
 121	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
 122		if (cpuflags & CPUSTAT_GED)
 123			return set_validity_icpt(scb_s, 0x0001U);
 124		newflags |= CPUSTAT_GED2;
 125	}
 126	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
 127		newflags |= cpuflags & CPUSTAT_P;
 128	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
 129		newflags |= cpuflags & CPUSTAT_SM;
 130	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
 131		newflags |= cpuflags & CPUSTAT_IBS;
 132	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
 133		newflags |= cpuflags & CPUSTAT_KSS;
 134
 135	atomic_set(&scb_s->cpuflags, newflags);
 136	return 0;
 137}
 138/* Copy to APCB FORMAT1 from APCB FORMAT0 */
 139static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
 140			unsigned long apcb_o, struct kvm_s390_apcb1 *apcb_h)
 141{
 142	struct kvm_s390_apcb0 tmp;
 143
 144	if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0)))
 145		return -EFAULT;
 146
 147	apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
 148	apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
 149	apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;
 150
 151	return 0;
 152
 153}
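/*
 * Note: format-0 provides a single 64-bit word per mask while format-1
 * masks are wider, so the conversion above fills only the first word of
 * each format-1 mask; aqm and adm are additionally limited to their
 * first 16 bits, the portion that is valid in a format-0 apcb.
 */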
 154
 155/**
 156 * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
 157 * @vcpu: pointer to the virtual CPU
 158 * @apcb_s: pointer to start of apcb in the shadow crycb
 159 * @apcb_o: pointer to start of original apcb in the guest2
 160 * @apcb_h: pointer to start of apcb in the guest1
 161 *
  162 * Returns 0 on success; -EFAULT on error reading the guest apcb
 163 */
 164static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
 165			unsigned long apcb_o, unsigned long *apcb_h)
 166{
 167	if (read_guest_real(vcpu, apcb_o, apcb_s,
 168			    sizeof(struct kvm_s390_apcb0)))
 169		return -EFAULT;
 170
  171	bitmap_and(apcb_s, apcb_s, apcb_h, BITS_PER_BYTE * sizeof(struct kvm_s390_apcb0));
 172
 173	return 0;
 174}
 175
 176/**
 177 * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
 178 * @vcpu: pointer to the virtual CPU
 179 * @apcb_s: pointer to start of apcb in the shadow crycb
 180 * @apcb_o: pointer to start of original guest apcb
 181 * @apcb_h: pointer to start of apcb in the host
 182 *
  183 * Returns 0 on success; -EFAULT on error reading the guest apcb
 184 */
 185static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
 186			unsigned long apcb_o,
 187			unsigned long *apcb_h)
 188{
 189	if (read_guest_real(vcpu, apcb_o, apcb_s,
 190			    sizeof(struct kvm_s390_apcb1)))
 191		return -EFAULT;
 192
  193	bitmap_and(apcb_s, apcb_s, apcb_h, BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1));
 194
 195	return 0;
 196}
 197
 198/**
 199 * setup_apcb - Create a shadow copy of the apcb.
 200 * @vcpu: pointer to the virtual CPU
 201 * @crycb_s: pointer to shadow crycb
 202 * @crycb_o: pointer to original guest crycb
 203 * @crycb_h: pointer to the host crycb
 204 * @fmt_o: format of the original guest crycb.
 205 * @fmt_h: format of the host crycb.
 206 *
 207 * Checks the compatibility between the guest and host crycb and calls the
 208 * appropriate copy function.
 209 *
  210 * Return 0 on success, or a negative error number if the crycbs are
  211 * incompatible or the guest crycb cannot be read.
 211 */
 212static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
 213	       const u32 crycb_o,
 214	       struct kvm_s390_crypto_cb *crycb_h,
 215	       int fmt_o, int fmt_h)
 216{
 217	struct kvm_s390_crypto_cb *crycb;
 218
 219	crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o;
 220
 221	switch (fmt_o) {
 222	case CRYCB_FORMAT2:
 223		if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK))
 224			return -EACCES;
 225		if (fmt_h != CRYCB_FORMAT2)
 226			return -EINVAL;
 227		return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
 228				    (unsigned long) &crycb->apcb1,
 229				    (unsigned long *)&crycb_h->apcb1);
 230	case CRYCB_FORMAT1:
 231		switch (fmt_h) {
 232		case CRYCB_FORMAT2:
 233			return setup_apcb10(vcpu, &crycb_s->apcb1,
 234					    (unsigned long) &crycb->apcb0,
 235					    &crycb_h->apcb1);
 236		case CRYCB_FORMAT1:
 237			return setup_apcb00(vcpu,
 238					    (unsigned long *) &crycb_s->apcb0,
 239					    (unsigned long) &crycb->apcb0,
 240					    (unsigned long *) &crycb_h->apcb0);
 241		}
 242		break;
 243	case CRYCB_FORMAT0:
 244		if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK))
 245			return -EACCES;
 246
 247		switch (fmt_h) {
 248		case CRYCB_FORMAT2:
 249			return setup_apcb10(vcpu, &crycb_s->apcb1,
 250					    (unsigned long) &crycb->apcb0,
 251					    &crycb_h->apcb1);
 252		case CRYCB_FORMAT1:
 253		case CRYCB_FORMAT0:
 254			return setup_apcb00(vcpu,
 255					    (unsigned long *) &crycb_s->apcb0,
 256					    (unsigned long) &crycb->apcb0,
 257					    (unsigned long *) &crycb_h->apcb0);
 258		}
 259	}
 260	return -EINVAL;
 261}
 262
 263/**
 264 * shadow_crycb - Create a shadow copy of the crycb block
 265 * @vcpu: a pointer to the virtual CPU
  266 * @vsie_page: a pointer to internal data used for the vSIE
 267 *
 268 * Create a shadow copy of the crycb block and setup key wrapping, if
 269 * requested for guest 3 and enabled for guest 2.
 270 *
 271 * We accept format-1 or format-2, but we convert format-1 into format-2
 272 * in the shadow CRYCB.
 273 * Using format-2 enables the firmware to choose the right format when
 274 * scheduling the SIE.
 275 * There is nothing to do for format-0.
 276 *
  277 * This function centralizes the issuing of set_validity_icpt() for all
 278 * the subfunctions working on the crycb.
 279 *
 280 * Returns: - 0 if shadowed or nothing to do
 281 *          - > 0 if control has to be given to guest 2
 282 */
 283static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 284{
 285	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 286	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 287	const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
 288	const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
 289	unsigned long *b1, *b2;
 290	u8 ecb3_flags;
 291	u32 ecd_flags;
 292	int apie_h;
 293	int apie_s;
 294	int key_msk = test_kvm_facility(vcpu->kvm, 76);
 295	int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
 296	int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
 297	int ret = 0;
 298
 299	scb_s->crycbd = 0;
 300
 301	apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
 302	apie_s = apie_h & scb_o->eca;
 303	if (!apie_s && (!key_msk || (fmt_o == CRYCB_FORMAT0)))
 304		return 0;
 305
 306	if (!crycb_addr)
 307		return set_validity_icpt(scb_s, 0x0039U);
 308
 309	if (fmt_o == CRYCB_FORMAT1)
 310		if ((crycb_addr & PAGE_MASK) !=
 311		    ((crycb_addr + 128) & PAGE_MASK))
 312			return set_validity_icpt(scb_s, 0x003CU);
 313
 314	if (apie_s) {
 315		ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
 316				 vcpu->kvm->arch.crypto.crycb,
 317				 fmt_o, fmt_h);
 318		if (ret)
 319			goto end;
 320		scb_s->eca |= scb_o->eca & ECA_APIE;
 321	}
 322
 323	/* we may only allow it if enabled for guest 2 */
 324	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
 325		     (ECB3_AES | ECB3_DEA);
 326	ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC;
 327	if (!ecb3_flags && !ecd_flags)
 328		goto end;
 329
 330	/* copy only the wrapping keys */
 331	if (read_guest_real(vcpu, crycb_addr + 72,
 332			    vsie_page->crycb.dea_wrapping_key_mask, 56))
 333		return set_validity_icpt(scb_s, 0x0035U);
 334
 335	scb_s->ecb3 |= ecb3_flags;
 336	scb_s->ecd |= ecd_flags;
 337
 338	/* xor both blocks in one run */
 339	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
 340	b2 = (unsigned long *)
 341			    vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
 342	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
 343	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
 344end:
 345	switch (ret) {
 346	case -EINVAL:
 347		return set_validity_icpt(scb_s, 0x0022U);
 348	case -EFAULT:
 349		return set_validity_icpt(scb_s, 0x0035U);
 350	case -EACCES:
 351		return set_validity_icpt(scb_s, 0x003CU);
 352	}
 353	scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
 354	return 0;
 355}
 356
 357/* shadow (round up/down) the ibc to avoid validity icpt */
 358static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 359{
 360	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 361	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 362	/* READ_ONCE does not work on bitfields - use a temporary variable */
 363	const uint32_t __new_ibc = scb_o->ibc;
 364	const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
 365	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;
 366
 367	scb_s->ibc = 0;
 368	/* ibc installed in g2 and requested for g3 */
 369	if (vcpu->kvm->arch.model.ibc && new_ibc) {
 370		scb_s->ibc = new_ibc;
  371		/* take care of the minimum ibc level of the machine */
 372		if (scb_s->ibc < min_ibc)
 373			scb_s->ibc = min_ibc;
 374		/* take care of the maximum ibc level set for the guest */
 375		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
 376			scb_s->ibc = vcpu->kvm->arch.model.ibc;
 377	}
 378}
 379
 380/* unshadow the scb, copying parameters back to the real scb */
 381static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 382{
 383	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 384	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 385
 386	/* interception */
 387	scb_o->icptcode = scb_s->icptcode;
 388	scb_o->icptstatus = scb_s->icptstatus;
 389	scb_o->ipa = scb_s->ipa;
 390	scb_o->ipb = scb_s->ipb;
 391	scb_o->gbea = scb_s->gbea;
 392
 393	/* timer */
 394	scb_o->cputm = scb_s->cputm;
 395	scb_o->ckc = scb_s->ckc;
 396	scb_o->todpr = scb_s->todpr;
 397
 398	/* guest state */
 399	scb_o->gpsw = scb_s->gpsw;
 400	scb_o->gg14 = scb_s->gg14;
 401	scb_o->gg15 = scb_s->gg15;
 402	memcpy(scb_o->gcr, scb_s->gcr, 128);
 403	scb_o->pp = scb_s->pp;
 404
 405	/* branch prediction */
 406	if (test_kvm_facility(vcpu->kvm, 82)) {
 407		scb_o->fpf &= ~FPF_BPBC;
 408		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
 409	}
 410
 411	/* interrupt intercept */
 412	switch (scb_s->icptcode) {
 413	case ICPT_PROGI:
 414	case ICPT_INSTPROGI:
 415	case ICPT_EXTINT:
 416		memcpy((void *)((u64)scb_o + 0xc0),
 417		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
 418		break;
 419	case ICPT_PARTEXEC:
 420		/* MVPG only */
 421		memcpy((void *)((u64)scb_o + 0xc0),
 422		       (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
 423		break;
 424	}
 425
 426	if (scb_s->ihcpu != 0xffffU)
 427		scb_o->ihcpu = scb_s->ihcpu;
 428}
 429
 430/*
  431 * Set up the shadow scb by copying and checking the relevant parts of the g2
 432 * provided scb.
 433 *
 434 * Returns: - 0 if the scb has been shadowed
 435 *          - > 0 if control has to be given to guest 2
 436 */
 437static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 438{
 439	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 440	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 441	/* READ_ONCE does not work on bitfields - use a temporary variable */
 442	const uint32_t __new_prefix = scb_o->prefix;
 443	const uint32_t new_prefix = READ_ONCE(__new_prefix);
 444	const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
 445	bool had_tx = scb_s->ecb & ECB_TE;
 446	unsigned long new_mso = 0;
 447	int rc;
 448
 449	/* make sure we don't have any leftovers when reusing the scb */
 450	scb_s->icptcode = 0;
 451	scb_s->eca = 0;
 452	scb_s->ecb = 0;
 453	scb_s->ecb2 = 0;
 454	scb_s->ecb3 = 0;
 455	scb_s->ecd = 0;
 456	scb_s->fac = 0;
 457	scb_s->fpf = 0;
 458
 459	rc = prepare_cpuflags(vcpu, vsie_page);
 460	if (rc)
 461		goto out;
 462
 463	/* timer */
 464	scb_s->cputm = scb_o->cputm;
 465	scb_s->ckc = scb_o->ckc;
 466	scb_s->todpr = scb_o->todpr;
 467	scb_s->epoch = scb_o->epoch;
 468
 469	/* guest state */
 470	scb_s->gpsw = scb_o->gpsw;
 471	scb_s->gg14 = scb_o->gg14;
 472	scb_s->gg15 = scb_o->gg15;
 473	memcpy(scb_s->gcr, scb_o->gcr, 128);
 474	scb_s->pp = scb_o->pp;
 475
 476	/* interception / execution handling */
 477	scb_s->gbea = scb_o->gbea;
 478	scb_s->lctl = scb_o->lctl;
 479	scb_s->svcc = scb_o->svcc;
 480	scb_s->ictl = scb_o->ictl;
 481	/*
 482	 * SKEY handling functions can't deal with false setting of PTE invalid
 483	 * bits. Therefore we cannot provide interpretation and would later
 484	 * have to provide own emulation handlers.
 485	 */
 486	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
 487		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
 488
 489	scb_s->icpua = scb_o->icpua;
 490
 491	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
 492		new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
 493	/* if the hva of the prefix changes, we have to remap the prefix */
 494	if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
 495		prefix_unmapped(vsie_page);
 496	 /* SIE will do mso/msl validity and exception checks for us */
 497	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
 498	scb_s->mso = new_mso;
 499	scb_s->prefix = new_prefix;
 500
  501	/* We definitely have to flush the TLB if this scb never ran */
 502	if (scb_s->ihcpu != 0xffffU)
 503		scb_s->ihcpu = scb_o->ihcpu;
 504
 505	/* MVPG and Protection Exception Interpretation are always available */
 506	scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
 507	/* Host-protection-interruption introduced with ESOP */
 508	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
 509		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
 510	/* transactional execution */
 511	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
  512		/* remap the prefix if tx is toggled on */
 513		if (!had_tx)
 514			prefix_unmapped(vsie_page);
 515		scb_s->ecb |= ECB_TE;
 516	}
 517	/* branch prediction */
 518	if (test_kvm_facility(vcpu->kvm, 82))
 519		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
 520	/* SIMD */
 521	if (test_kvm_facility(vcpu->kvm, 129)) {
 522		scb_s->eca |= scb_o->eca & ECA_VX;
 523		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
 524	}
 525	/* Run-time-Instrumentation */
 526	if (test_kvm_facility(vcpu->kvm, 64))
 527		scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
 528	/* Instruction Execution Prevention */
 529	if (test_kvm_facility(vcpu->kvm, 130))
 530		scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
 531	/* Guarded Storage */
 532	if (test_kvm_facility(vcpu->kvm, 133)) {
 533		scb_s->ecb |= scb_o->ecb & ECB_GS;
 534		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
 535	}
 536	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
 537		scb_s->eca |= scb_o->eca & ECA_SII;
 538	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
 539		scb_s->eca |= scb_o->eca & ECA_IB;
 540	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
 541		scb_s->eca |= scb_o->eca & ECA_CEI;
 542	/* Epoch Extension */
 543	if (test_kvm_facility(vcpu->kvm, 139))
 544		scb_s->ecd |= scb_o->ecd & ECD_MEF;
 545
 546	/* etoken */
 547	if (test_kvm_facility(vcpu->kvm, 156))
 548		scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;
 549
 550	scb_s->hpid = HPID_VSIE;
 551
 552	prepare_ibc(vcpu, vsie_page);
 553	rc = shadow_crycb(vcpu, vsie_page);
 554out:
 555	if (rc)
 556		unshadow_scb(vcpu, vsie_page);
 557	return rc;
 558}
 559
 560void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
 561				 unsigned long end)
 562{
 563	struct kvm *kvm = gmap->private;
 564	struct vsie_page *cur;
 565	unsigned long prefix;
 566	struct page *page;
 567	int i;
 568
 569	if (!gmap_is_shadow(gmap))
 570		return;
 571	if (start >= 1UL << 31)
 572		/* We are only interested in prefix pages */
 573		return;
 574
 575	/*
 576	 * Only new shadow blocks are added to the list during runtime,
 577	 * therefore we can safely reference them all the time.
 578	 */
 579	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
 580		page = READ_ONCE(kvm->arch.vsie.pages[i]);
 581		if (!page)
 582			continue;
 583		cur = page_to_virt(page);
 584		if (READ_ONCE(cur->gmap) != gmap)
 585			continue;
 586		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
 587		/* with mso/msl, the prefix lies at an offset */
 588		prefix += cur->scb_s.mso;
 589		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
 590			prefix_unmapped_sync(cur);
 591	}
 592}
 593
 594/*
 595 * Map the first prefix page and if tx is enabled also the second prefix page.
 596 *
 597 * The prefix will be protected, a gmap notifier will inform about unmaps.
 598 * The shadow scb must not be executed until the prefix is remapped, this is
 599 * guaranteed by properly handling PROG_REQUEST.
 600 *
  601 * Returns: - 0 if successfully mapped or already mapped
 602 *          - > 0 if control has to be given to guest 2
 603 *          - -EAGAIN if the caller can retry immediately
 604 *          - -ENOMEM if out of memory
 605 */
 606static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 607{
 608	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 609	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
 610	int rc;
 611
 612	if (prefix_is_mapped(vsie_page))
 613		return 0;
 614
 615	/* mark it as mapped so we can catch any concurrent unmappers */
 616	prefix_mapped(vsie_page);
 617
 618	/* with mso/msl, the prefix lies at offset *mso* */
 619	prefix += scb_s->mso;
 620
 621	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
 622	if (!rc && (scb_s->ecb & ECB_TE))
 623		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 624					   prefix + PAGE_SIZE);
 625	/*
 626	 * We don't have to mprotect, we will be called for all unshadows.
 627	 * SIE will detect if protection applies and trigger a validity.
 628	 */
 629	if (rc)
 630		prefix_unmapped(vsie_page);
 631	if (rc > 0 || rc == -EFAULT)
 632		rc = set_validity_icpt(scb_s, 0x0037U);
 633	return rc;
 634}
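/*
 * Usage sketch (illustrative only): callers treat -EAGAIN as "retry the
 * whole acquire/map/run cycle immediately", e.g.
 *
 *	rc = map_prefix(vcpu, vsie_page);
 *	if (rc == -EAGAIN)
 *		rc = 0;		// vsie_run() just loops again
 */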
 635
 636/*
 637 * Pin the guest page given by gpa and set hpa to the pinned host address.
 638 * Will always be pinned writable.
 639 *
 640 * Returns: - 0 on success
 641 *          - -EINVAL if the gpa is not valid guest storage
 642 */
 643static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
 644{
 645	struct page *page;
 646
 647	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
 648	if (is_error_page(page))
 649		return -EINVAL;
 650	*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
 651	return 0;
 652}
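/*
 * Pairing sketch (illustrative only): every successful pin must later be
 * matched by an unpin with the same gpa:
 *
 *	hpa_t hpa;
 *
 *	if (!pin_guest_page(kvm, gpa, &hpa)) {
 *		// ... hand hpa to the shadow scb ...
 *		unpin_guest_page(kvm, gpa, hpa);
 *	}
 */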
 653
 654/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
 655static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
 656{
 657	kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);
 658	/* mark the page always as dirty for migration */
 659	mark_page_dirty(kvm, gpa_to_gfn(gpa));
 660}
 661
 662/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
 663static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 664{
 665	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 666	hpa_t hpa;
 667
 668	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
 669	if (hpa) {
 670		unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);
 671		vsie_page->sca_gpa = 0;
 672		scb_s->scaol = 0;
 673		scb_s->scaoh = 0;
 674	}
 675
 676	hpa = scb_s->itdba;
 677	if (hpa) {
 678		unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
 679		vsie_page->itdba_gpa = 0;
 680		scb_s->itdba = 0;
 681	}
 682
 683	hpa = scb_s->gvrd;
 684	if (hpa) {
 685		unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa);
 686		vsie_page->gvrd_gpa = 0;
 687		scb_s->gvrd = 0;
 688	}
 689
 690	hpa = scb_s->riccbd;
 691	if (hpa) {
 692		unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa);
 693		vsie_page->riccbd_gpa = 0;
 694		scb_s->riccbd = 0;
 695	}
 696
 697	hpa = scb_s->sdnxo;
 698	if (hpa) {
 699		unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa);
 700		vsie_page->sdnx_gpa = 0;
 701		scb_s->sdnxo = 0;
 702	}
 703}
 704
 705/*
 706 * Instead of shadowing some blocks, we can simply forward them because the
 707 * addresses in the scb are 64 bit long.
 708 *
 709 * This works as long as the data lies in one page. If blocks ever exceed one
 710 * page, we have to fall back to shadowing.
 711 *
 712 * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
 713 * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
 714 *
 715 * Returns: - 0 if all blocks were pinned.
 716 *          - > 0 if control has to be given to guest 2
 717 *          - -ENOMEM if out of memory
 718 */
 719static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 720{
 721	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 722	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 723	hpa_t hpa;
 724	gpa_t gpa;
 725	int rc = 0;
 726
 727	gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
 728	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
 729		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
 730	if (gpa) {
 731		if (gpa < 2 * PAGE_SIZE)
 732			rc = set_validity_icpt(scb_s, 0x0038U);
 733		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
 734			rc = set_validity_icpt(scb_s, 0x0011U);
 735		else if ((gpa & PAGE_MASK) !=
 736			 ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
 737			rc = set_validity_icpt(scb_s, 0x003bU);
 738		if (!rc) {
 739			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 740			if (rc)
 741				rc = set_validity_icpt(scb_s, 0x0034U);
 742		}
 743		if (rc)
 744			goto unpin;
 745		vsie_page->sca_gpa = gpa;
 746		scb_s->scaoh = (u32)((u64)hpa >> 32);
 747		scb_s->scaol = (u32)(u64)hpa;
 748	}
 749
 750	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
 751	if (gpa && (scb_s->ecb & ECB_TE)) {
 752		if (gpa < 2 * PAGE_SIZE) {
 753			rc = set_validity_icpt(scb_s, 0x0080U);
 754			goto unpin;
 755		}
 756		/* 256 bytes cannot cross page boundaries */
 757		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 758		if (rc) {
 759			rc = set_validity_icpt(scb_s, 0x0080U);
 760			goto unpin;
 761		}
 762		vsie_page->itdba_gpa = gpa;
 763		scb_s->itdba = hpa;
 764	}
 765
 766	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
 767	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
 768		if (gpa < 2 * PAGE_SIZE) {
 769			rc = set_validity_icpt(scb_s, 0x1310U);
 770			goto unpin;
 771		}
 772		/*
 773		 * 512 bytes vector registers cannot cross page boundaries
 774		 * if this block gets bigger, we have to shadow it.
 775		 */
 776		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 777		if (rc) {
 778			rc = set_validity_icpt(scb_s, 0x1310U);
 779			goto unpin;
 780		}
 781		vsie_page->gvrd_gpa = gpa;
 782		scb_s->gvrd = hpa;
 783	}
 784
 785	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
 786	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
 787		if (gpa < 2 * PAGE_SIZE) {
 788			rc = set_validity_icpt(scb_s, 0x0043U);
 789			goto unpin;
 790		}
 791		/* 64 bytes cannot cross page boundaries */
 792		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 793		if (rc) {
 794			rc = set_validity_icpt(scb_s, 0x0043U);
 795			goto unpin;
 796		}
 797		/* Validity 0x0044 will be checked by SIE */
 798		vsie_page->riccbd_gpa = gpa;
 799		scb_s->riccbd = hpa;
 800	}
 801	if (((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) ||
 802	    (scb_s->ecd & ECD_ETOKENF)) {
 803		unsigned long sdnxc;
 804
 805		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
 806		sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
 807		if (!gpa || gpa < 2 * PAGE_SIZE) {
 808			rc = set_validity_icpt(scb_s, 0x10b0U);
 809			goto unpin;
 810		}
 811		if (sdnxc < 6 || sdnxc > 12) {
 812			rc = set_validity_icpt(scb_s, 0x10b1U);
 813			goto unpin;
 814		}
 815		if (gpa & ((1 << sdnxc) - 1)) {
 816			rc = set_validity_icpt(scb_s, 0x10b2U);
 817			goto unpin;
 818		}
 819		/* Due to alignment rules (checked above) this cannot
 820		 * cross page boundaries
 821		 */
 822		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 823		if (rc) {
 824			rc = set_validity_icpt(scb_s, 0x10b0U);
 825			goto unpin;
 826		}
 827		vsie_page->sdnx_gpa = gpa;
 828		scb_s->sdnxo = hpa | sdnxc;
 829	}
 830	return 0;
 831unpin:
 832	unpin_blocks(vcpu, vsie_page);
 833	return rc;
 834}
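/*
 * Worked example (illustrative only): a pinned sca at host address
 * 0x000000abcd001000 is split into scaoh = 0x000000ab and
 * scaol = 0xcd001000; SIE recombines both halves into the full 64 bit
 * origin, which is why the block can be forwarded instead of shadowed.
 */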
 835
 836/* unpin the scb provided by guest 2, marking it as dirty */
 837static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
 838		      gpa_t gpa)
 839{
 840	hpa_t hpa = (hpa_t) vsie_page->scb_o;
 841
 842	if (hpa)
 843		unpin_guest_page(vcpu->kvm, gpa, hpa);
 844	vsie_page->scb_o = NULL;
 845}
 846
 847/*
 848 * Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
 849 *
 850 * Returns: - 0 if the scb was pinned.
 851 *          - > 0 if control has to be given to guest 2
 852 */
 853static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
 854		   gpa_t gpa)
 855{
 856	hpa_t hpa;
 857	int rc;
 858
 859	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
 860	if (rc) {
 861		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 862		WARN_ON_ONCE(rc);
 863		return 1;
 864	}
 865	vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
 866	return 0;
 867}
 868
 869/*
 870 * Inject a fault into guest 2.
 871 *
 872 * Returns: - > 0 if control has to be given to guest 2
 873 *            < 0 if an error occurred during injection.
 874 */
 875static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
 876			bool write_flag)
 877{
 878	struct kvm_s390_pgm_info pgm = {
 879		.code = code,
 880		.trans_exc_code =
 881			/* 0-51: virtual address */
 882			(vaddr & 0xfffffffffffff000UL) |
 883			/* 52-53: store / fetch */
 884			(((unsigned int) !write_flag) + 1) << 10,
  885			/* 62-63: asce id (always primary == 0) */
 886		.exc_access_id = 0, /* always primary */
 887		.op_access_id = 0, /* not MVPG */
 888	};
 889	int rc;
 890
 891	if (code == PGM_PROTECTION)
 892		pgm.trans_exc_code |= 0x4UL;
 893
 894	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
 895	return rc ? rc : 1;
 896}
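/*
 * Worked example (illustrative only): a write fault at 0x12345678 yields
 * trans_exc_code = 0x12345000 | (1 << 10) = 0x12345400; a fetch fault
 * would use (2 << 10) instead, and PGM_PROTECTION additionally ORs in
 * 0x4.
 */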
 897
 898/*
 899 * Handle a fault during vsie execution on a gmap shadow.
 900 *
 901 * Returns: - 0 if the fault was resolved
 902 *          - > 0 if control has to be given to guest 2
 903 *          - < 0 if an error occurred
 904 */
 905static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 906{
 907	int rc;
 908
 909	if (current->thread.gmap_int_code == PGM_PROTECTION)
 910		/* we can directly forward all protection exceptions */
 911		return inject_fault(vcpu, PGM_PROTECTION,
 912				    current->thread.gmap_addr, 1);
 913
 914	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 915				   current->thread.gmap_addr);
 916	if (rc > 0) {
 917		rc = inject_fault(vcpu, rc,
 918				  current->thread.gmap_addr,
 919				  current->thread.gmap_write_flag);
 920		if (rc >= 0)
 921			vsie_page->fault_addr = current->thread.gmap_addr;
 922	}
 923	return rc;
 924}
 925
 926/*
 927 * Retry the previous fault that required guest 2 intervention. This avoids
 928 * one superfluous SIE re-entry and direct exit.
 929 *
 930 * Will ignore any errors. The next SIE fault will do proper fault handling.
 931 */
 932static void handle_last_fault(struct kvm_vcpu *vcpu,
 933			      struct vsie_page *vsie_page)
 934{
 935	if (vsie_page->fault_addr)
 936		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
 937				      vsie_page->fault_addr);
 938	vsie_page->fault_addr = 0;
 939}
 940
 941static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
 942{
 943	vsie_page->scb_s.icptcode = 0;
 944}
 945
 946/* rewind the psw and clear the vsie icpt, so we can retry execution */
 947static void retry_vsie_icpt(struct vsie_page *vsie_page)
 948{
 949	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 950	int ilen = insn_length(scb_s->ipa >> 8);
 951
 952	/* take care of EXECUTE instructions */
 953	if (scb_s->icptstatus & 1) {
 954		ilen = (scb_s->icptstatus >> 4) & 0x6;
 955		if (!ilen)
 956			ilen = 4;
 957	}
 958	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
 959	clear_vsie_icpt(vsie_page);
 960}
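/*
 * Worked example (illustrative only): for icptstatus = 0x21 (target of
 * an EXECUTE), (0x21 >> 4) & 0x6 = 2, so the PSW is rewound by two
 * bytes; an encoded length of 0 is treated as ilen = 4.
 */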
 961
 962/*
 963 * Try to shadow + enable the guest 2 provided facility list.
 964 * Retry instruction execution if enabled for and provided by guest 2.
 965 *
 966 * Returns: - 0 if handled (retry or guest 2 icpt)
 967 *          - > 0 if control has to be given to guest 2
 968 */
 969static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 970{
 971	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 972	__u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;
 973
 974	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
 975		retry_vsie_icpt(vsie_page);
 976		if (read_guest_real(vcpu, fac, &vsie_page->fac,
 977				    sizeof(vsie_page->fac)))
 978			return set_validity_icpt(scb_s, 0x1090U);
 979		scb_s->fac = (__u32)(__u64) &vsie_page->fac;
 980	}
 981	return 0;
 982}
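/*
 * Illustrative note: the facility list origin is masked to an 8 byte
 * aligned 31 bit address, e.g. a raw scb_o->fac of 0x8000a5f4 becomes
 * 0x0000a5f0 before read_guest_real() copies the list into
 * vsie_page->fac.
 */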
 983
 984/*
 985 * Run the vsie on a shadow scb and a shadow gmap, without any further
 986 * sanity checks, handling SIE faults.
 987 *
 988 * Returns: - 0 everything went fine
 989 *          - > 0 if control has to be given to guest 2
 990 *          - < 0 if an error occurred
 991 */
 992static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 993	__releases(vcpu->kvm->srcu)
 994	__acquires(vcpu->kvm->srcu)
 995{
 996	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 997	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 998	int guest_bp_isolation;
 999	int rc = 0;
1000
1001	handle_last_fault(vcpu, vsie_page);
1002
1003	if (need_resched())
1004		schedule();
1005	if (test_cpu_flag(CIF_MCCK_PENDING))
1006		s390_handle_mcck();
1007
1008	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1009
1010	/* save current guest state of bp isolation override */
1011	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
1012
1013	/*
1014	 * The guest is running with BPBC, so we have to force it on for our
1015	 * nested guest. This is done by enabling BPBC globally, so the BPBC
1016	 * control in the SCB (which the nested guest can modify) is simply
1017	 * ignored.
1018	 */
1019	if (test_kvm_facility(vcpu->kvm, 82) &&
1020	    vcpu->arch.sie_block->fpf & FPF_BPBC)
1021		set_thread_flag(TIF_ISOLATE_BP_GUEST);
1022
1023	local_irq_disable();
1024	guest_enter_irqoff();
1025	local_irq_enable();
1026
1027	/*
1028	 * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
1029	 * and VCPU requests also hinder the vSIE from running and lead
1030	 * to an immediate exit. kvm_s390_vsie_kick() has to be used to
1031	 * also kick the vSIE.
1032	 */
1033	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
1034	barrier();
1035	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
1036		rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
1037	barrier();
1038	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
1039
1040	local_irq_disable();
1041	guest_exit_irqoff();
1042	local_irq_enable();
1043
1044	/* restore guest state for bp isolation override */
1045	if (!guest_bp_isolation)
1046		clear_thread_flag(TIF_ISOLATE_BP_GUEST);
1047
1048	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1049
1050	if (rc == -EINTR) {
1051		VCPU_EVENT(vcpu, 3, "%s", "machine check");
1052		kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
1053		return 0;
1054	}
1055
1056	if (rc > 0)
1057		rc = 0; /* we could still have an icpt */
1058	else if (rc == -EFAULT)
1059		return handle_fault(vcpu, vsie_page);
1060
1061	switch (scb_s->icptcode) {
1062	case ICPT_INST:
1063		if (scb_s->ipa == 0xb2b0)
1064			rc = handle_stfle(vcpu, vsie_page);
1065		break;
1066	case ICPT_STOP:
1067		/* stop not requested by g2 - must have been a kick */
1068		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
1069			clear_vsie_icpt(vsie_page);
1070		break;
1071	case ICPT_VALIDITY:
1072		if ((scb_s->ipa & 0xf000) != 0xf000)
1073			scb_s->ipa += 0x1000;
1074		break;
1075	}
1076	return rc;
1077}
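/*
 * Interplay sketch (illustrative only): while the hardware runs the vSIE
 * it sets PROG_IN_SIE in the shadow scb, so kvm_s390_vsie_kick() can set
 * PROG_BLOCK_SIE and raise CPUSTAT_STOP_INT on that scb to force the
 * nested guest out of SIE, analogous to kicking a regular SIE.
 */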
1078
1079static void release_gmap_shadow(struct vsie_page *vsie_page)
1080{
1081	if (vsie_page->gmap)
1082		gmap_put(vsie_page->gmap);
1083	WRITE_ONCE(vsie_page->gmap, NULL);
1084	prefix_unmapped(vsie_page);
1085}
1086
1087static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
1088			       struct vsie_page *vsie_page)
1089{
1090	unsigned long asce;
1091	union ctlreg0 cr0;
1092	struct gmap *gmap;
1093	int edat;
1094
1095	asce = vcpu->arch.sie_block->gcr[1];
1096	cr0.val = vcpu->arch.sie_block->gcr[0];
1097	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
1098	edat += edat && test_kvm_facility(vcpu->kvm, 78);
1099
1100	/*
1101	 * ASCE or EDAT could have changed since last icpt, or the gmap
1102	 * we're holding has been unshadowed. If the gmap is still valid,
1103	 * we can safely reuse it.
1104	 */
1105	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
1106		return 0;
1107
1108	/* release the old shadow - if any, and mark the prefix as unmapped */
1109	release_gmap_shadow(vsie_page);
1110	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
1111	if (IS_ERR(gmap))
1112		return PTR_ERR(gmap);
1113	gmap->private = vcpu->kvm;
1114	WRITE_ONCE(vsie_page->gmap, gmap);
1115	return 0;
1116}
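/*
 * Worked example (illustrative only): with CR0.edat set, facility 8
 * yields edat = 1 and facility 78 raises it to edat = 2. The level is
 * part of what gmap_shadow_valid() checks, so toggling EDAT forces a new
 * shadow gmap.
 */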
1117
1118/*
1119 * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
1120 */
1121static void register_shadow_scb(struct kvm_vcpu *vcpu,
1122				struct vsie_page *vsie_page)
1123{
1124	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1125
1126	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
1127	/*
1128	 * External calls have to lead to a kick of the vcpu and
1129	 * therefore the vsie -> Simulate Wait state.
1130	 */
1131	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
1132	/*
1133	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
1134	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
1135	 */
1136	preempt_disable();
1137	scb_s->epoch += vcpu->kvm->arch.epoch;
1138
1139	if (scb_s->ecd & ECD_MEF) {
1140		scb_s->epdx += vcpu->kvm->arch.epdx;
1141		if (scb_s->epoch < vcpu->kvm->arch.epoch)
1142			scb_s->epdx += 1;
1143	}
1144
1145	preempt_enable();
1146}
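/*
 * Worked example (illustrative only): a guest 3 epoch of
 * 0xfffffffffffffffe plus a guest 2 epoch of 0x2 wraps the 64 bit sum to
 * 0x0; the comparison above detects the overflow and carries one into
 * the epoch extension (epdx).
 */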
1147
1148/*
1149 * Unregister a shadow scb from a VCPU.
1150 */
1151static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
1152{
1153	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
1154	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
1155}
1156
1157/*
1158 * Run the vsie on a shadowed scb, managing the gmap shadow, handling
1159 * prefix pages and faults.
1160 *
1161 * Returns: - 0 if no errors occurred
1162 *          - > 0 if control has to be given to guest 2
1163 *          - -ENOMEM if out of memory
1164 */
1165static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1166{
1167	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1168	int rc = 0;
1169
1170	while (1) {
1171		rc = acquire_gmap_shadow(vcpu, vsie_page);
1172		if (!rc)
1173			rc = map_prefix(vcpu, vsie_page);
1174		if (!rc) {
1175			gmap_enable(vsie_page->gmap);
1176			update_intervention_requests(vsie_page);
1177			rc = do_vsie_run(vcpu, vsie_page);
1178			gmap_enable(vcpu->arch.gmap);
1179		}
1180		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);
1181
1182		if (rc == -EAGAIN)
1183			rc = 0;
1184		if (rc || scb_s->icptcode || signal_pending(current) ||
1185		    kvm_s390_vcpu_has_irq(vcpu, 0) ||
1186		    kvm_s390_vcpu_sie_inhibited(vcpu))
1187			break;
1188	}
1189
1190	if (rc == -EFAULT) {
1191		/*
 1192		 * Addressing exceptions are always presented as intercepts.
1193		 * As addressing exceptions are suppressing and our guest 3 PSW
1194		 * points at the responsible instruction, we have to
 1195		 * forward the PSW and set the ilc. If we can't read the guest 3
1196		 * instruction, we can use an arbitrary ilc. Let's always use
1197		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
1198		 * memory. (we could also fake the shadow so the hardware
1199		 * handles it).
1200		 */
1201		scb_s->icptcode = ICPT_PROGI;
1202		scb_s->iprcc = PGM_ADDRESSING;
1203		scb_s->pgmilc = 4;
1204		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
1205	}
1206	return rc;
1207}
1208
1209/*
1210 * Get or create a vsie page for a scb address.
1211 *
1212 * Returns: - address of a vsie page (cached or new one)
1213 *          - NULL if the same scb address is already used by another VCPU
1214 *          - ERR_PTR(-ENOMEM) if out of memory
1215 */
1216static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
1217{
1218	struct vsie_page *vsie_page;
1219	struct page *page;
1220	int nr_vcpus;
1221
1222	rcu_read_lock();
1223	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
1224	rcu_read_unlock();
1225	if (page) {
1226		if (page_ref_inc_return(page) == 2)
1227			return page_to_virt(page);
1228		page_ref_dec(page);
1229	}
1230
1231	/*
1232	 * We want at least #online_vcpus shadows, so every VCPU can execute
1233	 * the VSIE in parallel.
1234	 */
1235	nr_vcpus = atomic_read(&kvm->online_vcpus);
1236
1237	mutex_lock(&kvm->arch.vsie.mutex);
1238	if (kvm->arch.vsie.page_count < nr_vcpus) {
1239		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA);
1240		if (!page) {
1241			mutex_unlock(&kvm->arch.vsie.mutex);
1242			return ERR_PTR(-ENOMEM);
1243		}
1244		page_ref_inc(page);
1245		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
1246		kvm->arch.vsie.page_count++;
1247	} else {
1248		/* reuse an existing entry that belongs to nobody */
1249		while (true) {
1250			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
1251			if (page_ref_inc_return(page) == 2)
1252				break;
1253			page_ref_dec(page);
1254			kvm->arch.vsie.next++;
1255			kvm->arch.vsie.next %= nr_vcpus;
1256		}
1257		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
1258	}
1259	page->index = addr;
1260	/* double use of the same address */
1261	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
1262		page_ref_dec(page);
1263		mutex_unlock(&kvm->arch.vsie.mutex);
1264		return NULL;
1265	}
1266	mutex_unlock(&kvm->arch.vsie.mutex);
1267
1268	vsie_page = page_to_virt(page);
1269	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
1270	release_gmap_shadow(vsie_page);
1271	vsie_page->fault_addr = 0;
1272	vsie_page->scb_s.ihcpu = 0xffffU;
1273	return vsie_page;
1274}
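/*
 * Refcounting sketch (illustrative only): a cached but unused vsie page
 * holds exactly one reference, so the try-grab pattern used above is:
 *
 *	if (page_ref_inc_return(page) == 2)
 *		return page_to_virt(page);	// we are the only user now
 *	page_ref_dec(page);			// raced with another VCPU
 */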
1275
1276/* put a vsie page acquired via get_vsie_page */
1277static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
1278{
1279	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);
1280
1281	page_ref_dec(page);
1282}
1283
1284int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
1285{
1286	struct vsie_page *vsie_page;
1287	unsigned long scb_addr;
1288	int rc;
1289
1290	vcpu->stat.instruction_sie++;
1291	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
1292		return -EOPNOTSUPP;
1293	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1294		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1295
1296	BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
1297	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);
1298
1299	/* 512 byte alignment */
1300	if (unlikely(scb_addr & 0x1ffUL))
1301		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
1302
1303	if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
1304	    kvm_s390_vcpu_sie_inhibited(vcpu))
1305		return 0;
1306
1307	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
1308	if (IS_ERR(vsie_page))
1309		return PTR_ERR(vsie_page);
1310	else if (!vsie_page)
1311		/* double use of sie control block - simply do nothing */
1312		return 0;
1313
1314	rc = pin_scb(vcpu, vsie_page, scb_addr);
1315	if (rc)
1316		goto out_put;
1317	rc = shadow_scb(vcpu, vsie_page);
1318	if (rc)
1319		goto out_unpin_scb;
1320	rc = pin_blocks(vcpu, vsie_page);
1321	if (rc)
1322		goto out_unshadow;
1323	register_shadow_scb(vcpu, vsie_page);
1324	rc = vsie_run(vcpu, vsie_page);
1325	unregister_shadow_scb(vcpu);
1326	unpin_blocks(vcpu, vsie_page);
1327out_unshadow:
1328	unshadow_scb(vcpu, vsie_page);
1329out_unpin_scb:
1330	unpin_scb(vcpu, vsie_page, scb_addr);
1331out_put:
1332	put_vsie_page(vcpu->kvm, vsie_page);
1333
1334	return rc < 0 ? rc : 0;
1335}
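/*
 * Illustrative overview (not upstream code): setup and teardown mirror
 * each other; pin_scb() -> shadow_scb() -> pin_blocks() ->
 * register_shadow_scb() -> vsie_run(), then everything is unwound in
 * reverse order via the goto labels above.
 */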
1336
1337/* Init the vsie data structures. To be called when a vm is initialized. */
1338void kvm_s390_vsie_init(struct kvm *kvm)
1339{
1340	mutex_init(&kvm->arch.vsie.mutex);
1341	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL);
1342}
1343
1344/* Destroy the vsie data structures. To be called when a vm is destroyed. */
1345void kvm_s390_vsie_destroy(struct kvm *kvm)
1346{
1347	struct vsie_page *vsie_page;
1348	struct page *page;
1349	int i;
1350
1351	mutex_lock(&kvm->arch.vsie.mutex);
1352	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
1353		page = kvm->arch.vsie.pages[i];
1354		kvm->arch.vsie.pages[i] = NULL;
1355		vsie_page = page_to_virt(page);
1356		release_gmap_shadow(vsie_page);
1357		/* free the radix tree entry */
1358		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
1359		__free_page(page);
1360	}
1361	kvm->arch.vsie.page_count = 0;
1362	mutex_unlock(&kvm->arch.vsie.mutex);
1363}
1364
1365void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
1366{
1367	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);
1368
1369	/*
1370	 * Even if the VCPU lets go of the shadow sie block reference, it is
1371	 * still valid in the cache. So we can safely kick it.
1372	 */
1373	if (scb) {
1374		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
1375		if (scb->prog0c & PROG_IN_SIE)
1376			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
1377	}
1378}