   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * hosting IBM Z kernel virtual machines (s390x)
   4 *
   5 * Copyright IBM Corp. 2008, 2020
   6 *
   7 *    Author(s): Carsten Otte <cotte@de.ibm.com>
   8 *               Christian Borntraeger <borntraeger@de.ibm.com>
   9 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
  10 *               Jason J. Herne <jjherne@us.ibm.com>
  11 */
  12
  13#define KMSG_COMPONENT "kvm-s390"
  14#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  15
  16#include <linux/compiler.h>
  17#include <linux/err.h>
  18#include <linux/fs.h>
  19#include <linux/hrtimer.h>
  20#include <linux/init.h>
  21#include <linux/kvm.h>
  22#include <linux/kvm_host.h>
  23#include <linux/mman.h>
  24#include <linux/module.h>
  25#include <linux/moduleparam.h>
  26#include <linux/random.h>
  27#include <linux/slab.h>
  28#include <linux/timer.h>
  29#include <linux/vmalloc.h>
  30#include <linux/bitmap.h>
  31#include <linux/sched/signal.h>
  32#include <linux/string.h>
  33#include <linux/pgtable.h>
  34#include <linux/mmu_notifier.h>
  35
  36#include <asm/access-regs.h>
  37#include <asm/asm-offsets.h>
  38#include <asm/lowcore.h>
  39#include <asm/stp.h>
  40#include <asm/gmap.h>
  41#include <asm/nmi.h>
  42#include <asm/isc.h>
  43#include <asm/sclp.h>
  44#include <asm/cpacf.h>
  45#include <asm/timex.h>
  46#include <asm/asm.h>
  47#include <asm/fpu.h>
  48#include <asm/ap.h>
  49#include <asm/uv.h>
  50#include "kvm-s390.h"
  51#include "gaccess.h"
  52#include "pci.h"
  53
  54#define CREATE_TRACE_POINTS
  55#include "trace.h"
  56#include "trace-s390.h"
  57
  58#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
  59#define LOCAL_IRQS 32
  60#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
  61			   (KVM_MAX_VCPUS + LOCAL_IRQS))
  62
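/*
 * Sizing note (illustrative, not an additional limit): VCPU_IRQS_MAX_BUF
 * reserves room for one struct kvm_s390_irq per possible vCPU plus
 * LOCAL_IRQS entries, which bounds the buffers used when interrupt
 * state is saved to or restored from userspace.
 */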
  63const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
  64	KVM_GENERIC_VM_STATS(),
  65	STATS_DESC_COUNTER(VM, inject_io),
  66	STATS_DESC_COUNTER(VM, inject_float_mchk),
  67	STATS_DESC_COUNTER(VM, inject_pfault_done),
  68	STATS_DESC_COUNTER(VM, inject_service_signal),
  69	STATS_DESC_COUNTER(VM, inject_virtio),
  70	STATS_DESC_COUNTER(VM, aen_forward),
  71	STATS_DESC_COUNTER(VM, gmap_shadow_reuse),
  72	STATS_DESC_COUNTER(VM, gmap_shadow_create),
  73	STATS_DESC_COUNTER(VM, gmap_shadow_r1_entry),
  74	STATS_DESC_COUNTER(VM, gmap_shadow_r2_entry),
  75	STATS_DESC_COUNTER(VM, gmap_shadow_r3_entry),
  76	STATS_DESC_COUNTER(VM, gmap_shadow_sg_entry),
  77	STATS_DESC_COUNTER(VM, gmap_shadow_pg_entry),
  78};
  79
  80const struct kvm_stats_header kvm_vm_stats_header = {
  81	.name_size = KVM_STATS_NAME_SIZE,
  82	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
  83	.id_offset = sizeof(struct kvm_stats_header),
  84	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
  85	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
  86		       sizeof(kvm_vm_stats_desc),
  87};
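/*
 * A rough sketch of the binary layout these offsets describe for the
 * VM stats file descriptor (derived from the fields above, not a
 * separate definition):
 *
 *   [struct kvm_stats_header]
 *   [id string, KVM_STATS_NAME_SIZE bytes]
 *   [kvm_vm_stats_desc descriptors]
 *   [counter data]
 */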
  88
  89const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
  90	KVM_GENERIC_VCPU_STATS(),
  91	STATS_DESC_COUNTER(VCPU, exit_userspace),
  92	STATS_DESC_COUNTER(VCPU, exit_null),
  93	STATS_DESC_COUNTER(VCPU, exit_external_request),
  94	STATS_DESC_COUNTER(VCPU, exit_io_request),
  95	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
  96	STATS_DESC_COUNTER(VCPU, exit_stop_request),
  97	STATS_DESC_COUNTER(VCPU, exit_validity),
  98	STATS_DESC_COUNTER(VCPU, exit_instruction),
  99	STATS_DESC_COUNTER(VCPU, exit_pei),
 100	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
 101	STATS_DESC_COUNTER(VCPU, instruction_lctl),
 102	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
 103	STATS_DESC_COUNTER(VCPU, instruction_stctl),
 104	STATS_DESC_COUNTER(VCPU, instruction_stctg),
 105	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
 106	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
 107	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
 108	STATS_DESC_COUNTER(VCPU, deliver_ckc),
 109	STATS_DESC_COUNTER(VCPU, deliver_cputm),
 110	STATS_DESC_COUNTER(VCPU, deliver_external_call),
 111	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
 112	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
 113	STATS_DESC_COUNTER(VCPU, deliver_virtio),
 114	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
 115	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
 116	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
 117	STATS_DESC_COUNTER(VCPU, deliver_program),
 118	STATS_DESC_COUNTER(VCPU, deliver_io),
 119	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
 120	STATS_DESC_COUNTER(VCPU, exit_wait_state),
 121	STATS_DESC_COUNTER(VCPU, inject_ckc),
 122	STATS_DESC_COUNTER(VCPU, inject_cputm),
 123	STATS_DESC_COUNTER(VCPU, inject_external_call),
 124	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
 125	STATS_DESC_COUNTER(VCPU, inject_mchk),
 126	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
 127	STATS_DESC_COUNTER(VCPU, inject_program),
 128	STATS_DESC_COUNTER(VCPU, inject_restart),
 129	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
 130	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
 131	STATS_DESC_COUNTER(VCPU, instruction_epsw),
 132	STATS_DESC_COUNTER(VCPU, instruction_gs),
 133	STATS_DESC_COUNTER(VCPU, instruction_io_other),
 134	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
 135	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
 136	STATS_DESC_COUNTER(VCPU, instruction_lpswey),
 137	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
 138	STATS_DESC_COUNTER(VCPU, instruction_ptff),
 139	STATS_DESC_COUNTER(VCPU, instruction_sck),
 140	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
 141	STATS_DESC_COUNTER(VCPU, instruction_stidp),
 142	STATS_DESC_COUNTER(VCPU, instruction_spx),
 143	STATS_DESC_COUNTER(VCPU, instruction_stpx),
 144	STATS_DESC_COUNTER(VCPU, instruction_stap),
 145	STATS_DESC_COUNTER(VCPU, instruction_iske),
 146	STATS_DESC_COUNTER(VCPU, instruction_ri),
 147	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
 148	STATS_DESC_COUNTER(VCPU, instruction_sske),
 149	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
 150	STATS_DESC_COUNTER(VCPU, instruction_stsi),
 151	STATS_DESC_COUNTER(VCPU, instruction_stfl),
 152	STATS_DESC_COUNTER(VCPU, instruction_tb),
 153	STATS_DESC_COUNTER(VCPU, instruction_tpi),
 154	STATS_DESC_COUNTER(VCPU, instruction_tprot),
 155	STATS_DESC_COUNTER(VCPU, instruction_tsch),
 156	STATS_DESC_COUNTER(VCPU, instruction_sie),
 157	STATS_DESC_COUNTER(VCPU, instruction_essa),
 158	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
 159	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
 160	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
 161	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
 162	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
 163	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
 164	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
 165	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
 166	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
 167	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
 168	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
 169	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
 170	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
 171	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
 172	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
 173	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
 174	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
 175	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
 176	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
 177	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
 178	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
 179	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
 180	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
 181	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
 182	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
 183	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
 184	STATS_DESC_COUNTER(VCPU, pfault_sync)
 185};
 186
 187const struct kvm_stats_header kvm_vcpu_stats_header = {
 188	.name_size = KVM_STATS_NAME_SIZE,
 189	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
 190	.id_offset = sizeof(struct kvm_stats_header),
 191	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
 192	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
 193		       sizeof(kvm_vcpu_stats_desc),
 194};
 195
 196/* allow nested virtualization in KVM (if enabled by user space) */
 197static int nested;
 198module_param(nested, int, S_IRUGO);
 199MODULE_PARM_DESC(nested, "Nested virtualization support");
 200
 201/* allow 1m huge page guest backing, if !nested */
 202static int hpage;
 203module_param(hpage, int, 0444);
 204MODULE_PARM_DESC(hpage, "1m huge page backing support");
 205
 206/* maximum percentage of steal time for polling.  >100 is treated like 100 */
 207static u8 halt_poll_max_steal = 10;
 208module_param(halt_poll_max_steal, byte, 0644);
 209MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
 210
 211/* if set to true, the GISA will be initialized and used if available */
 212static bool use_gisa  = true;
 213module_param(use_gisa, bool, 0644);
 214MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");
 215
 216/* maximum diag9c forwarding per second */
 217unsigned int diag9c_forwarding_hz;
 218module_param(diag9c_forwarding_hz, uint, 0644);
 219MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");
 220
 221/*
 222 * allow asynchronous deinit for protected guests; enable by default since
 223 * the feature is opt-in anyway
 224 */
 225static int async_destroy = 1;
 226module_param(async_destroy, int, 0444);
 227MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");
 228
 229/*
 230 * For now we handle at most 16 double words as this is what the s390 base
 231 * kernel handles and stores in the prefix page. If we ever need to go beyond
 232 * this, this requires changes to code, but the external uapi can stay.
 233 */
 234#define SIZE_INTERNAL 16
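/* For scale: 16 doublewords of 64 bits each cover facility bits 0-1023. */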
 235
 236/*
 237 * Base feature mask that defines default mask for facilities. Consists of the
 238 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 239 */
 240static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
 241/*
 242 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 243 * and defines the facilities that can be enabled via a cpu model.
 244 */
 245static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };
 246
 247static unsigned long kvm_s390_fac_size(void)
 248{
 249	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
 250	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
 251	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
 252		sizeof(stfle_fac_list));
 253
 254	return SIZE_INTERNAL;
 255}
 256
 257/* available cpu features supported by kvm */
 258static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
 259/* available subfunctions indicated via query / "test bit" */
 260static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;
 261
 262static struct gmap_notifier gmap_notifier;
 263static struct gmap_notifier vsie_gmap_notifier;
 264debug_info_t *kvm_s390_dbf;
 265debug_info_t *kvm_s390_dbf_uv;
 266
 267/* Section: not file related */
 268/* forward declarations */
 269static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
 270			      unsigned long end);
 271static int sca_switch_to_extended(struct kvm *kvm);
 272
 273static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
 274{
 275	u8 delta_idx = 0;
 276
 277	/*
 278	 * The TOD jumps by delta, we have to compensate this by adding
 279	 * -delta to the epoch.
 280	 */
 281	delta = -delta;
 282
 283	/* sign-extension - we're adding to signed values below */
 284	if ((s64)delta < 0)
 285		delta_idx = -1;
 286
 287	scb->epoch += delta;
 288	if (scb->ecd & ECD_MEF) {
 289		scb->epdx += delta_idx;
 290		if (scb->epoch < delta)
 291			scb->epdx += 1;
 292	}
 293}
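/*
 * Rough illustration of the arithmetic above (no extra logic implied):
 * the pair (epdx:epoch) acts as one wide signed epoch to which the
 * sign-extended -delta is added.  E.g. a forward TOD jump of 1 with
 * epoch == 0 leaves epoch == -1ULL and, with ECD_MEF set, decrements
 * epdx with no carry, i.e. the combined epoch drops by exactly 1.
 */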
 294
 295/*
 296 * This callback is executed during stop_machine(). All CPUs are therefore
 297 * temporarily stopped. In order not to change guest behavior, we have to
 298 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 299 * so a CPU won't be stopped while calculating with the epoch.
 300 */
 301static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
 302			  void *v)
 303{
 304	struct kvm *kvm;
 305	struct kvm_vcpu *vcpu;
 306	unsigned long i;
 307	unsigned long long *delta = v;
 308
 309	list_for_each_entry(kvm, &vm_list, vm_list) {
 310		kvm_for_each_vcpu(i, vcpu, kvm) {
 311			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
 312			if (i == 0) {
 313				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
 314				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
 315			}
 316			if (vcpu->arch.cputm_enabled)
 317				vcpu->arch.cputm_start += *delta;
 318			if (vcpu->arch.vsie_block)
 319				kvm_clock_sync_scb(vcpu->arch.vsie_block,
 320						   *delta);
 321		}
 322	}
 323	return NOTIFY_OK;
 324}
 325
 326static struct notifier_block kvm_clock_notifier = {
 327	.notifier_call = kvm_clock_sync,
 328};
 329
 330static void allow_cpu_feat(unsigned long nr)
 331{
 332	set_bit_inv(nr, kvm_s390_available_cpu_feat);
 333}
 334
 335static inline int plo_test_bit(unsigned char nr)
 336{
 337	unsigned long function = (unsigned long)nr | 0x100;
 338	int cc;
 339
 340	asm volatile(
 341		"	lgr	0,%[function]\n"
 342		/* Parameter registers are ignored for "test bit" */
 343		"	plo	0,0,0,0(0)\n"
 344		CC_IPM(cc)
 345		: CC_OUT(cc, cc)
 346		: [function] "d" (function)
 347		: CC_CLOBBER_LIST("0"));
 348	return CC_TRANSFORM(cc) == 0;
 349}
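/*
 * Sketch of the convention used here (as consumed by
 * kvm_s390_cpu_feat_init() below): function code nr | 0x100 selects
 * the PLO "test bit" form for function nr, and cc == 0 means that
 * function is installed, so the probe loop can record all 256 answers
 * in the plo subfunction bitmap.
 */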
 350
 351static __always_inline void pfcr_query(u8 (*query)[16])
 352{
 353	asm volatile(
 354		"	lghi	0,0\n"
 355		"	.insn   rsy,0xeb0000000016,0,0,%[query]\n"
 356		: [query] "=QS" (*query)
 357		:
 358		: "cc", "0");
 359}
 360
 361static __always_inline void __sortl_query(u8 (*query)[32])
 362{
 363	asm volatile(
 364		"	lghi	0,0\n"
 365		"	la	1,%[query]\n"
 366		/* Parameter registers are ignored */
 367		"	.insn	rre,0xb9380000,2,4\n"
 368		: [query] "=R" (*query)
 369		:
 370		: "cc", "0", "1");
 371}
 372
 373static __always_inline void __dfltcc_query(u8 (*query)[32])
 374{
 375	asm volatile(
 376		"	lghi	0,0\n"
 377		"	la	1,%[query]\n"
 378		/* Parameter registers are ignored */
 379		"	.insn	rrf,0xb9390000,2,4,6,0\n"
 380		: [query] "=R" (*query)
 381		:
 382		: "cc", "0", "1");
 383}
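/*
 * The three query helpers above share one pattern, summarized here as
 * a sketch rather than a guarantee: general register 0 is cleared to
 * select the instruction's query function, and the resulting
 * "installed subfunctions" bits are stored to the supplied buffer
 * (addressed directly or via general register 1).
 */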
 384
 385static void __init kvm_s390_cpu_feat_init(void)
 386{
 387	int i;
 388
 389	for (i = 0; i < 256; ++i) {
 390		if (plo_test_bit(i))
 391			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
 392	}
 393
 394	if (test_facility(28)) /* TOD-clock steering */
 395		ptff(kvm_s390_available_subfunc.ptff,
 396		     sizeof(kvm_s390_available_subfunc.ptff),
 397		     PTFF_QAF);
 398
 399	if (test_facility(17)) { /* MSA */
 400		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
 401			      kvm_s390_available_subfunc.kmac);
 402		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
 403			      kvm_s390_available_subfunc.kmc);
 404		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
 405			      kvm_s390_available_subfunc.km);
 406		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
 407			      kvm_s390_available_subfunc.kimd);
 408		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
 409			      kvm_s390_available_subfunc.klmd);
 410	}
 411	if (test_facility(76)) /* MSA3 */
 412		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
 413			      kvm_s390_available_subfunc.pckmo);
 414	if (test_facility(77)) { /* MSA4 */
 415		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
 416			      kvm_s390_available_subfunc.kmctr);
 417		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
 418			      kvm_s390_available_subfunc.kmf);
 419		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
 420			      kvm_s390_available_subfunc.kmo);
 421		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
 422			      kvm_s390_available_subfunc.pcc);
 423	}
 424	if (test_facility(57)) /* MSA5 */
 425		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
 426			      kvm_s390_available_subfunc.ppno);
 427
 428	if (test_facility(146)) /* MSA8 */
 429		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
 430			      kvm_s390_available_subfunc.kma);
 431
 432	if (test_facility(155)) /* MSA9 */
 433		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
 434			      kvm_s390_available_subfunc.kdsa);
 435
 436	if (test_facility(150)) /* SORTL */
 437		__sortl_query(&kvm_s390_available_subfunc.sortl);
 438
 439	if (test_facility(151)) /* DFLTCC */
 440		__dfltcc_query(&kvm_s390_available_subfunc.dfltcc);
 441
 442	if (test_facility(201))	/* PFCR */
 443		pfcr_query(&kvm_s390_available_subfunc.pfcr);
 444
 445	if (MACHINE_HAS_ESOP)
 446		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
 447	/*
 448	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
 449	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
 450	 */
 451	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
 452	    !test_facility(3) || !nested)
 453		return;
 454	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
 455	if (sclp.has_64bscao)
 456		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
 457	if (sclp.has_siif)
 458		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
 459	if (sclp.has_gpere)
 460		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
 461	if (sclp.has_gsls)
 462		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
 463	if (sclp.has_ib)
 464		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
 465	if (sclp.has_cei)
 466		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
 467	if (sclp.has_ibs)
 468		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
 469	if (sclp.has_kss)
 470		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
 471	/*
 472	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
 473	 * all skey handling functions read/set the skey from the PGSTE
 474	 * instead of the real storage key.
 475	 *
 476	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
 477	 * pages being detected as preserved although they are resident.
 478	 *
 479	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
 480	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
 481	 *
 482	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
 483	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
 484	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
 485	 *
 486	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
 487	 * cannot easily shadow the SCA because of the ipte lock.
 488	 */
 489}
 490
 491static int __init __kvm_s390_init(void)
 492{
 493	int rc = -ENOMEM;
 494
 495	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
 496	if (!kvm_s390_dbf)
 497		return -ENOMEM;
 498
 499	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
 500	if (!kvm_s390_dbf_uv)
 501		goto err_kvm_uv;
 502
 503	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
 504	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
 505		goto err_debug_view;
 506
 507	kvm_s390_cpu_feat_init();
 508
 509	/* Register floating interrupt controller interface. */
 510	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
 511	if (rc) {
 512		pr_err("A FLIC registration call failed with rc=%d\n", rc);
 513		goto err_flic;
 514	}
 515
 516	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
 517		rc = kvm_s390_pci_init();
 518		if (rc) {
 519			pr_err("Unable to allocate AIFT for PCI\n");
 520			goto err_pci;
 521		}
 522	}
 523
 524	rc = kvm_s390_gib_init(GAL_ISC);
 525	if (rc)
 526		goto err_gib;
 527
 528	gmap_notifier.notifier_call = kvm_gmap_notifier;
 529	gmap_register_pte_notifier(&gmap_notifier);
 530	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
 531	gmap_register_pte_notifier(&vsie_gmap_notifier);
 532	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
 533				       &kvm_clock_notifier);
 534
 535	return 0;
 536
 537err_gib:
 538	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
 539		kvm_s390_pci_exit();
 540err_pci:
 541err_flic:
 542err_debug_view:
 543	debug_unregister(kvm_s390_dbf_uv);
 544err_kvm_uv:
 545	debug_unregister(kvm_s390_dbf);
 546	return rc;
 547}
 548
 549static void __kvm_s390_exit(void)
 550{
 551	gmap_unregister_pte_notifier(&gmap_notifier);
 552	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
 553	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
 554					 &kvm_clock_notifier);
 555
 556	kvm_s390_gib_destroy();
 557	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
 558		kvm_s390_pci_exit();
 559	debug_unregister(kvm_s390_dbf);
 560	debug_unregister(kvm_s390_dbf_uv);
 561}
 562
 563/* Section: device related */
 564long kvm_arch_dev_ioctl(struct file *filp,
 565			unsigned int ioctl, unsigned long arg)
 566{
 567	if (ioctl == KVM_S390_ENABLE_SIE)
 568		return s390_enable_sie();
 569	return -EINVAL;
 570}
 571
 572int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 573{
 574	int r;
 575
 576	switch (ext) {
 577	case KVM_CAP_S390_PSW:
 578	case KVM_CAP_S390_GMAP:
 579	case KVM_CAP_SYNC_MMU:
 580#ifdef CONFIG_KVM_S390_UCONTROL
 581	case KVM_CAP_S390_UCONTROL:
 582#endif
 583	case KVM_CAP_ASYNC_PF:
 584	case KVM_CAP_SYNC_REGS:
 585	case KVM_CAP_ONE_REG:
 586	case KVM_CAP_ENABLE_CAP:
 587	case KVM_CAP_S390_CSS_SUPPORT:
 588	case KVM_CAP_IOEVENTFD:
 589	case KVM_CAP_S390_IRQCHIP:
 590	case KVM_CAP_VM_ATTRIBUTES:
 591	case KVM_CAP_MP_STATE:
 592	case KVM_CAP_IMMEDIATE_EXIT:
 593	case KVM_CAP_S390_INJECT_IRQ:
 594	case KVM_CAP_S390_USER_SIGP:
 595	case KVM_CAP_S390_USER_STSI:
 596	case KVM_CAP_S390_SKEYS:
 597	case KVM_CAP_S390_IRQ_STATE:
 598	case KVM_CAP_S390_USER_INSTR0:
 599	case KVM_CAP_S390_CMMA_MIGRATION:
 600	case KVM_CAP_S390_AIS:
 601	case KVM_CAP_S390_AIS_MIGRATION:
 602	case KVM_CAP_S390_VCPU_RESETS:
 603	case KVM_CAP_SET_GUEST_DEBUG:
 604	case KVM_CAP_S390_DIAG318:
 605	case KVM_CAP_IRQFD_RESAMPLE:
 606		r = 1;
 607		break;
 608	case KVM_CAP_SET_GUEST_DEBUG2:
 609		r = KVM_GUESTDBG_VALID_MASK;
 610		break;
 611	case KVM_CAP_S390_HPAGE_1M:
 612		r = 0;
 613		if (hpage && !(kvm && kvm_is_ucontrol(kvm)))
 614			r = 1;
 615		break;
 616	case KVM_CAP_S390_MEM_OP:
 617		r = MEM_OP_MAX_SIZE;
 618		break;
 619	case KVM_CAP_S390_MEM_OP_EXTENSION:
 620		/*
 621		 * Flag bits indicating which extensions are supported.
 622		 * If r > 0, the base extension must also be supported/indicated,
 623		 * in order to maintain backwards compatibility.
 624		 */
 625		r = KVM_S390_MEMOP_EXTENSION_CAP_BASE |
 626		    KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG;
 627		break;
 628	case KVM_CAP_NR_VCPUS:
 629	case KVM_CAP_MAX_VCPUS:
 630	case KVM_CAP_MAX_VCPU_ID:
 631		r = KVM_S390_BSCA_CPU_SLOTS;
 632		if (!kvm_s390_use_sca_entries())
 633			r = KVM_MAX_VCPUS;
 634		else if (sclp.has_esca && sclp.has_64bscao)
 635			r = KVM_S390_ESCA_CPU_SLOTS;
 636		if (ext == KVM_CAP_NR_VCPUS)
 637			r = min_t(unsigned int, num_online_cpus(), r);
 638		break;
 639	case KVM_CAP_S390_COW:
 640		r = MACHINE_HAS_ESOP;
 641		break;
 642	case KVM_CAP_S390_VECTOR_REGISTERS:
 643		r = test_facility(129);
 644		break;
 645	case KVM_CAP_S390_RI:
 646		r = test_facility(64);
 647		break;
 648	case KVM_CAP_S390_GS:
 649		r = test_facility(133);
 650		break;
 651	case KVM_CAP_S390_BPB:
 652		r = test_facility(82);
 653		break;
 654	case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
 655		r = async_destroy && is_prot_virt_host();
 656		break;
 657	case KVM_CAP_S390_PROTECTED:
 658		r = is_prot_virt_host();
 659		break;
 660	case KVM_CAP_S390_PROTECTED_DUMP: {
 661		u64 pv_cmds_dump[] = {
 662			BIT_UVC_CMD_DUMP_INIT,
 663			BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
 664			BIT_UVC_CMD_DUMP_CPU,
 665			BIT_UVC_CMD_DUMP_COMPLETE,
 666		};
 667		int i;
 668
 669		r = is_prot_virt_host();
 670
 671		for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
 672			if (!test_bit_inv(pv_cmds_dump[i],
 673					  (unsigned long *)&uv_info.inst_calls_list)) {
 674				r = 0;
 675				break;
 676			}
 677		}
 678		break;
 679	}
 680	case KVM_CAP_S390_ZPCI_OP:
 681		r = kvm_s390_pci_interp_allowed();
 682		break;
 683	case KVM_CAP_S390_CPU_TOPOLOGY:
 684		r = test_facility(11);
 685		break;
 686	default:
 687		r = 0;
 688	}
 689	return r;
 690}
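/*
 * Minimal userspace sketch (hypothetical vm_fd, no error handling) of
 * probing one of the extensions handled above via KVM_CHECK_EXTENSION:
 *
 *	int max = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_MEM_OP);
 *	// with this handler, max is MEM_OP_MAX_SIZE (65536)
 */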
 691
 692void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 693{
 694	int i;
 695	gfn_t cur_gfn, last_gfn;
 696	unsigned long gaddr, vmaddr;
 697	struct gmap *gmap = kvm->arch.gmap;
 698	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);
 699
 700	/* Loop over all guest segments */
 701	cur_gfn = memslot->base_gfn;
 702	last_gfn = memslot->base_gfn + memslot->npages;
 703	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
 704		gaddr = gfn_to_gpa(cur_gfn);
 705		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
 706		if (kvm_is_error_hva(vmaddr))
 707			continue;
 708
 709		bitmap_zero(bitmap, _PAGE_ENTRIES);
 710		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
 711		for (i = 0; i < _PAGE_ENTRIES; i++) {
 712			if (test_bit(i, bitmap))
 713				mark_page_dirty(kvm, cur_gfn + i);
 714		}
 715
 716		if (fatal_signal_pending(current))
 717			return;
 718		cond_resched();
 719	}
 720}
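/*
 * Note on granularity (illustrative): the loop above syncs dirty bits
 * one segment at a time (_PAGE_ENTRIES page-table entries, i.e. 1 MiB
 * of guest memory with 4 KiB base pages) and calls cond_resched()
 * between segments so huge memslots do not hog the CPU.
 */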
 721
 722/* Section: vm related */
 723static void sca_del_vcpu(struct kvm_vcpu *vcpu);
 724
 725/*
 726 * Get (and clear) the dirty memory log for a memory slot.
 727 */
 728int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 729			       struct kvm_dirty_log *log)
 730{
 731	int r;
 732	unsigned long n;
 733	struct kvm_memory_slot *memslot;
 734	int is_dirty;
 735
 736	if (kvm_is_ucontrol(kvm))
 737		return -EINVAL;
 738
 739	mutex_lock(&kvm->slots_lock);
 740
 741	r = -EINVAL;
 742	if (log->slot >= KVM_USER_MEM_SLOTS)
 743		goto out;
 744
 745	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
 746	if (r)
 747		goto out;
 748
 749	/* Clear the dirty log */
 750	if (is_dirty) {
 751		n = kvm_dirty_bitmap_bytes(memslot);
 752		memset(memslot->dirty_bitmap, 0, n);
 753	}
 754	r = 0;
 755out:
 756	mutex_unlock(&kvm->slots_lock);
 757	return r;
 758}
 759
 760static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
 761{
 762	unsigned long i;
 763	struct kvm_vcpu *vcpu;
 764
 765	kvm_for_each_vcpu(i, vcpu, kvm) {
 766		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
 767	}
 768}
 769
 770int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 771{
 772	int r;
 773
 774	if (cap->flags)
 775		return -EINVAL;
 776
 777	switch (cap->cap) {
 778	case KVM_CAP_S390_IRQCHIP:
 779		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
 780		kvm->arch.use_irqchip = 1;
 781		r = 0;
 782		break;
 783	case KVM_CAP_S390_USER_SIGP:
 784		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
 785		kvm->arch.user_sigp = 1;
 786		r = 0;
 787		break;
 788	case KVM_CAP_S390_VECTOR_REGISTERS:
 789		mutex_lock(&kvm->lock);
 790		if (kvm->created_vcpus) {
 791			r = -EBUSY;
 792		} else if (cpu_has_vx()) {
 793			set_kvm_facility(kvm->arch.model.fac_mask, 129);
 794			set_kvm_facility(kvm->arch.model.fac_list, 129);
 795			if (test_facility(134)) {
 796				set_kvm_facility(kvm->arch.model.fac_mask, 134);
 797				set_kvm_facility(kvm->arch.model.fac_list, 134);
 798			}
 799			if (test_facility(135)) {
 800				set_kvm_facility(kvm->arch.model.fac_mask, 135);
 801				set_kvm_facility(kvm->arch.model.fac_list, 135);
 802			}
 803			if (test_facility(148)) {
 804				set_kvm_facility(kvm->arch.model.fac_mask, 148);
 805				set_kvm_facility(kvm->arch.model.fac_list, 148);
 806			}
 807			if (test_facility(152)) {
 808				set_kvm_facility(kvm->arch.model.fac_mask, 152);
 809				set_kvm_facility(kvm->arch.model.fac_list, 152);
 810			}
 811			if (test_facility(192)) {
 812				set_kvm_facility(kvm->arch.model.fac_mask, 192);
 813				set_kvm_facility(kvm->arch.model.fac_list, 192);
 814			}
 815			if (test_facility(198)) {
 816				set_kvm_facility(kvm->arch.model.fac_mask, 198);
 817				set_kvm_facility(kvm->arch.model.fac_list, 198);
 818			}
 819			if (test_facility(199)) {
 820				set_kvm_facility(kvm->arch.model.fac_mask, 199);
 821				set_kvm_facility(kvm->arch.model.fac_list, 199);
 822			}
 823			r = 0;
 824		} else
 825			r = -EINVAL;
 826		mutex_unlock(&kvm->lock);
 827		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
 828			 r ? "(not available)" : "(success)");
 829		break;
 830	case KVM_CAP_S390_RI:
 831		r = -EINVAL;
 832		mutex_lock(&kvm->lock);
 833		if (kvm->created_vcpus) {
 834			r = -EBUSY;
 835		} else if (test_facility(64)) {
 836			set_kvm_facility(kvm->arch.model.fac_mask, 64);
 837			set_kvm_facility(kvm->arch.model.fac_list, 64);
 838			r = 0;
 839		}
 840		mutex_unlock(&kvm->lock);
 841		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
 842			 r ? "(not available)" : "(success)");
 843		break;
 844	case KVM_CAP_S390_AIS:
 845		mutex_lock(&kvm->lock);
 846		if (kvm->created_vcpus) {
 847			r = -EBUSY;
 848		} else {
 849			set_kvm_facility(kvm->arch.model.fac_mask, 72);
 850			set_kvm_facility(kvm->arch.model.fac_list, 72);
 851			r = 0;
 852		}
 853		mutex_unlock(&kvm->lock);
 854		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
 855			 r ? "(not available)" : "(success)");
 856		break;
 857	case KVM_CAP_S390_GS:
 858		r = -EINVAL;
 859		mutex_lock(&kvm->lock);
 860		if (kvm->created_vcpus) {
 861			r = -EBUSY;
 862		} else if (test_facility(133)) {
 863			set_kvm_facility(kvm->arch.model.fac_mask, 133);
 864			set_kvm_facility(kvm->arch.model.fac_list, 133);
 865			r = 0;
 866		}
 867		mutex_unlock(&kvm->lock);
 868		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
 869			 r ? "(not available)" : "(success)");
 870		break;
 871	case KVM_CAP_S390_HPAGE_1M:
 872		mutex_lock(&kvm->lock);
 873		if (kvm->created_vcpus)
 874			r = -EBUSY;
 875		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
 876			r = -EINVAL;
 877		else {
 878			r = 0;
 879			mmap_write_lock(kvm->mm);
 880			kvm->mm->context.allow_gmap_hpage_1m = 1;
 881			mmap_write_unlock(kvm->mm);
 882			/*
 883			 * We might have to create fake 4k page
 884			 * tables. To avoid that the hardware works on
 885			 * stale PGSTEs, we emulate these instructions.
 886			 */
 887			kvm->arch.use_skf = 0;
 888			kvm->arch.use_pfmfi = 0;
 889		}
 890		mutex_unlock(&kvm->lock);
 891		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
 892			 r ? "(not available)" : "(success)");
 893		break;
 894	case KVM_CAP_S390_USER_STSI:
 895		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
 896		kvm->arch.user_stsi = 1;
 897		r = 0;
 898		break;
 899	case KVM_CAP_S390_USER_INSTR0:
 900		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
 901		kvm->arch.user_instr0 = 1;
 902		icpt_operexc_on_all_vcpus(kvm);
 903		r = 0;
 904		break;
 905	case KVM_CAP_S390_CPU_TOPOLOGY:
 906		r = -EINVAL;
 907		mutex_lock(&kvm->lock);
 908		if (kvm->created_vcpus) {
 909			r = -EBUSY;
 910		} else if (test_facility(11)) {
 911			set_kvm_facility(kvm->arch.model.fac_mask, 11);
 912			set_kvm_facility(kvm->arch.model.fac_list, 11);
 913			r = 0;
 914		}
 915		mutex_unlock(&kvm->lock);
 916		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s",
 917			 r ? "(not available)" : "(success)");
 918		break;
 919	default:
 920		r = -EINVAL;
 921		break;
 922	}
 923	return r;
 924}
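/*
 * Minimal userspace sketch (hypothetical vm_fd, no error handling) of
 * enabling one of the simple capabilities handled above:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */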
 925
 926static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
 927{
 928	int ret;
 929
 930	switch (attr->attr) {
 931	case KVM_S390_VM_MEM_LIMIT_SIZE:
 932		ret = 0;
 933		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
 934			 kvm->arch.mem_limit);
 935		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
 936			ret = -EFAULT;
 937		break;
 938	default:
 939		ret = -ENXIO;
 940		break;
 941	}
 942	return ret;
 943}
 944
 945static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
 946{
 947	int ret;
 948	unsigned int idx;
 949	switch (attr->attr) {
 950	case KVM_S390_VM_MEM_ENABLE_CMMA:
 951		ret = -ENXIO;
 952		if (!sclp.has_cmma)
 953			break;
 954
 955		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
 956		mutex_lock(&kvm->lock);
 957		if (kvm->created_vcpus)
 958			ret = -EBUSY;
 959		else if (kvm->mm->context.allow_gmap_hpage_1m)
 960			ret = -EINVAL;
 961		else {
 962			kvm->arch.use_cmma = 1;
 963			/* Not compatible with cmma. */
 964			kvm->arch.use_pfmfi = 0;
 965			ret = 0;
 966		}
 967		mutex_unlock(&kvm->lock);
 968		break;
 969	case KVM_S390_VM_MEM_CLR_CMMA:
 970		ret = -ENXIO;
 971		if (!sclp.has_cmma)
 972			break;
 973		ret = -EINVAL;
 974		if (!kvm->arch.use_cmma)
 975			break;
 976
 977		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
 978		mutex_lock(&kvm->lock);
 979		idx = srcu_read_lock(&kvm->srcu);
 980		s390_reset_cmma(kvm->arch.gmap->mm);
 981		srcu_read_unlock(&kvm->srcu, idx);
 982		mutex_unlock(&kvm->lock);
 983		ret = 0;
 984		break;
 985	case KVM_S390_VM_MEM_LIMIT_SIZE: {
 986		unsigned long new_limit;
 987
 988		if (kvm_is_ucontrol(kvm))
 989			return -EINVAL;
 990
 991		if (get_user(new_limit, (u64 __user *)attr->addr))
 992			return -EFAULT;
 993
 994		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
 995		    new_limit > kvm->arch.mem_limit)
 996			return -E2BIG;
 997
 998		if (!new_limit)
 999			return -EINVAL;
1000
1001		/* gmap_create takes last usable address */
1002		if (new_limit != KVM_S390_NO_MEM_LIMIT)
1003			new_limit -= 1;
1004
1005		ret = -EBUSY;
1006		mutex_lock(&kvm->lock);
1007		if (!kvm->created_vcpus) {
1008			/* gmap_create will round the limit up */
1009			struct gmap *new = gmap_create(current->mm, new_limit);
1010
1011			if (!new) {
1012				ret = -ENOMEM;
1013			} else {
1014				gmap_remove(kvm->arch.gmap);
1015				new->private = kvm;
1016				kvm->arch.gmap = new;
1017				ret = 0;
1018			}
1019		}
1020		mutex_unlock(&kvm->lock);
1021		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
1022		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
1023			 (void *) kvm->arch.gmap->asce);
1024		break;
1025	}
1026	default:
1027		ret = -ENXIO;
1028		break;
1029	}
1030	return ret;
1031}
1032
1033static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
1034
1035void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
1036{
1037	struct kvm_vcpu *vcpu;
1038	unsigned long i;
1039
1040	kvm_s390_vcpu_block_all(kvm);
1041
1042	kvm_for_each_vcpu(i, vcpu, kvm) {
1043		kvm_s390_vcpu_crypto_setup(vcpu);
1044		/* recreate the shadow crycb by leaving the VSIE handler */
1045		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
1046	}
1047
1048	kvm_s390_vcpu_unblock_all(kvm);
1049}
1050
1051static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
1052{
1053	mutex_lock(&kvm->lock);
1054	switch (attr->attr) {
1055	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1056		if (!test_kvm_facility(kvm, 76)) {
1057			mutex_unlock(&kvm->lock);
1058			return -EINVAL;
1059		}
1060		get_random_bytes(
1061			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1062			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1063		kvm->arch.crypto.aes_kw = 1;
1064		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
1065		break;
1066	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1067		if (!test_kvm_facility(kvm, 76)) {
1068			mutex_unlock(&kvm->lock);
1069			return -EINVAL;
1070		}
1071		get_random_bytes(
1072			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1073			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1074		kvm->arch.crypto.dea_kw = 1;
1075		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
1076		break;
1077	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1078		if (!test_kvm_facility(kvm, 76)) {
1079			mutex_unlock(&kvm->lock);
1080			return -EINVAL;
1081		}
1082		kvm->arch.crypto.aes_kw = 0;
1083		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
1084			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1085		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
1086		break;
1087	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1088		if (!test_kvm_facility(kvm, 76)) {
1089			mutex_unlock(&kvm->lock);
1090			return -EINVAL;
1091		}
1092		kvm->arch.crypto.dea_kw = 0;
1093		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
1094			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1095		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
1096		break;
1097	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
1098		if (!ap_instructions_available()) {
1099			mutex_unlock(&kvm->lock);
1100			return -EOPNOTSUPP;
1101		}
1102		kvm->arch.crypto.apie = 1;
1103		break;
1104	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
1105		if (!ap_instructions_available()) {
1106			mutex_unlock(&kvm->lock);
1107			return -EOPNOTSUPP;
1108		}
1109		kvm->arch.crypto.apie = 0;
1110		break;
1111	default:
1112		mutex_unlock(&kvm->lock);
1113		return -ENXIO;
1114	}
1115
1116	kvm_s390_vcpu_crypto_reset_all(kvm);
1117	mutex_unlock(&kvm->lock);
1118	return 0;
1119}
1120
1121static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
1122{
1123	/* Only set the ECB bits after guest requests zPCI interpretation */
1124	if (!vcpu->kvm->arch.use_zpci_interp)
1125		return;
1126
1127	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
1128	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
1129}
1130
1131void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
1132{
1133	struct kvm_vcpu *vcpu;
1134	unsigned long i;
1135
1136	lockdep_assert_held(&kvm->lock);
1137
1138	if (!kvm_s390_pci_interp_allowed())
1139		return;
1140
1141	/*
1142	 * If host is configured for PCI and the necessary facilities are
1143	 * available, turn on interpretation for the life of this guest
1144	 */
1145	kvm->arch.use_zpci_interp = 1;
1146
1147	kvm_s390_vcpu_block_all(kvm);
1148
1149	kvm_for_each_vcpu(i, vcpu, kvm) {
1150		kvm_s390_vcpu_pci_setup(vcpu);
1151		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
1152	}
1153
1154	kvm_s390_vcpu_unblock_all(kvm);
1155}
1156
1157static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
1158{
1159	unsigned long cx;
1160	struct kvm_vcpu *vcpu;
1161
1162	kvm_for_each_vcpu(cx, vcpu, kvm)
1163		kvm_s390_sync_request(req, vcpu);
1164}
1165
1166/*
1167 * Must be called with kvm->srcu held to avoid races on memslots, and with
1168 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
1169 */
1170static int kvm_s390_vm_start_migration(struct kvm *kvm)
1171{
1172	struct kvm_memory_slot *ms;
1173	struct kvm_memslots *slots;
1174	unsigned long ram_pages = 0;
1175	int bkt;
1176
1177	/* migration mode already enabled */
1178	if (kvm->arch.migration_mode)
1179		return 0;
1180	slots = kvm_memslots(kvm);
1181	if (!slots || kvm_memslots_empty(slots))
1182		return -EINVAL;
1183
1184	if (!kvm->arch.use_cmma) {
1185		kvm->arch.migration_mode = 1;
1186		return 0;
1187	}
1188	/* mark all the pages in active slots as dirty */
1189	kvm_for_each_memslot(ms, bkt, slots) {
1190		if (!ms->dirty_bitmap)
1191			return -EINVAL;
1192		/*
1193		 * The second half of the bitmap is only used on x86,
1194		 * and would be wasted otherwise, so we put it to good
1195		 * use here to keep track of the state of the storage
1196		 * attributes.
1197		 */
1198		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
1199		ram_pages += ms->npages;
1200	}
1201	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
1202	kvm->arch.migration_mode = 1;
1203	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
1204	return 0;
1205}
1206
1207/*
1208 * Must be called with kvm->slots_lock to avoid races with ourselves and
1209 * kvm_s390_vm_start_migration.
1210 */
1211static int kvm_s390_vm_stop_migration(struct kvm *kvm)
1212{
1213	/* migration mode already disabled */
1214	if (!kvm->arch.migration_mode)
1215		return 0;
1216	kvm->arch.migration_mode = 0;
1217	if (kvm->arch.use_cmma)
1218		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
1219	return 0;
1220}
1221
1222static int kvm_s390_vm_set_migration(struct kvm *kvm,
1223				     struct kvm_device_attr *attr)
1224{
1225	int res = -ENXIO;
1226
1227	mutex_lock(&kvm->slots_lock);
1228	switch (attr->attr) {
1229	case KVM_S390_VM_MIGRATION_START:
1230		res = kvm_s390_vm_start_migration(kvm);
1231		break;
1232	case KVM_S390_VM_MIGRATION_STOP:
1233		res = kvm_s390_vm_stop_migration(kvm);
1234		break;
1235	default:
1236		break;
1237	}
1238	mutex_unlock(&kvm->slots_lock);
1239
1240	return res;
1241}
1242
1243static int kvm_s390_vm_get_migration(struct kvm *kvm,
1244				     struct kvm_device_attr *attr)
1245{
1246	u64 mig = kvm->arch.migration_mode;
1247
1248	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
1249		return -ENXIO;
1250
1251	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
1252		return -EFAULT;
1253	return 0;
1254}
1255
1256static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
1257
1258static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1259{
1260	struct kvm_s390_vm_tod_clock gtod;
1261
1262	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
1263		return -EFAULT;
1264
1265	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
1266		return -EINVAL;
1267	__kvm_s390_set_tod_clock(kvm, &gtod);
1268
1269	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
1270		gtod.epoch_idx, gtod.tod);
1271
1272	return 0;
1273}
1274
1275static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1276{
1277	u8 gtod_high;
1278
1279	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
1280					   sizeof(gtod_high)))
1281		return -EFAULT;
1282
1283	if (gtod_high != 0)
1284		return -EINVAL;
1285	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
1286
1287	return 0;
1288}
1289
1290static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1291{
1292	struct kvm_s390_vm_tod_clock gtod = { 0 };
1293
1294	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
1295			   sizeof(gtod.tod)))
1296		return -EFAULT;
1297
1298	__kvm_s390_set_tod_clock(kvm, &gtod);
1299	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
1300	return 0;
1301}
1302
1303static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1304{
1305	int ret;
1306
1307	if (attr->flags)
1308		return -EINVAL;
1309
1310	mutex_lock(&kvm->lock);
1311	/*
1312	 * For protected guests, the TOD is managed by the ultravisor, so trying
1313	 * to change it will never bring the expected results.
1314	 */
1315	if (kvm_s390_pv_is_protected(kvm)) {
1316		ret = -EOPNOTSUPP;
1317		goto out_unlock;
1318	}
1319
1320	switch (attr->attr) {
1321	case KVM_S390_VM_TOD_EXT:
1322		ret = kvm_s390_set_tod_ext(kvm, attr);
1323		break;
1324	case KVM_S390_VM_TOD_HIGH:
1325		ret = kvm_s390_set_tod_high(kvm, attr);
1326		break;
1327	case KVM_S390_VM_TOD_LOW:
1328		ret = kvm_s390_set_tod_low(kvm, attr);
1329		break;
1330	default:
1331		ret = -ENXIO;
1332		break;
1333	}
1334
1335out_unlock:
1336	mutex_unlock(&kvm->lock);
1337	return ret;
1338}
1339
1340static void kvm_s390_get_tod_clock(struct kvm *kvm,
1341				   struct kvm_s390_vm_tod_clock *gtod)
1342{
1343	union tod_clock clk;
1344
1345	preempt_disable();
1346
1347	store_tod_clock_ext(&clk);
1348
1349	gtod->tod = clk.tod + kvm->arch.epoch;
1350	gtod->epoch_idx = 0;
1351	if (test_kvm_facility(kvm, 139)) {
1352		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
1353		if (gtod->tod < clk.tod)
1354			gtod->epoch_idx += 1;
1355	}
1356
1357	preempt_enable();
1358}
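/*
 * Rough reading of the above: the guest TOD is the host TOD plus the
 * VM's epoch; if that addition wraps (gtod->tod < clk.tod), a carry is
 * propagated into the epoch index when the multiple-epoch facility
 * (139) is enabled for the guest.
 */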
1359
1360static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1361{
1362	struct kvm_s390_vm_tod_clock gtod;
1363
1364	memset(&gtod, 0, sizeof(gtod));
1365	kvm_s390_get_tod_clock(kvm, &gtod);
1366	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1367		return -EFAULT;
1368
1369	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
1370		gtod.epoch_idx, gtod.tod);
1371	return 0;
1372}
1373
1374static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1375{
1376	u8 gtod_high = 0;
1377
1378	if (copy_to_user((void __user *)attr->addr, &gtod_high,
1379					 sizeof(gtod_high)))
1380		return -EFAULT;
1381	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
1382
1383	return 0;
1384}
1385
1386static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1387{
1388	u64 gtod;
1389
1390	gtod = kvm_s390_get_tod_clock_fast(kvm);
1391	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1392		return -EFAULT;
1393	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
1394
1395	return 0;
1396}
1397
1398static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1399{
1400	int ret;
1401
1402	if (attr->flags)
1403		return -EINVAL;
1404
1405	switch (attr->attr) {
1406	case KVM_S390_VM_TOD_EXT:
1407		ret = kvm_s390_get_tod_ext(kvm, attr);
1408		break;
1409	case KVM_S390_VM_TOD_HIGH:
1410		ret = kvm_s390_get_tod_high(kvm, attr);
1411		break;
1412	case KVM_S390_VM_TOD_LOW:
1413		ret = kvm_s390_get_tod_low(kvm, attr);
1414		break;
1415	default:
1416		ret = -ENXIO;
1417		break;
1418	}
1419	return ret;
1420}
1421
1422static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1423{
1424	struct kvm_s390_vm_cpu_processor *proc;
1425	u16 lowest_ibc, unblocked_ibc;
1426	int ret = 0;
1427
1428	mutex_lock(&kvm->lock);
1429	if (kvm->created_vcpus) {
1430		ret = -EBUSY;
1431		goto out;
1432	}
1433	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
1434	if (!proc) {
1435		ret = -ENOMEM;
1436		goto out;
1437	}
1438	if (!copy_from_user(proc, (void __user *)attr->addr,
1439			    sizeof(*proc))) {
1440		kvm->arch.model.cpuid = proc->cpuid;
1441		lowest_ibc = sclp.ibc >> 16 & 0xfff;
1442		unblocked_ibc = sclp.ibc & 0xfff;
1443		if (lowest_ibc && proc->ibc) {
1444			if (proc->ibc > unblocked_ibc)
1445				kvm->arch.model.ibc = unblocked_ibc;
1446			else if (proc->ibc < lowest_ibc)
1447				kvm->arch.model.ibc = lowest_ibc;
1448			else
1449				kvm->arch.model.ibc = proc->ibc;
1450		}
1451		memcpy(kvm->arch.model.fac_list, proc->fac_list,
1452		       S390_ARCH_FAC_LIST_SIZE_BYTE);
1453		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1454			 kvm->arch.model.ibc,
1455			 kvm->arch.model.cpuid);
1456		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1457			 kvm->arch.model.fac_list[0],
1458			 kvm->arch.model.fac_list[1],
1459			 kvm->arch.model.fac_list[2]);
1460	} else
1461		ret = -EFAULT;
1462	kfree(proc);
1463out:
1464	mutex_unlock(&kvm->lock);
1465	return ret;
1466}
1467
1468static int kvm_s390_set_processor_feat(struct kvm *kvm,
1469				       struct kvm_device_attr *attr)
1470{
1471	struct kvm_s390_vm_cpu_feat data;
1472
1473	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1474		return -EFAULT;
1475	if (!bitmap_subset((unsigned long *) data.feat,
1476			   kvm_s390_available_cpu_feat,
1477			   KVM_S390_VM_CPU_FEAT_NR_BITS))
1478		return -EINVAL;
1479
1480	mutex_lock(&kvm->lock);
1481	if (kvm->created_vcpus) {
1482		mutex_unlock(&kvm->lock);
1483		return -EBUSY;
1484	}
1485	bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1486	mutex_unlock(&kvm->lock);
1487	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1488			 data.feat[0],
1489			 data.feat[1],
1490			 data.feat[2]);
1491	return 0;
1492}
1493
1494static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1495					  struct kvm_device_attr *attr)
1496{
1497	mutex_lock(&kvm->lock);
1498	if (kvm->created_vcpus) {
1499		mutex_unlock(&kvm->lock);
1500		return -EBUSY;
1501	}
1502
1503	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1504			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
1505		mutex_unlock(&kvm->lock);
1506		return -EFAULT;
1507	}
1508	mutex_unlock(&kvm->lock);
1509
1510	VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1511		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1512		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1513		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1514		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1515	VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
1516		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1517		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1518	VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
1519		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1520		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1521	VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
1522		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1523		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1524	VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
1525		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1526		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1527	VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
1528		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1529		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1530	VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
1531		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1532		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1533	VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
1534		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1535		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1536	VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
1537		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1538		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1539	VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
1540		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1541		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1542	VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
1543		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1544		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1545	VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
1546		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1547		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1548	VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
1549		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1550		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1551	VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
1552		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1553		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1554	VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
1555		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1556		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1557	VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1558		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1559		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1560		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1561		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1562	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1563		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1564		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1565		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1566		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1567	VM_EVENT(kvm, 3, "GET: guest PFCR   subfunc 0x%16.16lx.%16.16lx",
1568		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0],
1569		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]);
1570
1571	return 0;
1572}
1573
1574#define KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK	\
1575(						\
1576	((struct kvm_s390_vm_cpu_uv_feat){	\
1577		.ap = 1,			\
1578		.ap_intr = 1,			\
1579	})					\
1580	.feat					\
1581)
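/*
 * The macro above uses a compound literal: a kvm_s390_vm_cpu_uv_feat
 * with only .ap and .ap_intr set, read back through its .feat word.
 * Roughly, that yields a mask with exactly the AP and AP-interrupt UV
 * feature bits set, used below to filter user-supplied guest UV
 * features against the host's UV feature indications.
 */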
1582
1583static int kvm_s390_set_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1584{
1585	struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr;
1586	unsigned long data, filter;
1587
1588	filter = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
1589	if (get_user(data, &ptr->feat))
1590		return -EFAULT;
1591	if (!bitmap_subset(&data, &filter, KVM_S390_VM_CPU_UV_FEAT_NR_BITS))
1592		return -EINVAL;
1593
1594	mutex_lock(&kvm->lock);
1595	if (kvm->created_vcpus) {
1596		mutex_unlock(&kvm->lock);
1597		return -EBUSY;
1598	}
1599	kvm->arch.model.uv_feat_guest.feat = data;
1600	mutex_unlock(&kvm->lock);
1601
1602	VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data);
1603
1604	return 0;
1605}
1606
1607static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1608{
1609	int ret = -ENXIO;
1610
1611	switch (attr->attr) {
1612	case KVM_S390_VM_CPU_PROCESSOR:
1613		ret = kvm_s390_set_processor(kvm, attr);
1614		break;
1615	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1616		ret = kvm_s390_set_processor_feat(kvm, attr);
1617		break;
1618	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1619		ret = kvm_s390_set_processor_subfunc(kvm, attr);
1620		break;
1621	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
1622		ret = kvm_s390_set_uv_feat(kvm, attr);
1623		break;
1624	}
1625	return ret;
1626}
1627
1628static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1629{
1630	struct kvm_s390_vm_cpu_processor *proc;
1631	int ret = 0;
1632
1633	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
1634	if (!proc) {
1635		ret = -ENOMEM;
1636		goto out;
1637	}
1638	proc->cpuid = kvm->arch.model.cpuid;
1639	proc->ibc = kvm->arch.model.ibc;
1640	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1641	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1642	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1643		 kvm->arch.model.ibc,
1644		 kvm->arch.model.cpuid);
1645	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1646		 kvm->arch.model.fac_list[0],
1647		 kvm->arch.model.fac_list[1],
1648		 kvm->arch.model.fac_list[2]);
1649	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1650		ret = -EFAULT;
1651	kfree(proc);
1652out:
1653	return ret;
1654}
1655
1656static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1657{
1658	struct kvm_s390_vm_cpu_machine *mach;
1659	int ret = 0;
1660
1661	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
1662	if (!mach) {
1663		ret = -ENOMEM;
1664		goto out;
1665	}
1666	get_cpu_id((struct cpuid *) &mach->cpuid);
1667	mach->ibc = sclp.ibc;
1668	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
1669	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1670	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
1671	       sizeof(stfle_fac_list));
1672	VM_EVENT(kvm, 3, "GET: host ibc:  0x%4.4x, host cpuid:  0x%16.16llx",
1673		 kvm->arch.model.ibc,
1674		 kvm->arch.model.cpuid);
1675	VM_EVENT(kvm, 3, "GET: host facmask:  0x%16.16llx.%16.16llx.%16.16llx",
1676		 mach->fac_mask[0],
1677		 mach->fac_mask[1],
1678		 mach->fac_mask[2]);
1679	VM_EVENT(kvm, 3, "GET: host faclist:  0x%16.16llx.%16.16llx.%16.16llx",
1680		 mach->fac_list[0],
1681		 mach->fac_list[1],
1682		 mach->fac_list[2]);
1683	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1684		ret = -EFAULT;
1685	kfree(mach);
1686out:
1687	return ret;
1688}
1689
1690static int kvm_s390_get_processor_feat(struct kvm *kvm,
1691				       struct kvm_device_attr *attr)
1692{
1693	struct kvm_s390_vm_cpu_feat data;
1694
1695	bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1696	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1697		return -EFAULT;
1698	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1699			 data.feat[0],
1700			 data.feat[1],
1701			 data.feat[2]);
1702	return 0;
1703}
1704
1705static int kvm_s390_get_machine_feat(struct kvm *kvm,
1706				     struct kvm_device_attr *attr)
1707{
1708	struct kvm_s390_vm_cpu_feat data;
1709
1710	bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1711	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1712		return -EFAULT;
1713	VM_EVENT(kvm, 3, "GET: host feat:  0x%16.16llx.0x%16.16llx.0x%16.16llx",
1714			 data.feat[0],
1715			 data.feat[1],
1716			 data.feat[2]);
1717	return 0;
1718}
1719
1720static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1721					  struct kvm_device_attr *attr)
1722{
1723	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1724	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
1725		return -EFAULT;
1726
1727	VM_EVENT(kvm, 3, "GET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1728		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1729		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1730		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1731		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1732	VM_EVENT(kvm, 3, "GET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
1733		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1734		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1735	VM_EVENT(kvm, 3, "GET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
1736		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1737		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1738	VM_EVENT(kvm, 3, "GET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
1739		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1740		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1741	VM_EVENT(kvm, 3, "GET: guest KM     subfunc 0x%16.16lx.%16.16lx",
1742		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1743		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1744	VM_EVENT(kvm, 3, "GET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
1745		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1746		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1747	VM_EVENT(kvm, 3, "GET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
1748		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1749		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1750	VM_EVENT(kvm, 3, "GET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
1751		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1752		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1753	VM_EVENT(kvm, 3, "GET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
1754		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1755		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1756	VM_EVENT(kvm, 3, "GET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
1757		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1758		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1759	VM_EVENT(kvm, 3, "GET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
1760		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1761		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1762	VM_EVENT(kvm, 3, "GET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
1763		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1764		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1765	VM_EVENT(kvm, 3, "GET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
1766		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1767		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1768	VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
1769		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1770		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1771	VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
1772		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1773		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1774	VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1775		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1776		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1777		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1778		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1779	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1780		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1781		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1782		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1783		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1784	VM_EVENT(kvm, 3, "GET: guest PFCR   subfunc 0x%16.16lx.%16.16lx",
1785		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0],
1786		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]);
1787
1788	return 0;
1789}
1790
1791static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1792					struct kvm_device_attr *attr)
1793{
1794	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1795	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
1796		return -EFAULT;
1797
1798	VM_EVENT(kvm, 3, "GET: host  PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1799		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
1800		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
1801		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
1802		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
1803	VM_EVENT(kvm, 3, "GET: host  PTFF   subfunc 0x%16.16lx.%16.16lx",
1804		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
1805		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
1806	VM_EVENT(kvm, 3, "GET: host  KMAC   subfunc 0x%16.16lx.%16.16lx",
1807		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
1808		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
1809	VM_EVENT(kvm, 3, "GET: host  KMC    subfunc 0x%16.16lx.%16.16lx",
1810		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
1811		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
1812	VM_EVENT(kvm, 3, "GET: host  KM     subfunc 0x%16.16lx.%16.16lx",
1813		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
1814		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
1815	VM_EVENT(kvm, 3, "GET: host  KIMD   subfunc 0x%16.16lx.%16.16lx",
1816		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
1817		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
1818	VM_EVENT(kvm, 3, "GET: host  KLMD   subfunc 0x%16.16lx.%16.16lx",
1819		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
1820		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
1821	VM_EVENT(kvm, 3, "GET: host  PCKMO  subfunc 0x%16.16lx.%16.16lx",
1822		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
1823		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
1824	VM_EVENT(kvm, 3, "GET: host  KMCTR  subfunc 0x%16.16lx.%16.16lx",
1825		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
1826		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
1827	VM_EVENT(kvm, 3, "GET: host  KMF    subfunc 0x%16.16lx.%16.16lx",
1828		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
1829		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
1830	VM_EVENT(kvm, 3, "GET: host  KMO    subfunc 0x%16.16lx.%16.16lx",
1831		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
1832		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
1833	VM_EVENT(kvm, 3, "GET: host  PCC    subfunc 0x%16.16lx.%16.16lx",
1834		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
1835		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
1836	VM_EVENT(kvm, 3, "GET: host  PPNO   subfunc 0x%16.16lx.%16.16lx",
1837		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
1838		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
1839	VM_EVENT(kvm, 3, "GET: host  KMA    subfunc 0x%16.16lx.%16.16lx",
1840		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
1841		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
1842	VM_EVENT(kvm, 3, "GET: host  KDSA   subfunc 0x%16.16lx.%16.16lx",
1843		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
1844		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
1845	VM_EVENT(kvm, 3, "GET: host  SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1846		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1847		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1848		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1849		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
1850	VM_EVENT(kvm, 3, "GET: host  DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1851		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
1852		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
1853		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
1854		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
1855	VM_EVENT(kvm, 3, "GET: host  PFCR   subfunc 0x%16.16lx.%16.16lx",
1856		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0],
1857		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]);
1858
1859	return 0;
1860}
1861
1862static int kvm_s390_get_processor_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1863{
1864	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
1865	unsigned long feat = kvm->arch.model.uv_feat_guest.feat;
1866
1867	if (put_user(feat, &dst->feat))
1868		return -EFAULT;
1869	VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);
1870
1871	return 0;
1872}
1873
1874static int kvm_s390_get_machine_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1875{
1876	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
1877	unsigned long feat;
1878
1879	BUILD_BUG_ON(sizeof(*dst) != sizeof(uv_info.uv_feature_indications));
1880
1881	feat = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
1882	if (put_user(feat, &dst->feat))
1883		return -EFAULT;
1884	VM_EVENT(kvm, 3, "GET: host UV-feat:  0x%16.16lx", feat);
1885
1886	return 0;
1887}
1888
1889static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1890{
1891	int ret = -ENXIO;
1892
1893	switch (attr->attr) {
1894	case KVM_S390_VM_CPU_PROCESSOR:
1895		ret = kvm_s390_get_processor(kvm, attr);
1896		break;
1897	case KVM_S390_VM_CPU_MACHINE:
1898		ret = kvm_s390_get_machine(kvm, attr);
1899		break;
1900	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1901		ret = kvm_s390_get_processor_feat(kvm, attr);
1902		break;
1903	case KVM_S390_VM_CPU_MACHINE_FEAT:
1904		ret = kvm_s390_get_machine_feat(kvm, attr);
1905		break;
1906	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1907		ret = kvm_s390_get_processor_subfunc(kvm, attr);
1908		break;
1909	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1910		ret = kvm_s390_get_machine_subfunc(kvm, attr);
1911		break;
1912	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
1913		ret = kvm_s390_get_processor_uv_feat(kvm, attr);
1914		break;
1915	case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
1916		ret = kvm_s390_get_machine_uv_feat(kvm, attr);
1917		break;
1918	}
1919	return ret;
1920}
1921
1922/**
1923 * kvm_s390_update_topology_change_report - update CPU topology change report
1924 * @kvm: guest KVM description
1925 * @val: set or clear the MTCR bit
1926 *
1927 * Updates the Multiprocessor Topology-Change-Report bit to signal
1928 * a topology change to the guest.
1929 * This is only relevant if the topology facility is present.
1930 *
1931 * The SCA version, bsca or esca, doesn't matter as the offset is the same.
1932 */
1933static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
1934{
1935	union sca_utility new, old;
1936	struct bsca_block *sca;
1937
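	/*
	 * The read lock only guards against the SCA being replaced under us;
	 * concurrent updates of the utility field are handled by retrying
	 * the cmpxchg.
	 */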
1938	read_lock(&kvm->arch.sca_lock);
1939	sca = kvm->arch.sca;
1940	old = READ_ONCE(sca->utility);
1941	do {
1942		new = old;
1943		new.mtcr = val;
1944	} while (!try_cmpxchg(&sca->utility.val, &old.val, new.val));
1945	read_unlock(&kvm->arch.sca_lock);
1946}
1947
1948static int kvm_s390_set_topo_change_indication(struct kvm *kvm,
1949					       struct kvm_device_attr *attr)
1950{
1951	if (!test_kvm_facility(kvm, 11))
1952		return -ENXIO;
1953
1954	kvm_s390_update_topology_change_report(kvm, !!attr->attr);
1955	return 0;
1956}
1957
1958static int kvm_s390_get_topo_change_indication(struct kvm *kvm,
1959					       struct kvm_device_attr *attr)
1960{
1961	u8 topo;
1962
1963	if (!test_kvm_facility(kvm, 11))
1964		return -ENXIO;
1965
1966	read_lock(&kvm->arch.sca_lock);
1967	topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr;
1968	read_unlock(&kvm->arch.sca_lock);
1969
1970	return put_user(topo, (u8 __user *)attr->addr);
1971}
1972
1973static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1974{
1975	int ret;
1976
1977	switch (attr->group) {
1978	case KVM_S390_VM_MEM_CTRL:
1979		ret = kvm_s390_set_mem_control(kvm, attr);
1980		break;
1981	case KVM_S390_VM_TOD:
1982		ret = kvm_s390_set_tod(kvm, attr);
1983		break;
1984	case KVM_S390_VM_CPU_MODEL:
1985		ret = kvm_s390_set_cpu_model(kvm, attr);
1986		break;
1987	case KVM_S390_VM_CRYPTO:
1988		ret = kvm_s390_vm_set_crypto(kvm, attr);
1989		break;
1990	case KVM_S390_VM_MIGRATION:
1991		ret = kvm_s390_vm_set_migration(kvm, attr);
1992		break;
1993	case KVM_S390_VM_CPU_TOPOLOGY:
1994		ret = kvm_s390_set_topo_change_indication(kvm, attr);
1995		break;
1996	default:
1997		ret = -ENXIO;
1998		break;
1999	}
2000
2001	return ret;
2002}
2003
2004static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
2005{
2006	int ret;
2007
2008	switch (attr->group) {
2009	case KVM_S390_VM_MEM_CTRL:
2010		ret = kvm_s390_get_mem_control(kvm, attr);
2011		break;
2012	case KVM_S390_VM_TOD:
2013		ret = kvm_s390_get_tod(kvm, attr);
2014		break;
2015	case KVM_S390_VM_CPU_MODEL:
2016		ret = kvm_s390_get_cpu_model(kvm, attr);
2017		break;
2018	case KVM_S390_VM_MIGRATION:
2019		ret = kvm_s390_vm_get_migration(kvm, attr);
2020		break;
2021	case KVM_S390_VM_CPU_TOPOLOGY:
2022		ret = kvm_s390_get_topo_change_indication(kvm, attr);
2023		break;
2024	default:
2025		ret = -ENXIO;
2026		break;
2027	}
2028
2029	return ret;
2030}
2031
2032static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
2033{
2034	int ret;
2035
2036	switch (attr->group) {
2037	case KVM_S390_VM_MEM_CTRL:
2038		switch (attr->attr) {
2039		case KVM_S390_VM_MEM_ENABLE_CMMA:
2040		case KVM_S390_VM_MEM_CLR_CMMA:
2041			ret = sclp.has_cmma ? 0 : -ENXIO;
2042			break;
2043		case KVM_S390_VM_MEM_LIMIT_SIZE:
2044			ret = 0;
2045			break;
2046		default:
2047			ret = -ENXIO;
2048			break;
2049		}
2050		break;
2051	case KVM_S390_VM_TOD:
2052		switch (attr->attr) {
2053		case KVM_S390_VM_TOD_LOW:
2054		case KVM_S390_VM_TOD_HIGH:
2055			ret = 0;
2056			break;
2057		default:
2058			ret = -ENXIO;
2059			break;
2060		}
2061		break;
2062	case KVM_S390_VM_CPU_MODEL:
2063		switch (attr->attr) {
2064		case KVM_S390_VM_CPU_PROCESSOR:
2065		case KVM_S390_VM_CPU_MACHINE:
2066		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
2067		case KVM_S390_VM_CPU_MACHINE_FEAT:
2068		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
2069		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
2070		case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
2071		case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
2072			ret = 0;
2073			break;
2074		default:
2075			ret = -ENXIO;
2076			break;
2077		}
2078		break;
2079	case KVM_S390_VM_CRYPTO:
2080		switch (attr->attr) {
2081		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
2082		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
2083		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
2084		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
2085			ret = 0;
2086			break;
2087		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
2088		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
2089			ret = ap_instructions_available() ? 0 : -ENXIO;
2090			break;
2091		default:
2092			ret = -ENXIO;
2093			break;
2094		}
2095		break;
2096	case KVM_S390_VM_MIGRATION:
2097		ret = 0;
2098		break;
2099	case KVM_S390_VM_CPU_TOPOLOGY:
2100		ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;
2101		break;
2102	default:
2103		ret = -ENXIO;
2104		break;
2105	}
2106
2107	return ret;
2108}
2109
2110static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
2111{
2112	uint8_t *keys;
2113	uint64_t hva;
2114	int srcu_idx, i, r = 0;
2115
2116	if (args->flags != 0)
2117		return -EINVAL;
2118
2119	/* Is this guest using storage keys? */
2120	if (!mm_uses_skeys(current->mm))
2121		return KVM_S390_GET_SKEYS_NONE;
2122
2123	/* Enforce sane limit on memory allocation */
2124	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
2125		return -EINVAL;
2126
2127	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
2128	if (!keys)
2129		return -ENOMEM;
2130
2131	mmap_read_lock(current->mm);
2132	srcu_idx = srcu_read_lock(&kvm->srcu);
2133	for (i = 0; i < args->count; i++) {
2134		hva = gfn_to_hva(kvm, args->start_gfn + i);
2135		if (kvm_is_error_hva(hva)) {
2136			r = -EFAULT;
2137			break;
2138		}
2139
2140		r = get_guest_storage_key(current->mm, hva, &keys[i]);
2141		if (r)
2142			break;
2143	}
2144	srcu_read_unlock(&kvm->srcu, srcu_idx);
2145	mmap_read_unlock(current->mm);
2146
2147	if (!r) {
2148		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
2149				 sizeof(uint8_t) * args->count);
2150		if (r)
2151			r = -EFAULT;
2152	}
2153
2154	kvfree(keys);
2155	return r;
2156}
2157
2158static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
2159{
2160	uint8_t *keys;
2161	uint64_t hva;
2162	int srcu_idx, i, r = 0;
2163	bool unlocked;
2164
2165	if (args->flags != 0)
2166		return -EINVAL;
2167
2168	/* Enforce sane limit on memory allocation */
2169	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
2170		return -EINVAL;
2171
2172	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
2173	if (!keys)
2174		return -ENOMEM;
2175
2176	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
2177			   sizeof(uint8_t) * args->count);
2178	if (r) {
2179		r = -EFAULT;
2180		goto out;
2181	}
2182
2183	/* Enable storage key handling for the guest */
2184	r = s390_enable_skey();
2185	if (r)
2186		goto out;
2187
2188	i = 0;
2189	mmap_read_lock(current->mm);
2190	srcu_idx = srcu_read_lock(&kvm->srcu);
2191	while (i < args->count) {
2192		unlocked = false;
2193		hva = gfn_to_hva(kvm, args->start_gfn + i);
2194		if (kvm_is_error_hva(hva)) {
2195			r = -EFAULT;
2196			break;
2197		}
2198
2199		/* Lowest order bit is reserved */
2200		if (keys[i] & 0x01) {
2201			r = -EINVAL;
2202			break;
2203		}
2204
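		/*
		 * set_guest_storage_key() can fail if the page is not mapped
		 * yet; fault it in writable and retry the same index (i is
		 * only incremented on success).
		 */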
2205		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
2206		if (r) {
2207			r = fixup_user_fault(current->mm, hva,
2208					     FAULT_FLAG_WRITE, &unlocked);
2209			if (r)
2210				break;
2211		}
2212		if (!r)
2213			i++;
2214	}
2215	srcu_read_unlock(&kvm->srcu, srcu_idx);
2216	mmap_read_unlock(current->mm);
2217out:
2218	kvfree(keys);
2219	return r;
2220}
2221
2222/*
2223 * Base address and length must be sent at the start of each block, therefore
2224 * it's cheaper to send some clean data, as long as it's less than the size of
2225 * two longs.
2226 */
2227#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
2228/* for consistency */
2229#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
2230
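/*
 * Peek mode: report the CMMA values of up to bufsize consecutive pages
 * starting at start_gfn, without consuming any dirty bits.
 */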
2231static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2232			      u8 *res, unsigned long bufsize)
2233{
2234	unsigned long pgstev, hva, cur_gfn = args->start_gfn;
2235
2236	args->count = 0;
2237	while (args->count < bufsize) {
2238		hva = gfn_to_hva(kvm, cur_gfn);
2239		/*
2240		 * We return an error if the first value was invalid, but we
2241		 * return successfully if at least one value was copied.
2242		 */
2243		if (kvm_is_error_hva(hva))
2244			return args->count ? 0 : -EFAULT;
2245		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2246			pgstev = 0;
2247		res[args->count++] = (pgstev >> 24) & 0x43;
2248		cur_gfn++;
2249	}
2250
2251	return 0;
2252}
2253
2254static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
2255						     gfn_t gfn)
2256{
2257	return ____gfn_to_memslot(slots, gfn, true);
2258}
2259
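/*
 * Find the guest frame number of the next page, at or after cur_gfn, whose
 * CMMA dirty bit is set. If cur_gfn lies above the highest memslot, the
 * search wraps around to the first one; if no dirty bit is found at all,
 * a value beyond the end of the last memslot is returned.
 */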
2260static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
2261					      unsigned long cur_gfn)
2262{
2263	struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
2264	unsigned long ofs = cur_gfn - ms->base_gfn;
2265	struct rb_node *mnode = &ms->gfn_node[slots->node_idx];
2266
2267	if (ms->base_gfn + ms->npages <= cur_gfn) {
2268		mnode = rb_next(mnode);
2269		/* If we are above the highest slot, wrap around */
2270		if (!mnode)
2271			mnode = rb_first(&slots->gfn_tree);
2272
2273		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2274		ofs = 0;
2275	}
2276
2277	if (cur_gfn < ms->base_gfn)
2278		ofs = 0;
2279
2280	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
2281	while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
2282		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2283		ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
2284	}
2285	return ms->base_gfn + ofs;
2286}
2287
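/*
 * Migration mode: start at the next page with a set CMMA dirty bit, clear
 * the dirty bits as the values are collected, and stop early once the gap
 * to the next dirty page exceeds KVM_S390_MAX_BIT_DISTANCE.
 */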
2288static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2289			     u8 *res, unsigned long bufsize)
2290{
2291	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
2292	struct kvm_memslots *slots = kvm_memslots(kvm);
2293	struct kvm_memory_slot *ms;
2294
2295	if (unlikely(kvm_memslots_empty(slots)))
2296		return 0;
2297
2298	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2299	ms = gfn_to_memslot(kvm, cur_gfn);
2300	args->count = 0;
2301	args->start_gfn = cur_gfn;
2302	if (!ms)
2303		return 0;
2304	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2305	mem_end = kvm_s390_get_gfn_end(slots);
2306
2307	while (args->count < bufsize) {
2308		hva = gfn_to_hva(kvm, cur_gfn);
2309		if (kvm_is_error_hva(hva))
2310			return 0;
2311		/* Decrement only if we actually flipped the bit to 0 */
2312		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2313			atomic64_dec(&kvm->arch.cmma_dirty_pages);
2314		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2315			pgstev = 0;
2316		/* Save the value */
2317		res[args->count++] = (pgstev >> 24) & 0x43;
2318		/* If the next bit is too far away, stop. */
2319		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2320			return 0;
2321		/* If we reached the previous "next", find the next one */
2322		if (cur_gfn == next_gfn)
2323			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2324		/* Reached the end of memory or of the buffer, stop */
2325		if ((next_gfn >= mem_end) ||
2326		    (next_gfn - args->start_gfn >= bufsize))
2327			return 0;
2328		cur_gfn++;
2329		/* Reached the end of the current memslot, take the next one. */
2330		if (cur_gfn - ms->base_gfn >= ms->npages) {
2331			ms = gfn_to_memslot(kvm, cur_gfn);
2332			if (!ms)
2333				return 0;
2334		}
2335	}
2336	return 0;
2337}
2338
2339/*
2340 * This function searches for the next page with dirty CMMA attributes, and
2341 * saves the attributes in the buffer up to either the end of the buffer or
2342 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2343 * no trailing clean bytes are saved.
2344 * In case no dirty bits were found, or if CMMA was not enabled or used, the
2345 * output buffer will indicate 0 as length.
2346 */
2347static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2348				  struct kvm_s390_cmma_log *args)
2349{
2350	unsigned long bufsize;
2351	int srcu_idx, peek, ret;
2352	u8 *values;
2353
2354	if (!kvm->arch.use_cmma)
2355		return -ENXIO;
2356	/* Invalid/unsupported flags were specified */
2357	if (args->flags & ~KVM_S390_CMMA_PEEK)
2358		return -EINVAL;
2359	/* Migration mode query, and we are not doing a migration */
2360	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
2361	if (!peek && !kvm->arch.migration_mode)
2362		return -EINVAL;
2363	/* CMMA is disabled or was not used, or the buffer has length zero */
2364	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
2365	if (!bufsize || !kvm->mm->context.uses_cmm) {
2366		memset(args, 0, sizeof(*args));
2367		return 0;
2368	}
2369	/* We are not peeking, and there are no dirty pages */
2370	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2371		memset(args, 0, sizeof(*args));
2372		return 0;
2373	}
2374
2375	values = vmalloc(bufsize);
2376	if (!values)
2377		return -ENOMEM;
2378
2379	mmap_read_lock(kvm->mm);
2380	srcu_idx = srcu_read_lock(&kvm->srcu);
2381	if (peek)
2382		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2383	else
2384		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
2385	srcu_read_unlock(&kvm->srcu, srcu_idx);
2386	mmap_read_unlock(kvm->mm);
2387
2388	if (kvm->arch.migration_mode)
2389		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2390	else
2391		args->remaining = 0;
2392
2393	if (copy_to_user((void __user *)args->values, values, args->count))
2394		ret = -EFAULT;
2395
2396	vfree(values);
2397	return ret;
2398}
2399
2400/*
2401 * This function sets the CMMA attributes for the given pages. If the input
2402 * buffer has zero length, no action is taken, otherwise the attributes are
2403 * set and the mm->context.uses_cmm flag is set.
2404 */
2405static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2406				  const struct kvm_s390_cmma_log *args)
2407{
2408	unsigned long hva, mask, pgstev, i;
2409	uint8_t *bits;
2410	int srcu_idx, r = 0;
2411
2412	mask = args->mask;
2413
2414	if (!kvm->arch.use_cmma)
2415		return -ENXIO;
2416	/* invalid/unsupported flags */
2417	if (args->flags != 0)
2418		return -EINVAL;
2419	/* Enforce sane limit on memory allocation */
2420	if (args->count > KVM_S390_CMMA_SIZE_MAX)
2421		return -EINVAL;
2422	/* Nothing to do */
2423	if (args->count == 0)
2424		return 0;
2425
2426	bits = vmalloc(array_size(sizeof(*bits), args->count));
2427	if (!bits)
2428		return -ENOMEM;
2429
2430	r = copy_from_user(bits, (void __user *)args->values, args->count);
2431	if (r) {
2432		r = -EFAULT;
2433		goto out;
2434	}
2435
2436	mmap_read_lock(kvm->mm);
2437	srcu_idx = srcu_read_lock(&kvm->srcu);
2438	for (i = 0; i < args->count; i++) {
2439		hva = gfn_to_hva(kvm, args->start_gfn + i);
2440		if (kvm_is_error_hva(hva)) {
2441			r = -EFAULT;
2442			break;
2443		}
2444
2445		pgstev = bits[i];
2446		pgstev = pgstev << 24;
2447		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
2448		set_pgste_bits(kvm->mm, hva, mask, pgstev);
2449	}
2450	srcu_read_unlock(&kvm->srcu, srcu_idx);
2451	mmap_read_unlock(kvm->mm);
2452
2453	if (!kvm->mm->context.uses_cmm) {
2454		mmap_write_lock(kvm->mm);
2455		kvm->mm->context.uses_cmm = 1;
2456		mmap_write_unlock(kvm->mm);
2457	}
2458out:
2459	vfree(bits);
2460	return r;
2461}
2462
2463/**
2464 * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
2465 * non-protected.
2466 * @kvm: the VM whose protected vCPUs are to be converted
2467 * @rc: return value for the RC field of the UVC (in case of error)
2468 * @rrc: return value for the RRC field of the UVC (in case of error)
2469 *
2470 * Does not stop in case of error; it tries to convert as many
2471 * CPUs as possible. In case of error, the RC and RRC of the first
2472 * error are returned.
2473 *
2474 * Return: 0 in case of success, otherwise -EIO
2475 */
2476int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2477{
2478	struct kvm_vcpu *vcpu;
2479	unsigned long i;
2480	u16 _rc, _rrc;
2481	int ret = 0;
2482
2483	/*
2484	 * We ignore failures and try to destroy as many CPUs as possible.
2485	 * At the same time we must not free the assigned resources when
2486	 * this fails, as the ultravisor still has access to that memory.
2487	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
2488	 * behind.
2489	 * We want to return the first failure rc and rrc, though.
2490	 */
2491	kvm_for_each_vcpu(i, vcpu, kvm) {
2492		mutex_lock(&vcpu->mutex);
2493		if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
2494			*rc = _rc;
2495			*rrc = _rrc;
2496			ret = -EIO;
2497		}
2498		mutex_unlock(&vcpu->mutex);
2499	}
2500	/* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
2501	if (use_gisa)
2502		kvm_s390_gisa_enable(kvm);
2503	return ret;
2504}
2505
2506/**
2507 * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
2508 * to protected.
2509 * @kvm: the VM whose protected vCPUs are to be converted
2510 * @rc: return value for the RC field of the UVC (in case of error)
2511 * @rrc: return value for the RRC field of the UVC (in case of error)
2512 *
2513 * Tries to undo the conversion in case of error.
2514 *
2515 * Return: 0 in case of success, otherwise -EIO
2516 */
2517static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2518{
2519	unsigned long i;
2520	int r = 0;
2521	u16 dummy;
2522
2523	struct kvm_vcpu *vcpu;
2524
2525	/* Disable the GISA if the ultravisor does not support AIV. */
2526	if (!uv_has_feature(BIT_UV_FEAT_AIV))
2527		kvm_s390_gisa_disable(kvm);
2528
2529	kvm_for_each_vcpu(i, vcpu, kvm) {
2530		mutex_lock(&vcpu->mutex);
2531		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
2532		mutex_unlock(&vcpu->mutex);
2533		if (r)
2534			break;
2535	}
2536	if (r)
2537		kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
2538	return r;
2539}
2540
2541/*
2542 * Here we provide user space with a direct interface to query UV
2543 * related data like UV maxima and available features as well as
2544 * feature specific data.
2545 *
2546 * To facilitate future extension of the data structures we'll try to
2547 * write data up to the maximum requested length.
2548 */
2549static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info)
2550{
2551	ssize_t len_min;
2552
2553	switch (info->header.id) {
2554	case KVM_PV_INFO_VM: {
2555		len_min = sizeof(info->header) + sizeof(info->vm);
2556
2557		if (info->header.len_max < len_min)
2558			return -EINVAL;
2559
2560		memcpy(info->vm.inst_calls_list,
2561		       uv_info.inst_calls_list,
2562		       sizeof(uv_info.inst_calls_list));
2563
2564		/* It's max cpuid not max cpus, so it's off by one */
2565		info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
2566		info->vm.max_guests = uv_info.max_num_sec_conf;
2567		info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
2568		info->vm.feature_indication = uv_info.uv_feature_indications;
2569
2570		return len_min;
2571	}
2572	case KVM_PV_INFO_DUMP: {
2573		len_min = sizeof(info->header) + sizeof(info->dump);
2574
2575		if (info->header.len_max < len_min)
2576			return -EINVAL;
2577
2578		info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len;
2579		info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len;
2580		info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len;
2581		return len_min;
2582	}
2583	default:
2584		return -EINVAL;
2585	}
2586}
2587
2588static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
2589			   struct kvm_s390_pv_dmp dmp)
2590{
2591	int r = -EINVAL;
2592	void __user *result_buff = (void __user *)dmp.buff_addr;
2593
2594	switch (dmp.subcmd) {
2595	case KVM_PV_DUMP_INIT: {
2596		if (kvm->arch.pv.dumping)
2597			break;
2598
2599		/*
2600		 * Block SIE entry as concurrent dump UVCs could lead
2601		 * to validities.
2602		 */
2603		kvm_s390_vcpu_block_all(kvm);
2604
2605		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2606				  UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc);
2607		KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x",
2608			     cmd->rc, cmd->rrc);
2609		if (!r) {
2610			kvm->arch.pv.dumping = true;
2611		} else {
2612			kvm_s390_vcpu_unblock_all(kvm);
2613			r = -EINVAL;
2614		}
2615		break;
2616	}
2617	case KVM_PV_DUMP_CONFIG_STOR_STATE: {
2618		if (!kvm->arch.pv.dumping)
2619			break;
2620
2621		/*
2622		 * gaddr is an output parameter since we might stop
2623		 * early. As dmp will be copied back in our caller, we
2624		 * don't need to do it ourselves.
2625		 */
2626		r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len,
2627						&cmd->rc, &cmd->rrc);
2628		break;
2629	}
2630	case KVM_PV_DUMP_COMPLETE: {
2631		if (!kvm->arch.pv.dumping)
2632			break;
2633
2634		r = -EINVAL;
2635		if (dmp.buff_len < uv_info.conf_dump_finalize_len)
2636			break;
2637
2638		r = kvm_s390_pv_dump_complete(kvm, result_buff,
2639					      &cmd->rc, &cmd->rrc);
2640		break;
2641	}
2642	default:
2643		r = -ENOTTY;
2644		break;
2645	}
2646
2647	return r;
2648}
2649
2650static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
2651{
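	/*
	 * All PV commands run under kvm->lock except the asynchronous
	 * cleanup, which must be called without it (see the comment at
	 * KVM_PV_ASYNC_CLEANUP_PERFORM below).
	 */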
2652	const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
2653	void __user *argp = (void __user *)cmd->data;
2654	int r = 0;
2655	u16 dummy;
2656
2657	if (need_lock)
2658		mutex_lock(&kvm->lock);
2659
2660	switch (cmd->cmd) {
2661	case KVM_PV_ENABLE: {
2662		r = -EINVAL;
2663		if (kvm_s390_pv_is_protected(kvm))
2664			break;
2665
2666		/*
2667		 *  FMT 4 SIE needs esca. As we never switch back to bsca from
2668		 *  esca, we need no cleanup in the error cases below
2669		 */
2670		r = sca_switch_to_extended(kvm);
2671		if (r)
2672			break;
2673
2674		r = s390_disable_cow_sharing();
2675		if (r)
2676			break;
2677
2678		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
2679		if (r)
2680			break;
2681
2682		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
2683		if (r)
2684			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
2685
2686		/* we need to block service interrupts from now on */
2687		set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2688		break;
2689	}
2690	case KVM_PV_ASYNC_CLEANUP_PREPARE:
2691		r = -EINVAL;
2692		if (!kvm_s390_pv_is_protected(kvm) || !async_destroy)
2693			break;
2694
2695		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2696		/*
2697		 * If a CPU could not be destroyed, destroying the VM will also
2698		 * fail. There is no point in trying; instead, return the rc and
2699		 * rrc from the first CPU that failed to be destroyed.
2700		 */
2701		if (r)
2702			break;
2703		r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc);
2704
2705		/* no need to block service interrupts any more */
2706		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2707		break;
2708	case KVM_PV_ASYNC_CLEANUP_PERFORM:
2709		r = -EINVAL;
2710		if (!async_destroy)
2711			break;
2712		/* kvm->lock must not be held; this is asserted inside the function. */
2713		r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc);
2714		break;
2715	case KVM_PV_DISABLE: {
2716		r = -EINVAL;
2717		if (!kvm_s390_pv_is_protected(kvm))
2718			break;
2719
2720		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2721		/*
2722		 * If a CPU could not be destroyed, destroying the VM will also
2723		 * fail. There is no point in trying; instead, return the rc and
2724		 * rrc from the first CPU that failed to be destroyed.
2725		 */
2726		if (r)
2727			break;
2728		r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc);
2729
2730		/* no need to block service interrupts any more */
2731		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2732		break;
2733	}
2734	case KVM_PV_SET_SEC_PARMS: {
2735		struct kvm_s390_pv_sec_parm parms = {};
2736		void *hdr;
2737
2738		r = -EINVAL;
2739		if (!kvm_s390_pv_is_protected(kvm))
2740			break;
2741
2742		r = -EFAULT;
2743		if (copy_from_user(&parms, argp, sizeof(parms)))
2744			break;
2745
2746		/* Currently restricted to 8KB */
2747		r = -EINVAL;
2748		if (parms.length > PAGE_SIZE * 2)
2749			break;
2750
2751		r = -ENOMEM;
2752		hdr = vmalloc(parms.length);
2753		if (!hdr)
2754			break;
2755
2756		r = -EFAULT;
2757		if (!copy_from_user(hdr, (void __user *)parms.origin,
2758				    parms.length))
2759			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
2760						      &cmd->rc, &cmd->rrc);
2761
2762		vfree(hdr);
2763		break;
2764	}
2765	case KVM_PV_UNPACK: {
2766		struct kvm_s390_pv_unp unp = {};
2767
2768		r = -EINVAL;
2769		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
2770			break;
2771
2772		r = -EFAULT;
2773		if (copy_from_user(&unp, argp, sizeof(unp)))
2774			break;
2775
2776		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
2777				       &cmd->rc, &cmd->rrc);
2778		break;
2779	}
2780	case KVM_PV_VERIFY: {
2781		r = -EINVAL;
2782		if (!kvm_s390_pv_is_protected(kvm))
2783			break;
2784
2785		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2786				  UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
2787		KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
2788			     cmd->rrc);
2789		break;
2790	}
2791	case KVM_PV_PREP_RESET: {
2792		r = -EINVAL;
2793		if (!kvm_s390_pv_is_protected(kvm))
2794			break;
2795
2796		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2797				  UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2798		KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2799			     cmd->rc, cmd->rrc);
2800		break;
2801	}
2802	case KVM_PV_UNSHARE_ALL: {
2803		r = -EINVAL;
2804		if (!kvm_s390_pv_is_protected(kvm))
2805			break;
2806
2807		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2808				  UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2809		KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2810			     cmd->rc, cmd->rrc);
2811		break;
2812	}
2813	case KVM_PV_INFO: {
2814		struct kvm_s390_pv_info info = {};
2815		ssize_t data_len;
2816
2817		/*
2818		 * No need to check the VM protection here.
2819		 *
2820		 * Maybe user space wants to query some of the data
2821		 * when the VM is still unprotected. If we see the
2822		 * need to fence a new data command we can still
2823		 * return an error in the info handler.
2824		 */
2825
2826		r = -EFAULT;
2827		if (copy_from_user(&info, argp, sizeof(info.header)))
2828			break;
2829
2830		r = -EINVAL;
2831		if (info.header.len_max < sizeof(info.header))
2832			break;
2833
2834		data_len = kvm_s390_handle_pv_info(&info);
2835		if (data_len < 0) {
2836			r = data_len;
2837			break;
2838		}
2839		/*
2840		 * If a data command struct is extended (multiple
2841		 * times) this can be used to determine how much of it
2842		 * is valid.
2843		 */
2844		info.header.len_written = data_len;
2845
2846		r = -EFAULT;
2847		if (copy_to_user(argp, &info, data_len))
2848			break;
2849
2850		r = 0;
2851		break;
2852	}
2853	case KVM_PV_DUMP: {
2854		struct kvm_s390_pv_dmp dmp;
2855
2856		r = -EINVAL;
2857		if (!kvm_s390_pv_is_protected(kvm))
2858			break;
2859
2860		r = -EFAULT;
2861		if (copy_from_user(&dmp, argp, sizeof(dmp)))
2862			break;
2863
2864		r = kvm_s390_pv_dmp(kvm, cmd, dmp);
2865		if (r)
2866			break;
2867
2868		if (copy_to_user(argp, &dmp, sizeof(dmp))) {
2869			r = -EFAULT;
2870			break;
2871		}
2872
2873		break;
2874	}
2875	default:
2876		r = -ENOTTY;
2877	}
2878	if (need_lock)
2879		mutex_unlock(&kvm->lock);
2880
2881	return r;
2882}
2883
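/*
 * Common validation for the VM memops: reject unknown flags, zero-length or
 * oversized transfers, and invalid storage keys. The key is forced to 0 when
 * storage-key protection is not requested.
 */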
2884static int mem_op_validate_common(struct kvm_s390_mem_op *mop, u64 supported_flags)
2885{
2886	if (mop->flags & ~supported_flags || !mop->size)
2887		return -EINVAL;
2888	if (mop->size > MEM_OP_MAX_SIZE)
2889		return -E2BIG;
2890	if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
2891		if (mop->key > 0xf)
2892			return -EINVAL;
2893	} else {
2894		mop->key = 0;
2895	}
2896	return 0;
2897}
2898
2899static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2900{
2901	void __user *uaddr = (void __user *)mop->buf;
2902	enum gacc_mode acc_mode;
2903	void *tmpbuf = NULL;
2904	int r, srcu_idx;
2905
2906	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION |
2907					KVM_S390_MEMOP_F_CHECK_ONLY);
2908	if (r)
2909		return r;
2910
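	/* A check-only operation verifies accessibility and needs no bounce buffer. */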
2911	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2912		tmpbuf = vmalloc(mop->size);
2913		if (!tmpbuf)
2914			return -ENOMEM;
2915	}
2916
2917	srcu_idx = srcu_read_lock(&kvm->srcu);
2918
2919	if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
2920		r = PGM_ADDRESSING;
2921		goto out_unlock;
2922	}
2923
2924	acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE;
2925	if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2926		r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key);
2927		goto out_unlock;
2928	}
2929	if (acc_mode == GACC_FETCH) {
2930		r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2931					      mop->size, GACC_FETCH, mop->key);
2932		if (r)
2933			goto out_unlock;
2934		if (copy_to_user(uaddr, tmpbuf, mop->size))
2935			r = -EFAULT;
2936	} else {
2937		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2938			r = -EFAULT;
2939			goto out_unlock;
2940		}
2941		r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2942					      mop->size, GACC_STORE, mop->key);
2943	}
2944
2945out_unlock:
2946	srcu_read_unlock(&kvm->srcu, srcu_idx);
2947
2948	vfree(tmpbuf);
2949	return r;
2950}
2951
2952static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2953{
2954	void __user *uaddr = (void __user *)mop->buf;
2955	void __user *old_addr = (void __user *)mop->old_addr;
2956	union {
2957		__uint128_t quad;
2958		char raw[sizeof(__uint128_t)];
2959	} old = { .quad = 0}, new = { .quad = 0 };
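	/* The operand is right-aligned within the 16-byte quadword buffers. */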
2960	unsigned int off_in_quad = sizeof(new) - mop->size;
2961	int r, srcu_idx;
2962	bool success;
2963
2964	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION);
2965	if (r)
2966		return r;
2967	/*
2968	 * This validates off_in_quad. Checking that size is a power
2969	 * of two is not necessary, as cmpxchg_guest_abs_with_key
2970	 * takes care of that
2971	 */
2972	if (mop->size > sizeof(new))
2973		return -EINVAL;
2974	if (copy_from_user(&new.raw[off_in_quad], uaddr, mop->size))
2975		return -EFAULT;
2976	if (copy_from_user(&old.raw[off_in_quad], old_addr, mop->size))
2977		return -EFAULT;
2978
2979	srcu_idx = srcu_read_lock(&kvm->srcu);
2980
2981	if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
2982		r = PGM_ADDRESSING;
2983		goto out_unlock;
2984	}
2985
2986	r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad,
2987				       new.quad, mop->key, &success);
2988	if (!success && copy_to_user(old_addr, &old.raw[off_in_quad], mop->size))
2989		r = -EFAULT;
2990
2991out_unlock:
2992	srcu_read_unlock(&kvm->srcu, srcu_idx);
2993	return r;
2994}
2995
2996static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2997{
2998	/*
2999	 * This is technically only a heuristic: since kvm->lock is not
3000	 * taken, it is not guaranteed that the VM is/remains non-protected.
3001	 * This is fine from a kernel perspective, as wrongdoing is detected
3002	 * on the access, -EFAULT is returned, and the VM may crash the
3003	 * next time it accesses the memory in question.
3004	 * There is no sane use case for switching protection and doing a
3005	 * memop on two different CPUs at the same time.
3006	 */
3007	if (kvm_s390_pv_get_handle(kvm))
3008		return -EINVAL;
3009
3010	switch (mop->op) {
3011	case KVM_S390_MEMOP_ABSOLUTE_READ:
3012	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
3013		return kvm_s390_vm_mem_op_abs(kvm, mop);
3014	case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
3015		return kvm_s390_vm_mem_op_cmpxchg(kvm, mop);
3016	default:
3017		return -EINVAL;
3018	}
3019}
3020
3021int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
3022{
3023	struct kvm *kvm = filp->private_data;
3024	void __user *argp = (void __user *)arg;
3025	struct kvm_device_attr attr;
3026	int r;
3027
3028	switch (ioctl) {
3029	case KVM_S390_INTERRUPT: {
3030		struct kvm_s390_interrupt s390int;
3031
3032		r = -EFAULT;
3033		if (copy_from_user(&s390int, argp, sizeof(s390int)))
3034			break;
3035		r = kvm_s390_inject_vm(kvm, &s390int);
3036		break;
3037	}
3038	case KVM_CREATE_IRQCHIP: {
3039		r = -EINVAL;
3040		if (kvm->arch.use_irqchip)
3041			r = 0;
3042		break;
3043	}
3044	case KVM_SET_DEVICE_ATTR: {
3045		r = -EFAULT;
3046		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3047			break;
3048		r = kvm_s390_vm_set_attr(kvm, &attr);
3049		break;
3050	}
3051	case KVM_GET_DEVICE_ATTR: {
3052		r = -EFAULT;
3053		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3054			break;
3055		r = kvm_s390_vm_get_attr(kvm, &attr);
3056		break;
3057	}
3058	case KVM_HAS_DEVICE_ATTR: {
3059		r = -EFAULT;
3060		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3061			break;
3062		r = kvm_s390_vm_has_attr(kvm, &attr);
3063		break;
3064	}
3065	case KVM_S390_GET_SKEYS: {
3066		struct kvm_s390_skeys args;
3067
3068		r = -EFAULT;
3069		if (copy_from_user(&args, argp,
3070				   sizeof(struct kvm_s390_skeys)))
3071			break;
3072		r = kvm_s390_get_skeys(kvm, &args);
3073		break;
3074	}
3075	case KVM_S390_SET_SKEYS: {
3076		struct kvm_s390_skeys args;
3077
3078		r = -EFAULT;
3079		if (copy_from_user(&args, argp,
3080				   sizeof(struct kvm_s390_skeys)))
3081			break;
3082		r = kvm_s390_set_skeys(kvm, &args);
3083		break;
3084	}
3085	case KVM_S390_GET_CMMA_BITS: {
3086		struct kvm_s390_cmma_log args;
3087
3088		r = -EFAULT;
3089		if (copy_from_user(&args, argp, sizeof(args)))
3090			break;
3091		mutex_lock(&kvm->slots_lock);
3092		r = kvm_s390_get_cmma_bits(kvm, &args);
3093		mutex_unlock(&kvm->slots_lock);
3094		if (!r) {
3095			r = copy_to_user(argp, &args, sizeof(args));
3096			if (r)
3097				r = -EFAULT;
3098		}
3099		break;
3100	}
3101	case KVM_S390_SET_CMMA_BITS: {
3102		struct kvm_s390_cmma_log args;
3103
3104		r = -EFAULT;
3105		if (copy_from_user(&args, argp, sizeof(args)))
3106			break;
3107		mutex_lock(&kvm->slots_lock);
3108		r = kvm_s390_set_cmma_bits(kvm, &args);
3109		mutex_unlock(&kvm->slots_lock);
3110		break;
3111	}
3112	case KVM_S390_PV_COMMAND: {
3113		struct kvm_pv_cmd args;
3114
3115		/* protvirt means user cpu state */
3116		kvm_s390_set_user_cpu_state_ctrl(kvm);
3117		r = 0;
3118		if (!is_prot_virt_host()) {
3119			r = -EINVAL;
3120			break;
3121		}
3122		if (copy_from_user(&args, argp, sizeof(args))) {
3123			r = -EFAULT;
3124			break;
3125		}
3126		if (args.flags) {
3127			r = -EINVAL;
3128			break;
3129		}
3130		/* must be called without kvm->lock */
3131		r = kvm_s390_handle_pv(kvm, &args);
3132		if (copy_to_user(argp, &args, sizeof(args))) {
3133			r = -EFAULT;
3134			break;
3135		}
3136		break;
3137	}
3138	case KVM_S390_MEM_OP: {
3139		struct kvm_s390_mem_op mem_op;
3140
3141		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3142			r = kvm_s390_vm_mem_op(kvm, &mem_op);
3143		else
3144			r = -EFAULT;
3145		break;
3146	}
3147	case KVM_S390_ZPCI_OP: {
3148		struct kvm_s390_zpci_op args;
3149
3150		r = -EINVAL;
3151		if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3152			break;
3153		if (copy_from_user(&args, argp, sizeof(args))) {
3154			r = -EFAULT;
3155			break;
3156		}
3157		r = kvm_s390_pci_zpci_op(kvm, &args);
3158		break;
3159	}
3160	default:
3161		r = -ENOTTY;
3162	}
3163
3164	return r;
3165}
3166
3167static int kvm_s390_apxa_installed(void)
3168{
3169	struct ap_config_info info;
3170
3171	if (ap_instructions_available()) {
3172		if (ap_qci(&info) == 0)
3173			return info.apxa;
3174	}
3175
3176	return 0;
3177}
3178
3179/*
3180 * The format of the crypto control block (CRYCB) is specified in the 3 low
3181 * order bits of the CRYCB designation (CRYCBD) field as follows:
3182 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
3183 *	     AP extended addressing (APXA) facility are installed.
3184 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
3185 * Format 2: Both the APXA and MSAX3 facilities are installed
3186 */
3187static void kvm_s390_set_crycb_format(struct kvm *kvm)
3188{
3189	kvm->arch.crypto.crycbd = virt_to_phys(kvm->arch.crypto.crycb);
3190
3191	/* Clear the CRYCB format bits - i.e., set format 0 by default */
3192	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
3193
3194	/* Check whether MSAX3 is installed */
3195	if (!test_kvm_facility(kvm, 76))
3196		return;
3197
3198	if (kvm_s390_apxa_installed())
3199		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
3200	else
3201		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
3202}
3203
3204/*
3205 * kvm_arch_crypto_set_masks
3206 *
3207 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3208 *	 to be set.
3209 * @apm: the mask identifying the accessible AP adapters
3210 * @aqm: the mask identifying the accessible AP domains
3211 * @adm: the mask identifying the accessible AP control domains
3212 *
3213 * Set the masks that identify the adapters, domains and control domains to
3214 * which the KVM guest is granted access.
3215 *
3216 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3217 *	 function.
3218 */
3219void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
3220			       unsigned long *aqm, unsigned long *adm)
3221{
3222	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
3223
3224	kvm_s390_vcpu_block_all(kvm);
3225
3226	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
3227	case CRYCB_FORMAT2: /* APCB1 use 256 bits */
3228		memcpy(crycb->apcb1.apm, apm, 32);
3229		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
3230			 apm[0], apm[1], apm[2], apm[3]);
3231		memcpy(crycb->apcb1.aqm, aqm, 32);
3232		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
3233			 aqm[0], aqm[1], aqm[2], aqm[3]);
3234		memcpy(crycb->apcb1.adm, adm, 32);
3235		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
3236			 adm[0], adm[1], adm[2], adm[3]);
3237		break;
3238	case CRYCB_FORMAT1:
3239	case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
3240		memcpy(crycb->apcb0.apm, apm, 8);
3241		memcpy(crycb->apcb0.aqm, aqm, 2);
3242		memcpy(crycb->apcb0.adm, adm, 2);
3243		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
3244			 apm[0], *((unsigned short *)aqm),
3245			 *((unsigned short *)adm));
3246		break;
3247	default:	/* Can not happen */
3248		break;
3249	}
3250
3251	/* recreate the shadow crycb for each vcpu */
3252	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3253	kvm_s390_vcpu_unblock_all(kvm);
3254}
3255EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
3256
3257/*
3258 * kvm_arch_crypto_clear_masks
3259 *
3260 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3261 *	 to be cleared.
3262 *
3263 * Clear the masks that identify the adapters, domains and control domains to
3264 * which the KVM guest is granted access.
3265 *
3266 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3267 *	 function.
3268 */
3269void kvm_arch_crypto_clear_masks(struct kvm *kvm)
3270{
3271	kvm_s390_vcpu_block_all(kvm);
3272
3273	memset(&kvm->arch.crypto.crycb->apcb0, 0,
3274	       sizeof(kvm->arch.crypto.crycb->apcb0));
3275	memset(&kvm->arch.crypto.crycb->apcb1, 0,
3276	       sizeof(kvm->arch.crypto.crycb->apcb1));
3277
3278	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
3279	/* recreate the shadow crycb for each vcpu */
3280	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3281	kvm_s390_vcpu_unblock_all(kvm);
3282}
3283EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
3284
3285static u64 kvm_s390_get_initial_cpuid(void)
3286{
3287	struct cpuid cpuid;
3288
3289	get_cpu_id(&cpuid);
3290	cpuid.version = 0xff;
3291	return *((u64 *) &cpuid);
3292}
3293
3294static void kvm_s390_crypto_init(struct kvm *kvm)
3295{
3296	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
3297	kvm_s390_set_crycb_format(kvm);
3298	init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
3299
3300	if (!test_kvm_facility(kvm, 76))
3301		return;
3302
3303	/* Enable AES/DEA protected key functions by default */
3304	kvm->arch.crypto.aes_kw = 1;
3305	kvm->arch.crypto.dea_kw = 1;
3306	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
3307			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
3308	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
3309			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
3310}
3311
3312static void sca_dispose(struct kvm *kvm)
3313{
3314	if (kvm->arch.use_esca)
3315		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
3316	else
3317		free_page((unsigned long)(kvm->arch.sca));
3318	kvm->arch.sca = NULL;
3319}
3320
3321void kvm_arch_free_vm(struct kvm *kvm)
3322{
3323	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3324		kvm_s390_pci_clear_list(kvm);
3325
3326	__kvm_arch_free_vm(kvm);
3327}
3328
3329int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
3330{
3331	gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
3332	int i, rc;
3333	char debug_name[16];
3334	static unsigned long sca_offset;
3335
3336	rc = -EINVAL;
3337#ifdef CONFIG_KVM_S390_UCONTROL
3338	if (type & ~KVM_VM_S390_UCONTROL)
3339		goto out_err;
3340	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
3341		goto out_err;
3342#else
3343	if (type)
3344		goto out_err;
3345#endif
3346
3347	rc = s390_enable_sie();
3348	if (rc)
3349		goto out_err;
3350
3351	rc = -ENOMEM;
3352
3353	if (!sclp.has_64bscao)
3354		alloc_flags |= GFP_DMA;
3355	rwlock_init(&kvm->arch.sca_lock);
3356	/* start with basic SCA */
3357	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
3358	if (!kvm->arch.sca)
3359		goto out_err;
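	/*
	 * A bsca_block is much smaller than a page; stagger the SCA origin
	 * within its page in 16-byte steps across VMs, presumably so that the
	 * SCAs of different VMs do not all share the same cache lines.
	 */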
3360	mutex_lock(&kvm_lock);
3361	sca_offset += 16;
3362	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
3363		sca_offset = 0;
3364	kvm->arch.sca = (struct bsca_block *)
3365			((char *) kvm->arch.sca + sca_offset);
3366	mutex_unlock(&kvm_lock);
3367
3368	sprintf(debug_name, "kvm-%u", current->pid);
3369
3370	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
3371	if (!kvm->arch.dbf)
3372		goto out_err;
3373
3374	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
3375	kvm->arch.sie_page2 =
3376	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
3377	if (!kvm->arch.sie_page2)
3378		goto out_err;
3379
3380	kvm->arch.sie_page2->kvm = kvm;
3381	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
3382
3383	for (i = 0; i < kvm_s390_fac_size(); i++) {
3384		kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
3385					      (kvm_s390_fac_base[i] |
3386					       kvm_s390_fac_ext[i]);
3387		kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
3388					      kvm_s390_fac_base[i];
3389	}
3390	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
3391
3392	/* we are always in czam mode - even on pre z14 machines */
3393	set_kvm_facility(kvm->arch.model.fac_mask, 138);
3394	set_kvm_facility(kvm->arch.model.fac_list, 138);
3395	/* we emulate STHYI in kvm */
3396	set_kvm_facility(kvm->arch.model.fac_mask, 74);
3397	set_kvm_facility(kvm->arch.model.fac_list, 74);
3398	if (MACHINE_HAS_TLB_GUEST) {
3399		set_kvm_facility(kvm->arch.model.fac_mask, 147);
3400		set_kvm_facility(kvm->arch.model.fac_list, 147);
3401	}
3402
3403	if (css_general_characteristics.aiv && test_facility(65))
3404		set_kvm_facility(kvm->arch.model.fac_mask, 65);
3405
3406	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
3407	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
3408
3409	kvm->arch.model.uv_feat_guest.feat = 0;
3410
3411	kvm_s390_crypto_init(kvm);
3412
3413	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
3414		mutex_lock(&kvm->lock);
3415		kvm_s390_pci_init_list(kvm);
3416		kvm_s390_vcpu_pci_enable_interp(kvm);
3417		mutex_unlock(&kvm->lock);
3418	}
3419
3420	mutex_init(&kvm->arch.float_int.ais_lock);
3421	spin_lock_init(&kvm->arch.float_int.lock);
3422	for (i = 0; i < FIRQ_LIST_COUNT; i++)
3423		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
3424	init_waitqueue_head(&kvm->arch.ipte_wq);
3425	mutex_init(&kvm->arch.ipte_mutex);
3426
3427	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
3428	VM_EVENT(kvm, 3, "vm created with type %lu", type);
3429
3430	if (type & KVM_VM_S390_UCONTROL) {
3431		kvm->arch.gmap = NULL;
3432		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
3433	} else {
3434		if (sclp.hamax == U64_MAX)
3435			kvm->arch.mem_limit = TASK_SIZE_MAX;
3436		else
3437			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
3438						    sclp.hamax + 1);
3439		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
3440		if (!kvm->arch.gmap)
3441			goto out_err;
3442		kvm->arch.gmap->private = kvm;
3443		kvm->arch.gmap->pfault_enabled = 0;
3444	}
3445
3446	kvm->arch.use_pfmfi = sclp.has_pfmfi;
3447	kvm->arch.use_skf = sclp.has_skey;
3448	spin_lock_init(&kvm->arch.start_stop_lock);
3449	kvm_s390_vsie_init(kvm);
3450	if (use_gisa)
3451		kvm_s390_gisa_init(kvm);
3452	INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
3453	kvm->arch.pv.set_aside = NULL;
3454	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
3455
3456	return 0;
3457out_err:
3458	free_page((unsigned long)kvm->arch.sie_page2);
3459	debug_unregister(kvm->arch.dbf);
3460	sca_dispose(kvm);
3461	KVM_EVENT(3, "creation of vm failed: %d", rc);
3462	return rc;
3463}
3464
3465void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
3466{
3467	u16 rc, rrc;
3468
3469	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
3470	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
3471	kvm_s390_clear_local_irqs(vcpu);
3472	kvm_clear_async_pf_completion_queue(vcpu);
3473	if (!kvm_is_ucontrol(vcpu->kvm))
3474		sca_del_vcpu(vcpu);
3475	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3476
3477	if (kvm_is_ucontrol(vcpu->kvm))
3478		gmap_remove(vcpu->arch.gmap);
3479
3480	if (vcpu->kvm->arch.use_cmma)
3481		kvm_s390_vcpu_unsetup_cmma(vcpu);
3482	/* We can not hold the vcpu mutex here, we are already dying */
3483	if (kvm_s390_pv_cpu_get_handle(vcpu))
3484		kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
3485	free_page((unsigned long)(vcpu->arch.sie_block));
3486}
3487
3488void kvm_arch_destroy_vm(struct kvm *kvm)
3489{
3490	u16 rc, rrc;
3491
3492	kvm_destroy_vcpus(kvm);
3493	sca_dispose(kvm);
3494	kvm_s390_gisa_destroy(kvm);
3495	/*
3496	 * We are already at the end of life and kvm->lock is not taken.
3497	 * This is ok as the file descriptor is closed by now and nobody
3498	 * can mess with the pv state.
3499	 */
3500	kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc);
3501	/*
3502	 * Remove the mmu notifier only when the whole KVM VM is torn down,
3503	 * and only if one was registered to begin with. If the VM is
3504	 * currently not protected, but has previously been protected,
3505	 * then it's possible that the notifier is still registered.
3506	 */
3507	if (kvm->arch.pv.mmu_notifier.ops)
3508		mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);
3509
3510	debug_unregister(kvm->arch.dbf);
3511	free_page((unsigned long)kvm->arch.sie_page2);
3512	if (!kvm_is_ucontrol(kvm))
3513		gmap_remove(kvm->arch.gmap);
3514	kvm_s390_destroy_adapters(kvm);
3515	kvm_s390_clear_float_irqs(kvm);
3516	kvm_s390_vsie_destroy(kvm);
3517	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
3518}
3519
3520/* Section: vcpu related */
3521static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
3522{
3523	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
3524	if (!vcpu->arch.gmap)
3525		return -ENOMEM;
3526	vcpu->arch.gmap->private = vcpu->kvm;
3527
3528	return 0;
3529}
3530
3531static void sca_del_vcpu(struct kvm_vcpu *vcpu)
3532{
3533	if (!kvm_s390_use_sca_entries())
3534		return;
3535	read_lock(&vcpu->kvm->arch.sca_lock);
3536	if (vcpu->kvm->arch.use_esca) {
3537		struct esca_block *sca = vcpu->kvm->arch.sca;
3538
3539		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
3540		sca->cpu[vcpu->vcpu_id].sda = 0;
3541	} else {
3542		struct bsca_block *sca = vcpu->kvm->arch.sca;
3543
3544		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3545		sca->cpu[vcpu->vcpu_id].sda = 0;
3546	}
3547	read_unlock(&vcpu->kvm->arch.sca_lock);
3548}
3549
3550static void sca_add_vcpu(struct kvm_vcpu *vcpu)
3551{
3552	if (!kvm_s390_use_sca_entries()) {
3553		phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca);
3554
3555		/* we still need the basic sca for the ipte control */
3556		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3557		vcpu->arch.sie_block->scaol = sca_phys;
3558		return;
3559	}
3560	read_lock(&vcpu->kvm->arch.sca_lock);
3561	if (vcpu->kvm->arch.use_esca) {
3562		struct esca_block *sca = vcpu->kvm->arch.sca;
3563		phys_addr_t sca_phys = virt_to_phys(sca);
3564
3565		sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3566		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3567		vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
3568		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3569		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
3570	} else {
3571		struct bsca_block *sca = vcpu->kvm->arch.sca;
3572		phys_addr_t sca_phys = virt_to_phys(sca);
3573
3574		sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3575		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3576		vcpu->arch.sie_block->scaol = sca_phys;
3577		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3578	}
3579	read_unlock(&vcpu->kvm->arch.sca_lock);
3580}
3581
3582/* Basic SCA to Extended SCA data copy routines */
3583static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
3584{
3585	d->sda = s->sda;
3586	d->sigp_ctrl.c = s->sigp_ctrl.c;
3587	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
3588}
3589
3590static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
3591{
3592	int i;
3593
3594	d->ipte_control = s->ipte_control;
3595	d->mcn[0] = s->mcn;
3596	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
3597		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
3598}
3599
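/*
 * Switch from the basic SCA to the extended SCA so that vCPU ids beyond
 * the basic slots can be added.  All vCPUs are blocked and sca_lock is
 * held for writing while the SCA origin in every SIE block is rewritten.
 */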
3600static int sca_switch_to_extended(struct kvm *kvm)
3601{
3602	struct bsca_block *old_sca = kvm->arch.sca;
3603	struct esca_block *new_sca;
3604	struct kvm_vcpu *vcpu;
3605	unsigned long vcpu_idx;
3606	u32 scaol, scaoh;
3607	phys_addr_t new_sca_phys;
3608
3609	if (kvm->arch.use_esca)
3610		return 0;
3611
3612	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
3613	if (!new_sca)
3614		return -ENOMEM;
3615
3616	new_sca_phys = virt_to_phys(new_sca);
3617	scaoh = new_sca_phys >> 32;
3618	scaol = new_sca_phys & ESCA_SCAOL_MASK;
3619
3620	kvm_s390_vcpu_block_all(kvm);
3621	write_lock(&kvm->arch.sca_lock);
3622
3623	sca_copy_b_to_e(new_sca, old_sca);
3624
3625	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
3626		vcpu->arch.sie_block->scaoh = scaoh;
3627		vcpu->arch.sie_block->scaol = scaol;
3628		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3629	}
3630	kvm->arch.sca = new_sca;
3631	kvm->arch.use_esca = 1;
3632
3633	write_unlock(&kvm->arch.sca_lock);
3634	kvm_s390_vcpu_unblock_all(kvm);
3635
3636	free_page((unsigned long)old_sca);
3637
3638	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
3639		 old_sca, kvm->arch.sca);
3640	return 0;
3641}
3642
3643static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
3644{
3645	int rc;
3646
3647	if (!kvm_s390_use_sca_entries()) {
3648		if (id < KVM_MAX_VCPUS)
3649			return true;
3650		return false;
3651	}
3652	if (id < KVM_S390_BSCA_CPU_SLOTS)
3653		return true;
3654	if (!sclp.has_esca || !sclp.has_64bscao)
3655		return false;
3656
3657	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
3658
3659	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
3660}
3661
3662/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3663static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3664{
3665	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
3666	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3667	vcpu->arch.cputm_start = get_tod_clock_fast();
3668	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3669}
3670
3671/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3672static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3673{
3674	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
3675	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3676	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3677	vcpu->arch.cputm_start = 0;
3678	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3679}
3680
3681/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3682static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3683{
3684	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3685	vcpu->arch.cputm_enabled = true;
3686	__start_cpu_timer_accounting(vcpu);
3687}
3688
3689/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3690static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3691{
3692	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3693	__stop_cpu_timer_accounting(vcpu);
3694	vcpu->arch.cputm_enabled = false;
3695}
3696
3697static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3698{
3699	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3700	__enable_cpu_timer_accounting(vcpu);
3701	preempt_enable();
3702}
3703
3704static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3705{
3706	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3707	__disable_cpu_timer_accounting(vcpu);
3708	preempt_enable();
3709}
3710
3711/* set the cpu timer - may only be called from the VCPU thread itself */
3712void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3713{
3714	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3715	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3716	if (vcpu->arch.cputm_enabled)
3717		vcpu->arch.cputm_start = get_tod_clock_fast();
3718	vcpu->arch.sie_block->cputm = cputm;
3719	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3720	preempt_enable();
3721}
3722
3723/* update and get the cpu timer - can also be called from other VCPU threads */
3724__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3725{
3726	unsigned int seq;
3727	__u64 value;
3728
3729	if (unlikely(!vcpu->arch.cputm_enabled))
3730		return vcpu->arch.sie_block->cputm;
3731
3732	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3733	do {
3734		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3735		/*
3736		 * If the writer would ever execute a read in the critical
3737		 * section, e.g. in irq context, we have a deadlock.
3738		 */
3739		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3740		value = vcpu->arch.sie_block->cputm;
3741		/* if cputm_start is 0, accounting is being started/stopped */
3742		if (likely(vcpu->arch.cputm_start))
3743			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3744	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3745	preempt_enable();
3746	return value;
3747}
3748
3749void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3750{
3751
3752	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
3753	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3754		__start_cpu_timer_accounting(vcpu);
3755	vcpu->cpu = cpu;
3756}
3757
3758void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3759{
3760	vcpu->cpu = -1;
3761	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3762		__stop_cpu_timer_accounting(vcpu);
3763	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
3764
3765}
3766
3767void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
3768{
3769	mutex_lock(&vcpu->kvm->lock);
3770	preempt_disable();
3771	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
3772	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
3773	preempt_enable();
3774	mutex_unlock(&vcpu->kvm->lock);
3775	if (!kvm_is_ucontrol(vcpu->kvm)) {
3776		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
3777		sca_add_vcpu(vcpu);
3778	}
3779	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3780		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3781}
3782
3783static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3784{
3785	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3786	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3787		return true;
3788	return false;
3789}
3790
3791static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3792{
3793	/* At least one ECC subfunction must be present */
3794	return kvm_has_pckmo_subfunc(kvm, 32) ||
3795	       kvm_has_pckmo_subfunc(kvm, 33) ||
3796	       kvm_has_pckmo_subfunc(kvm, 34) ||
3797	       kvm_has_pckmo_subfunc(kvm, 40) ||
3798	       kvm_has_pckmo_subfunc(kvm, 41);
3799
3800}
3801
3802static bool kvm_has_pckmo_hmac(struct kvm *kvm)
3803{
3804	/* At least one HMAC subfunction must be present */
3805	return kvm_has_pckmo_subfunc(kvm, 118) ||
3806	       kvm_has_pckmo_subfunc(kvm, 122);
3807}
3808
3809static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3810{
3811	/*
3812	 * If the AP instructions are not being interpreted and the MSAX3
3813	 * facility is not configured for the guest, there is nothing to set up.
3814	 */
3815	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
3816		return;
3817
3818	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
3819	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
3820	vcpu->arch.sie_block->eca &= ~ECA_APIE;
3821	vcpu->arch.sie_block->ecd &= ~(ECD_ECC | ECD_HMAC);
3822
3823	if (vcpu->kvm->arch.crypto.apie)
3824		vcpu->arch.sie_block->eca |= ECA_APIE;
3825
3826	/* Set up protected key support */
3827	if (vcpu->kvm->arch.crypto.aes_kw) {
3828		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
3829		/* ecc/hmac is also wrapped with the AES key */
3830		if (kvm_has_pckmo_ecc(vcpu->kvm))
3831			vcpu->arch.sie_block->ecd |= ECD_ECC;
3832		if (kvm_has_pckmo_hmac(vcpu->kvm))
3833			vcpu->arch.sie_block->ecd |= ECD_HMAC;
3834	}
3835
3836	if (vcpu->kvm->arch.crypto.dea_kw)
3837		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
3838}
3839
3840void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3841{
3842	free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
3843	vcpu->arch.sie_block->cbrlo = 0;
3844}
3845
3846int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3847{
3848	void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3849
3850	if (!cbrlo_page)
3851		return -ENOMEM;
3852
3853	vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);
3854	return 0;
3855}
3856
3857static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3858{
3859	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3860
3861	vcpu->arch.sie_block->ibc = model->ibc;
3862	if (test_kvm_facility(vcpu->kvm, 7))
3863		vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
3864}
3865
3866static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3867{
3868	int rc = 0;
3869	u16 uvrc, uvrrc;
3870
3871	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3872						    CPUSTAT_SM |
3873						    CPUSTAT_STOPPED);
3874
3875	if (test_kvm_facility(vcpu->kvm, 78))
3876		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
3877	else if (test_kvm_facility(vcpu->kvm, 8))
3878		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
3879
3880	kvm_s390_vcpu_setup_model(vcpu);
3881
3882	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3883	if (MACHINE_HAS_ESOP)
3884		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
3885	if (test_kvm_facility(vcpu->kvm, 9))
3886		vcpu->arch.sie_block->ecb |= ECB_SRSI;
3887	if (test_kvm_facility(vcpu->kvm, 11))
3888		vcpu->arch.sie_block->ecb |= ECB_PTF;
3889	if (test_kvm_facility(vcpu->kvm, 73))
3890		vcpu->arch.sie_block->ecb |= ECB_TE;
3891	if (!kvm_is_ucontrol(vcpu->kvm))
3892		vcpu->arch.sie_block->ecb |= ECB_SPECI;
3893
3894	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
3895		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
3896	if (test_kvm_facility(vcpu->kvm, 130))
3897		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3898	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
3899	if (sclp.has_cei)
3900		vcpu->arch.sie_block->eca |= ECA_CEI;
3901	if (sclp.has_ib)
3902		vcpu->arch.sie_block->eca |= ECA_IB;
3903	if (sclp.has_siif)
3904		vcpu->arch.sie_block->eca |= ECA_SII;
3905	if (sclp.has_sigpif)
3906		vcpu->arch.sie_block->eca |= ECA_SIGPI;
3907	if (test_kvm_facility(vcpu->kvm, 129)) {
3908		vcpu->arch.sie_block->eca |= ECA_VX;
3909		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3910	}
3911	if (test_kvm_facility(vcpu->kvm, 139))
3912		vcpu->arch.sie_block->ecd |= ECD_MEF;
3913	if (test_kvm_facility(vcpu->kvm, 156))
3914		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3915	if (vcpu->arch.sie_block->gd) {
3916		vcpu->arch.sie_block->eca |= ECA_AIV;
3917		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3918			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3919	}
3920	vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
3921	vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);
3922
3923	if (sclp.has_kss)
3924		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
3925	else
3926		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
3927
3928	if (vcpu->kvm->arch.use_cmma) {
3929		rc = kvm_s390_vcpu_setup_cmma(vcpu);
3930		if (rc)
3931			return rc;
3932	}
3933	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3934	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
3935
3936	vcpu->arch.sie_block->hpid = HPID_KVM;
3937
3938	kvm_s390_vcpu_crypto_setup(vcpu);
3939
3940	kvm_s390_vcpu_pci_setup(vcpu);
3941
3942	mutex_lock(&vcpu->kvm->lock);
3943	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3944		rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3945		if (rc)
3946			kvm_s390_vcpu_unsetup_cmma(vcpu);
3947	}
3948	mutex_unlock(&vcpu->kvm->lock);
3949
3950	return rc;
3951}
3952
3953int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3954{
3955	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3956		return -EINVAL;
3957	return 0;
3958}
3959
3960int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
3961{
3962	struct sie_page *sie_page;
3963	int rc;
3964
3965	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
3966	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
3967	if (!sie_page)
3968		return -ENOMEM;
3969
3970	vcpu->arch.sie_block = &sie_page->sie_block;
3971	vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);
3972
3973	/* the real guest size will always be smaller than msl */
3974	vcpu->arch.sie_block->mso = 0;
3975	vcpu->arch.sie_block->msl = sclp.hamax;
3976
3977	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
3978	spin_lock_init(&vcpu->arch.local_int.lock);
3979	vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
3980	seqcount_init(&vcpu->arch.cputm_seqcount);
3981
3982	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3983	kvm_clear_async_pf_completion_queue(vcpu);
3984	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3985				    KVM_SYNC_GPRS |
3986				    KVM_SYNC_ACRS |
3987				    KVM_SYNC_CRS |
3988				    KVM_SYNC_ARCH0 |
3989				    KVM_SYNC_PFAULT |
3990				    KVM_SYNC_DIAG318;
3991	vcpu->arch.acrs_loaded = false;
3992	kvm_s390_set_prefix(vcpu, 0);
3993	if (test_kvm_facility(vcpu->kvm, 64))
3994		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3995	if (test_kvm_facility(vcpu->kvm, 82))
3996		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3997	if (test_kvm_facility(vcpu->kvm, 133))
3998		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3999	if (test_kvm_facility(vcpu->kvm, 156))
4000		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
4001	/* fprs can be synchronized via vrs, even if the guest has no vx. With
4002	 * cpu_has_vx(), (load|store)_fpu_regs() will work with vrs format.
4003	 */
4004	if (cpu_has_vx())
4005		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
4006	else
4007		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
4008
4009	if (kvm_is_ucontrol(vcpu->kvm)) {
4010		rc = __kvm_ucontrol_vcpu_init(vcpu);
4011		if (rc)
4012			goto out_free_sie_block;
4013	}
4014
4015	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
4016		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
4017	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
4018
4019	rc = kvm_s390_vcpu_setup(vcpu);
4020	if (rc)
4021		goto out_ucontrol_uninit;
4022
4023	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
4024	return 0;
4025
4026out_ucontrol_uninit:
4027	if (kvm_is_ucontrol(vcpu->kvm))
4028		gmap_remove(vcpu->arch.gmap);
4029out_free_sie_block:
4030	free_page((unsigned long)(vcpu->arch.sie_block));
4031	return rc;
4032}
4033
4034int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
4035{
4036	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
4037	return kvm_s390_vcpu_has_irq(vcpu, 0);
4038}
4039
4040bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
4041{
4042	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
4043}
4044
4045void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
4046{
4047	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
4048	exit_sie(vcpu);
4049}
4050
4051void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
4052{
4053	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
4054}
4055
4056static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
4057{
4058	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
4059	exit_sie(vcpu);
4060}
4061
4062bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
4063{
4064	return atomic_read(&vcpu->arch.sie_block->prog20) &
4065	       (PROG_BLOCK_SIE | PROG_REQUEST);
4066}
4067
4068static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
4069{
4070	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
4071}
4072
4073/*
4074 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
4075 * If the CPU is not running (e.g. waiting as idle) the function will
4076 * return immediately. */
4077void exit_sie(struct kvm_vcpu *vcpu)
4078{
4079	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
4080	kvm_s390_vsie_kick(vcpu);
4081	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
4082		cpu_relax();
4083}
4084
4085/* Kick a guest cpu out of SIE to process a request synchronously */
4086void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
4087{
4088	__kvm_make_request(req, vcpu);
4089	kvm_s390_vcpu_request(vcpu);
4090}
4091
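/*
 * gmap notifier: called when host mappings of guest memory are changed.
 * Only the two pages backing a vCPU's prefix area are of interest here;
 * affected vCPUs get a KVM_REQ_REFRESH_GUEST_PREFIX request.
 */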
4092static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
4093			      unsigned long end)
4094{
4095	struct kvm *kvm = gmap->private;
4096	struct kvm_vcpu *vcpu;
4097	unsigned long prefix;
4098	unsigned long i;
4099
4100	trace_kvm_s390_gmap_notifier(start, end, gmap_is_shadow(gmap));
4101
4102	if (gmap_is_shadow(gmap))
4103		return;
4104	if (start >= 1UL << 31)
4105		/* We are only interested in prefix pages */
4106		return;
4107	kvm_for_each_vcpu(i, vcpu, kvm) {
4108		/* match against both prefix pages */
4109		prefix = kvm_s390_get_prefix(vcpu);
4110		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
4111			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
4112				   start, end);
4113			kvm_s390_sync_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
4114		}
4115	}
4116}
4117
4118bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
4119{
4120	/* do not poll with more than halt_poll_max_steal percent of steal time */
4121	if (get_lowcore()->avg_steal_timer * 100 / (TICK_USEC << 12) >=
4122	    READ_ONCE(halt_poll_max_steal)) {
4123		vcpu->stat.halt_no_poll_steal++;
4124		return true;
4125	}
4126	return false;
4127}
4128
4129int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
4130{
4131	/* kvm common code refers to this, but never calls it */
4132	BUG();
4133	return 0;
4134}
4135
4136static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
4137					   struct kvm_one_reg *reg)
4138{
4139	int r = -EINVAL;
4140
4141	switch (reg->id) {
4142	case KVM_REG_S390_TODPR:
4143		r = put_user(vcpu->arch.sie_block->todpr,
4144			     (u32 __user *)reg->addr);
4145		break;
4146	case KVM_REG_S390_EPOCHDIFF:
4147		r = put_user(vcpu->arch.sie_block->epoch,
4148			     (u64 __user *)reg->addr);
4149		break;
4150	case KVM_REG_S390_CPU_TIMER:
4151		r = put_user(kvm_s390_get_cpu_timer(vcpu),
4152			     (u64 __user *)reg->addr);
4153		break;
4154	case KVM_REG_S390_CLOCK_COMP:
4155		r = put_user(vcpu->arch.sie_block->ckc,
4156			     (u64 __user *)reg->addr);
4157		break;
4158	case KVM_REG_S390_PFTOKEN:
4159		r = put_user(vcpu->arch.pfault_token,
4160			     (u64 __user *)reg->addr);
4161		break;
4162	case KVM_REG_S390_PFCOMPARE:
4163		r = put_user(vcpu->arch.pfault_compare,
4164			     (u64 __user *)reg->addr);
4165		break;
4166	case KVM_REG_S390_PFSELECT:
4167		r = put_user(vcpu->arch.pfault_select,
4168			     (u64 __user *)reg->addr);
4169		break;
4170	case KVM_REG_S390_PP:
4171		r = put_user(vcpu->arch.sie_block->pp,
4172			     (u64 __user *)reg->addr);
4173		break;
4174	case KVM_REG_S390_GBEA:
4175		r = put_user(vcpu->arch.sie_block->gbea,
4176			     (u64 __user *)reg->addr);
4177		break;
4178	default:
4179		break;
4180	}
4181
4182	return r;
4183}
4184
4185static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
4186					   struct kvm_one_reg *reg)
4187{
4188	int r = -EINVAL;
4189	__u64 val;
4190
4191	switch (reg->id) {
4192	case KVM_REG_S390_TODPR:
4193		r = get_user(vcpu->arch.sie_block->todpr,
4194			     (u32 __user *)reg->addr);
4195		break;
4196	case KVM_REG_S390_EPOCHDIFF:
4197		r = get_user(vcpu->arch.sie_block->epoch,
4198			     (u64 __user *)reg->addr);
4199		break;
4200	case KVM_REG_S390_CPU_TIMER:
4201		r = get_user(val, (u64 __user *)reg->addr);
4202		if (!r)
4203			kvm_s390_set_cpu_timer(vcpu, val);
4204		break;
4205	case KVM_REG_S390_CLOCK_COMP:
4206		r = get_user(vcpu->arch.sie_block->ckc,
4207			     (u64 __user *)reg->addr);
4208		break;
4209	case KVM_REG_S390_PFTOKEN:
4210		r = get_user(vcpu->arch.pfault_token,
4211			     (u64 __user *)reg->addr);
4212		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4213			kvm_clear_async_pf_completion_queue(vcpu);
4214		break;
4215	case KVM_REG_S390_PFCOMPARE:
4216		r = get_user(vcpu->arch.pfault_compare,
4217			     (u64 __user *)reg->addr);
4218		break;
4219	case KVM_REG_S390_PFSELECT:
4220		r = get_user(vcpu->arch.pfault_select,
4221			     (u64 __user *)reg->addr);
4222		break;
4223	case KVM_REG_S390_PP:
4224		r = get_user(vcpu->arch.sie_block->pp,
4225			     (u64 __user *)reg->addr);
4226		break;
4227	case KVM_REG_S390_GBEA:
4228		r = get_user(vcpu->arch.sie_block->gbea,
4229			     (u64 __user *)reg->addr);
4230		break;
4231	default:
4232		break;
4233	}
4234
4235	return r;
4236}
4237
4238static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
4239{
4240	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
4241	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
4242	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
4243
4244	kvm_clear_async_pf_completion_queue(vcpu);
4245	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
4246		kvm_s390_vcpu_stop(vcpu);
4247	kvm_s390_clear_local_irqs(vcpu);
4248}
4249
4250static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
4251{
4252	/* Initial reset is a superset of the normal reset */
4253	kvm_arch_vcpu_ioctl_normal_reset(vcpu);
4254
4255	/*
4256	 * This equals the initial CPU reset in the PoP, but we don't switch to ESA.
4257	 * We reset not only the internal data, but also ...
4258	 */
4259	vcpu->arch.sie_block->gpsw.mask = 0;
4260	vcpu->arch.sie_block->gpsw.addr = 0;
4261	kvm_s390_set_prefix(vcpu, 0);
4262	kvm_s390_set_cpu_timer(vcpu, 0);
4263	vcpu->arch.sie_block->ckc = 0;
4264	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
4265	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
4266	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
4267
4268	/* ... the data in sync regs */
4269	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
4270	vcpu->run->s.regs.ckc = 0;
4271	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
4272	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
4273	vcpu->run->psw_addr = 0;
4274	vcpu->run->psw_mask = 0;
4275	vcpu->run->s.regs.todpr = 0;
4276	vcpu->run->s.regs.cputm = 0;
4277	vcpu->run->s.regs.ckc = 0;
4278	vcpu->run->s.regs.pp = 0;
4279	vcpu->run->s.regs.gbea = 1;
4280	vcpu->run->s.regs.fpc = 0;
4281	/*
4282	 * Do not reset these registers in the protected case, as some of
4283	 * them are overlaid and they are not accessible in this case
4284	 * anyway.
4285	 */
4286	if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4287		vcpu->arch.sie_block->gbea = 1;
4288		vcpu->arch.sie_block->pp = 0;
4289		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4290		vcpu->arch.sie_block->todpr = 0;
4291	}
4292}
4293
4294static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
4295{
4296	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
4297
4298	/* Clear reset is a superset of the initial reset */
4299	kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4300
4301	memset(&regs->gprs, 0, sizeof(regs->gprs));
4302	memset(&regs->vrs, 0, sizeof(regs->vrs));
4303	memset(&regs->acrs, 0, sizeof(regs->acrs));
4304	memset(&regs->gscb, 0, sizeof(regs->gscb));
4305
4306	regs->etoken = 0;
4307	regs->etoken_extension = 0;
4308}
4309
4310int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4311{
4312	vcpu_load(vcpu);
4313	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
4314	vcpu_put(vcpu);
4315	return 0;
4316}
4317
4318int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4319{
4320	vcpu_load(vcpu);
4321	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
4322	vcpu_put(vcpu);
4323	return 0;
4324}
4325
4326int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4327				  struct kvm_sregs *sregs)
4328{
4329	vcpu_load(vcpu);
4330
4331	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
4332	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
4333
4334	vcpu_put(vcpu);
4335	return 0;
4336}
4337
4338int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4339				  struct kvm_sregs *sregs)
4340{
4341	vcpu_load(vcpu);
4342
4343	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
4344	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
4345
4346	vcpu_put(vcpu);
4347	return 0;
4348}
4349
4350int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4351{
4352	int ret = 0;
4353
4354	vcpu_load(vcpu);
4355
4356	vcpu->run->s.regs.fpc = fpu->fpc;
4357	if (cpu_has_vx())
4358		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
4359				 (freg_t *) fpu->fprs);
4360	else
4361		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
4362
4363	vcpu_put(vcpu);
4364	return ret;
4365}
4366
4367int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4368{
4369	vcpu_load(vcpu);
4370
4371	if (cpu_has_vx())
4372		convert_vx_to_fp((freg_t *) fpu->fprs,
4373				 (__vector128 *) vcpu->run->s.regs.vrs);
4374	else
4375		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
4376	fpu->fpc = vcpu->run->s.regs.fpc;
4377
4378	vcpu_put(vcpu);
4379	return 0;
4380}
4381
4382static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
4383{
4384	int rc = 0;
4385
4386	if (!is_vcpu_stopped(vcpu))
4387		rc = -EBUSY;
4388	else {
4389		vcpu->run->psw_mask = psw.mask;
4390		vcpu->run->psw_addr = psw.addr;
4391	}
4392	return rc;
4393}
4394
4395int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4396				  struct kvm_translation *tr)
4397{
4398	return -EINVAL; /* not implemented yet */
4399}
4400
4401#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
4402			      KVM_GUESTDBG_USE_HW_BP | \
4403			      KVM_GUESTDBG_ENABLE)
4404
4405int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
4406					struct kvm_guest_debug *dbg)
4407{
4408	int rc = 0;
4409
4410	vcpu_load(vcpu);
4411
4412	vcpu->guest_debug = 0;
4413	kvm_s390_clear_bp_data(vcpu);
4414
4415	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
4416		rc = -EINVAL;
4417		goto out;
4418	}
4419	if (!sclp.has_gpere) {
4420		rc = -EINVAL;
4421		goto out;
4422	}
4423
4424	if (dbg->control & KVM_GUESTDBG_ENABLE) {
4425		vcpu->guest_debug = dbg->control;
4426		/* enforce guest PER */
4427		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
4428
4429		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
4430			rc = kvm_s390_import_bp_data(vcpu, dbg);
4431	} else {
4432		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4433		vcpu->arch.guestdbg.last_bp = 0;
4434	}
4435
4436	if (rc) {
4437		vcpu->guest_debug = 0;
4438		kvm_s390_clear_bp_data(vcpu);
4439		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4440	}
4441
4442out:
4443	vcpu_put(vcpu);
4444	return rc;
4445}
4446
4447int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4448				    struct kvm_mp_state *mp_state)
4449{
4450	int ret;
4451
4452	vcpu_load(vcpu);
4453
4454	/* CHECK_STOP and LOAD are not supported yet */
4455	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
4456				      KVM_MP_STATE_OPERATING;
4457
4458	vcpu_put(vcpu);
4459	return ret;
4460}
4461
4462int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
4463				    struct kvm_mp_state *mp_state)
4464{
4465	int rc = 0;
4466
4467	vcpu_load(vcpu);
4468
4469	/* user space knows about this interface - let it control the state */
4470	kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);
4471
4472	switch (mp_state->mp_state) {
4473	case KVM_MP_STATE_STOPPED:
4474		rc = kvm_s390_vcpu_stop(vcpu);
4475		break;
4476	case KVM_MP_STATE_OPERATING:
4477		rc = kvm_s390_vcpu_start(vcpu);
4478		break;
4479	case KVM_MP_STATE_LOAD:
4480		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4481			rc = -ENXIO;
4482			break;
4483		}
4484		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
4485		break;
4486	case KVM_MP_STATE_CHECK_STOP:
4487		fallthrough;	/* CHECK_STOP and LOAD are not supported yet */
4488	default:
4489		rc = -ENXIO;
4490	}
4491
4492	vcpu_put(vcpu);
4493	return rc;
4494}
4495
4496static bool ibs_enabled(struct kvm_vcpu *vcpu)
4497{
4498	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
4499}
4500
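/*
 * Process all requests pending for this vCPU before (re)entering SIE.
 * Each handled request restarts the scan, as handling one request may
 * raise another.
 */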
4501static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
4502{
4503retry:
4504	kvm_s390_vcpu_request_handled(vcpu);
4505	if (!kvm_request_pending(vcpu))
4506		return 0;
4507	/*
4508	 * If the guest prefix changed, re-arm the ipte notifier for the
4509	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
4510	 * This ensures that the ipte instruction for this request has
4511	 * already finished. We might race against a second unmapper that
4512	 * wants to set the blocking bit. Let's just retry the request loop.
4513	 */
4514	if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
4515		int rc;
4516		rc = gmap_mprotect_notify(vcpu->arch.gmap,
4517					  kvm_s390_get_prefix(vcpu),
4518					  PAGE_SIZE * 2, PROT_WRITE);
4519		if (rc) {
4520			kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
4521			return rc;
4522		}
4523		goto retry;
4524	}
4525
4526	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
4527		vcpu->arch.sie_block->ihcpu = 0xffff;
4528		goto retry;
4529	}
4530
4531	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
4532		if (!ibs_enabled(vcpu)) {
4533			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
4534			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
4535		}
4536		goto retry;
4537	}
4538
4539	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
4540		if (ibs_enabled(vcpu)) {
4541			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
4542			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
4543		}
4544		goto retry;
4545	}
4546
4547	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
4548		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
4549		goto retry;
4550	}
4551
4552	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
4553		/*
4554		 * Disable CMM virtualization; we will emulate the ESSA
4555		 * instruction manually, in order to provide additional
4556		 * functionality needed for live migration.
4557		 */
4558		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
4559		goto retry;
4560	}
4561
4562	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
4563		/*
4564		 * Re-enable CMM virtualization if CMMA is available and
4565		 * CMM has been used.
4566		 */
4567		if ((vcpu->kvm->arch.use_cmma) &&
4568		    (vcpu->kvm->mm->context.uses_cmm))
4569			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
4570		goto retry;
4571	}
4572
4573	/* we left the vsie handler, nothing to do, just clear the request */
4574	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
4575
4576	return 0;
4577}
4578
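/*
 * Store the guest TOD as an offset (epoch, plus epoch index if the
 * multiple-epoch facility is available) to the host TOD and propagate
 * it to all vCPUs while they are blocked from entering SIE.
 */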
4579static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4580{
4581	struct kvm_vcpu *vcpu;
4582	union tod_clock clk;
4583	unsigned long i;
4584
4585	preempt_disable();
4586
4587	store_tod_clock_ext(&clk);
4588
4589	kvm->arch.epoch = gtod->tod - clk.tod;
4590	kvm->arch.epdx = 0;
4591	if (test_kvm_facility(kvm, 139)) {
4592		kvm->arch.epdx = gtod->epoch_idx - clk.ei;
4593		if (kvm->arch.epoch > gtod->tod)
4594			kvm->arch.epdx -= 1;
4595	}
4596
4597	kvm_s390_vcpu_block_all(kvm);
4598	kvm_for_each_vcpu(i, vcpu, kvm) {
4599		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
4600		vcpu->arch.sie_block->epdx  = kvm->arch.epdx;
4601	}
4602
4603	kvm_s390_vcpu_unblock_all(kvm);
4604	preempt_enable();
4605}
4606
4607int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4608{
4609	if (!mutex_trylock(&kvm->lock))
4610		return 0;
4611	__kvm_s390_set_tod_clock(kvm, gtod);
4612	mutex_unlock(&kvm->lock);
4613	return 1;
4614}
4615
4616static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
4617				      unsigned long token)
4618{
4619	struct kvm_s390_interrupt inti;
4620	struct kvm_s390_irq irq;
4621
4622	if (start_token) {
4623		irq.u.ext.ext_params2 = token;
4624		irq.type = KVM_S390_INT_PFAULT_INIT;
4625		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
4626	} else {
4627		inti.type = KVM_S390_INT_PFAULT_DONE;
4628		inti.parm64 = token;
4629		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
4630	}
4631}
4632
4633bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
4634				     struct kvm_async_pf *work)
4635{
4636	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
4637	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
4638
4639	return true;
4640}
4641
4642void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
4643				 struct kvm_async_pf *work)
4644{
4645	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
4646	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
4647}
4648
4649void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
4650			       struct kvm_async_pf *work)
4651{
4652	/* s390 will always inject the page directly */
4653}
4654
4655bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
4656{
4657	/*
4658	 * s390 will always inject the page directly,
4659	 * but we still want check_async_completion to clean up
4660	 */
4661	return true;
4662}
4663
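/*
 * Only use the async page fault mechanism if the guest set up pfault
 * handshaking (valid token, matching PSW mask bits, external interrupts
 * and the service-signal subclass enabled) and pfault is enabled for
 * this gmap; otherwise the fault is resolved synchronously.
 */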
4664static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
4665{
4666	hva_t hva;
4667	struct kvm_arch_async_pf arch;
4668
4669	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4670		return false;
4671	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
4672	    vcpu->arch.pfault_compare)
4673		return false;
4674	if (psw_extint_disabled(vcpu))
4675		return false;
4676	if (kvm_s390_vcpu_has_irq(vcpu, 0))
4677		return false;
4678	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
4679		return false;
4680	if (!vcpu->arch.gmap->pfault_enabled)
4681		return false;
4682
4683	hva = gfn_to_hva(vcpu->kvm, current->thread.gmap_teid.addr);
4684	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
4685		return false;
4686
4687	return kvm_setup_async_pf(vcpu, current->thread.gmap_teid.addr * PAGE_SIZE, hva, &arch);
4688}
4689
4690static int vcpu_pre_run(struct kvm_vcpu *vcpu)
4691{
4692	int rc, cpuflags;
4693
4694	/*
4695	 * On s390, notifications for arriving pages are delivered directly
4696	 * to the guest, but the housekeeping for completed pfaults is
4697	 * handled outside the worker.
4698	 */
4699	kvm_check_async_pf_completion(vcpu);
4700
4701	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
4702	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
4703
4704	if (need_resched())
4705		schedule();
4706
4707	if (!kvm_is_ucontrol(vcpu->kvm)) {
4708		rc = kvm_s390_deliver_pending_interrupts(vcpu);
4709		if (rc || guestdbg_exit_pending(vcpu))
4710			return rc;
4711	}
4712
4713	rc = kvm_s390_handle_requests(vcpu);
4714	if (rc)
4715		return rc;
4716
4717	if (guestdbg_enabled(vcpu)) {
4718		kvm_s390_backup_guest_per_regs(vcpu);
4719		kvm_s390_patch_guest_per_regs(vcpu);
4720	}
4721
4722	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
4723
4724	vcpu->arch.sie_block->icptcode = 0;
4725	current->thread.gmap_int_code = 0;
4726	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4727	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
4728	trace_kvm_s390_sie_enter(vcpu, cpuflags);
4729
4730	return 0;
4731}
4732
4733static int vcpu_post_run_addressing_exception(struct kvm_vcpu *vcpu)
4734{
4735	struct kvm_s390_pgm_info pgm_info = {
4736		.code = PGM_ADDRESSING,
4737	};
4738	u8 opcode, ilen;
4739	int rc;
4740
4741	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4742	trace_kvm_s390_sie_fault(vcpu);
4743
4744	/*
4745	 * We want to inject an addressing exception, which is defined as a
4746	 * suppressing or terminating exception. However, since we came here
4747	 * by a DAT access exception, the PSW still points to the faulting
4748	 * instruction since DAT exceptions are nullifying. So we've got
4749	 * to look up the current opcode to get the length of the instruction
4750	 * to be able to forward the PSW.
4751	 */
4752	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
4753	ilen = insn_length(opcode);
4754	if (rc < 0) {
4755		return rc;
4756	} else if (rc) {
4757		/* Instruction-Fetching Exceptions - we can't detect the ilen.
4758		 * Forward by arbitrary ilc, injection will take care of
4759		 * nullification if necessary.
4760		 */
4761		pgm_info = vcpu->arch.pgm;
4762		ilen = 4;
4763	}
4764	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
4765	kvm_s390_forward_psw(vcpu, ilen);
4766	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
4767}
4768
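/*
 * Resolve the host-side program interruption that made SIE exit:
 * secure-storage cases import or destroy protected pages, ordinary
 * translation faults are resolved via gmap_fault(), possibly
 * asynchronously through the pfault mechanism.
 */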
4769static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
4770{
4771	unsigned int flags = 0;
4772	unsigned long gaddr;
4773	int rc = 0;
4774
4775	gaddr = current->thread.gmap_teid.addr * PAGE_SIZE;
4776	if (kvm_s390_cur_gmap_fault_is_write())
4777		flags = FAULT_FLAG_WRITE;
4778
4779	switch (current->thread.gmap_int_code & PGM_INT_CODE_MASK) {
4780	case 0:
4781		vcpu->stat.exit_null++;
4782		break;
4783	case PGM_NON_SECURE_STORAGE_ACCESS:
4784		KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
4785			"Unexpected program interrupt 0x%x, TEID 0x%016lx",
4786			current->thread.gmap_int_code, current->thread.gmap_teid.val);
4787		/*
4788		 * This is normal operation; a page belonging to a protected
4789		 * guest has not been imported yet. Try to import the page into
4790		 * the protected guest.
4791		 */
4792		if (gmap_convert_to_secure(vcpu->arch.gmap, gaddr) == -EINVAL)
4793			send_sig(SIGSEGV, current, 0);
4794		break;
4795	case PGM_SECURE_STORAGE_ACCESS:
4796	case PGM_SECURE_STORAGE_VIOLATION:
4797		KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
4798			"Unexpected program interrupt 0x%x, TEID 0x%016lx",
4799			current->thread.gmap_int_code, current->thread.gmap_teid.val);
4800		/*
4801		 * This can happen after a reboot with asynchronous teardown;
4802		 * the new guest (normal or protected) will run on top of the
4803		 * previous protected guest. The old pages need to be destroyed
4804		 * so the new guest can use them.
4805		 */
4806		if (gmap_destroy_page(vcpu->arch.gmap, gaddr)) {
4807			/*
4808			 * Either KVM messed up the secure guest mapping or the
4809			 * same page is mapped into multiple secure guests.
4810			 *
4811			 * This exception is only triggered when a guest 2 is
4812			 * running and can therefore never occur in kernel
4813			 * context.
4814			 */
4815			pr_warn_ratelimited("Secure storage violation (%x) in task: %s, pid %d\n",
4816					    current->thread.gmap_int_code, current->comm,
4817					    current->pid);
4818			send_sig(SIGSEGV, current, 0);
4819		}
4820		break;
4821	case PGM_PROTECTION:
4822	case PGM_SEGMENT_TRANSLATION:
4823	case PGM_PAGE_TRANSLATION:
4824	case PGM_ASCE_TYPE:
4825	case PGM_REGION_FIRST_TRANS:
4826	case PGM_REGION_SECOND_TRANS:
4827	case PGM_REGION_THIRD_TRANS:
4828		KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
4829			"Unexpected program interrupt 0x%x, TEID 0x%016lx",
4830			current->thread.gmap_int_code, current->thread.gmap_teid.val);
4831		if (vcpu->arch.gmap->pfault_enabled) {
4832			rc = gmap_fault(vcpu->arch.gmap, gaddr, flags | FAULT_FLAG_RETRY_NOWAIT);
4833			if (rc == -EFAULT)
4834				return vcpu_post_run_addressing_exception(vcpu);
4835			if (rc == -EAGAIN) {
4836				trace_kvm_s390_major_guest_pfault(vcpu);
4837				if (kvm_arch_setup_async_pf(vcpu))
4838					return 0;
4839				vcpu->stat.pfault_sync++;
4840			} else {
4841				return rc;
4842			}
4843		}
4844		rc = gmap_fault(vcpu->arch.gmap, gaddr, flags);
4845		if (rc == -EFAULT) {
4846			if (kvm_is_ucontrol(vcpu->kvm)) {
4847				vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4848				vcpu->run->s390_ucontrol.trans_exc_code = gaddr;
4849				vcpu->run->s390_ucontrol.pgm_code = 0x10;
4850				return -EREMOTE;
4851			}
4852			return vcpu_post_run_addressing_exception(vcpu);
4853		}
4854		break;
4855	default:
4856		KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx",
4857			current->thread.gmap_int_code, current->thread.gmap_teid.val);
4858		send_sig(SIGSEGV, current, 0);
4859		break;
4860	}
4861	return rc;
4862}
4863
4864static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
4865{
4866	struct mcck_volatile_info *mcck_info;
4867	struct sie_page *sie_page;
4868	int rc;
4869
4870	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
4871		   vcpu->arch.sie_block->icptcode);
4872	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4873
4874	if (guestdbg_enabled(vcpu))
4875		kvm_s390_restore_guest_per_regs(vcpu);
4876
4877	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4878	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
4879
4880	if (exit_reason == -EINTR) {
4881		VCPU_EVENT(vcpu, 3, "%s", "machine check");
4882		sie_page = container_of(vcpu->arch.sie_block,
4883					struct sie_page, sie_block);
4884		mcck_info = &sie_page->mcck_info;
4885		kvm_s390_reinject_machine_check(vcpu, mcck_info);
4886		return 0;
4887	}
4888
4889	if (vcpu->arch.sie_block->icptcode > 0) {
4890		rc = kvm_handle_sie_intercept(vcpu);
4891
4892		if (rc != -EOPNOTSUPP)
4893			return rc;
4894		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
4895		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4896		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4897		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4898		return -EREMOTE;
4899	}
4900
4901	return vcpu_post_run_handle_fault(vcpu);
4902}
4903
4904#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
4905static int __vcpu_run(struct kvm_vcpu *vcpu)
4906{
4907	int rc, exit_reason;
4908	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
4909
4910	/*
4911	 * We try to hold kvm->srcu during most of vcpu_run (except while
4912	 * running the guest), so that memslots (and other SRCU-protected data) stay valid
4913	 */
4914	kvm_vcpu_srcu_read_lock(vcpu);
4915
4916	do {
4917		rc = vcpu_pre_run(vcpu);
4918		if (rc || guestdbg_exit_pending(vcpu))
4919			break;
4920
4921		kvm_vcpu_srcu_read_unlock(vcpu);
4922		/*
4923		 * As PF_VCPU will be used in the fault handler, there must be
4924		 * no uaccess between guest_enter and guest_exit.
4925		 */
4926		local_irq_disable();
4927		guest_enter_irqoff();
4928		__disable_cpu_timer_accounting(vcpu);
4929		local_irq_enable();
4930		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4931			memcpy(sie_page->pv_grregs,
4932			       vcpu->run->s.regs.gprs,
4933			       sizeof(sie_page->pv_grregs));
4934		}
4935		exit_reason = sie64a(vcpu->arch.sie_block,
4936				     vcpu->run->s.regs.gprs,
4937				     vcpu->arch.gmap->asce);
4938		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4939			memcpy(vcpu->run->s.regs.gprs,
4940			       sie_page->pv_grregs,
4941			       sizeof(sie_page->pv_grregs));
4942			/*
4943			 * We're not allowed to inject interrupts on intercepts
4944			 * that leave the guest state in an "in-between" state
4945			 * where the next SIE entry will do a continuation.
4946			 * Fence interrupts in our "internal" PSW.
4947			 */
4948			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4949			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4950				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4951			}
4952		}
4953		local_irq_disable();
4954		__enable_cpu_timer_accounting(vcpu);
4955		guest_exit_irqoff();
4956		local_irq_enable();
4957		kvm_vcpu_srcu_read_lock(vcpu);
4958
4959		rc = vcpu_post_run(vcpu, exit_reason);
4960	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
4961
4962	kvm_vcpu_srcu_read_unlock(vcpu);
4963	return rc;
4964}
4965
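/*
 * Sync the format-2 (non-protected guest) parts of the sync regs from
 * kvm_run into the SIE block.  Protected guests only take the condition
 * code from userspace, see sync_regs().
 */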
4966static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
4967{
4968	struct kvm_run *kvm_run = vcpu->run;
4969	struct runtime_instr_cb *riccb;
4970	struct gs_cb *gscb;
4971
4972	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
4973	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
4974	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4975	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
4976	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4977		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4978		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4979		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4980	}
4981	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4982		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4983		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4984		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
4985		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4986			kvm_clear_async_pf_completion_queue(vcpu);
4987	}
4988	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
4989		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
4990		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
4991		VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
4992	}
4993	/*
4994	 * If userspace sets the riccb (e.g. after migration) to a valid state,
4995	 * we should enable RI here instead of doing the lazy enablement.
4996	 */
4997	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
4998	    test_kvm_facility(vcpu->kvm, 64) &&
4999	    riccb->v &&
5000	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
5001		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
5002		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
5003	}
5004	/*
5005	 * If userspace sets the gscb (e.g. after migration) to non-zero,
5006	 * we should enable GS here instead of doing the lazy enablement.
5007	 */
5008	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
5009	    test_kvm_facility(vcpu->kvm, 133) &&
5010	    gscb->gssm &&
5011	    !vcpu->arch.gs_enabled) {
5012		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
5013		vcpu->arch.sie_block->ecb |= ECB_GS;
5014		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
5015		vcpu->arch.gs_enabled = 1;
5016	}
5017	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
5018	    test_kvm_facility(vcpu->kvm, 82)) {
5019		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
5020		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
5021	}
5022	if (MACHINE_HAS_GS) {
5023		preempt_disable();
5024		local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
5025		if (current->thread.gs_cb) {
5026			vcpu->arch.host_gscb = current->thread.gs_cb;
5027			save_gs_cb(vcpu->arch.host_gscb);
5028		}
5029		if (vcpu->arch.gs_enabled) {
5030			current->thread.gs_cb = (struct gs_cb *)
5031						&vcpu->run->s.regs.gscb;
5032			restore_gs_cb(current->thread.gs_cb);
5033		}
5034		preempt_enable();
5035	}
5036	/* SIE will load etoken directly from SDNX and therefore kvm_run */
5037}
5038
5039static void sync_regs(struct kvm_vcpu *vcpu)
5040{
5041	struct kvm_run *kvm_run = vcpu->run;
5042
5043	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
5044		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
5045	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
5046		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
5047		/* some control register changes require a tlb flush */
5048		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
5049	}
5050	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
5051		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
5052		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
5053	}
5054	save_access_regs(vcpu->arch.host_acrs);
5055	restore_access_regs(vcpu->run->s.regs.acrs);
5056	vcpu->arch.acrs_loaded = true;
5057	kvm_s390_fpu_load(vcpu->run);
5058	/* Sync fmt2 only data */
5059	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
5060		sync_regs_fmt2(vcpu);
5061	} else {
5062		/*
5063		 * In several places we have to modify our internal view to
5064		 * not do things that are disallowed by the ultravisor. For
5065		 * example we must not inject interrupts after specific exits
5066		 * (e.g. 112 prefix page not secure). We do this by turning
5067		 * off the machine check, external and I/O interrupt bits
5068		 * of our PSW copy. To avoid getting validity intercepts, we
5069		 * only accept the condition code from userspace.
5070		 */
5071		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
5072		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
5073						   PSW_MASK_CC;
5074	}
5075
5076	kvm_run->kvm_dirty_regs = 0;
5077}
5078
5079static void store_regs_fmt2(struct kvm_vcpu *vcpu)
5080{
5081	struct kvm_run *kvm_run = vcpu->run;
5082
5083	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
5084	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
5085	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
5086	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
5087	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
5088	if (MACHINE_HAS_GS) {
5089		preempt_disable();
5090		local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
5091		if (vcpu->arch.gs_enabled)
5092			save_gs_cb(current->thread.gs_cb);
5093		current->thread.gs_cb = vcpu->arch.host_gscb;
5094		restore_gs_cb(vcpu->arch.host_gscb);
5095		if (!vcpu->arch.host_gscb)
5096			local_ctl_clear_bit(2, CR2_GUARDED_STORAGE_BIT);
5097		vcpu->arch.host_gscb = NULL;
5098		preempt_enable();
5099	}
5100	/* SIE will save etoken directly into SDNX and therefore kvm_run */
5101}
5102
5103static void store_regs(struct kvm_vcpu *vcpu)
5104{
5105	struct kvm_run *kvm_run = vcpu->run;
5106
5107	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
5108	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
5109	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
5110	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
5111	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
5112	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
5113	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
5114	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
5115	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
5116	save_access_regs(vcpu->run->s.regs.acrs);
5117	restore_access_regs(vcpu->arch.host_acrs);
5118	vcpu->arch.acrs_loaded = false;
5119	kvm_s390_fpu_store(vcpu->run);
5120	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
5121		store_regs_fmt2(vcpu);
5122}
5123
5124int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
5125{
5126	struct kvm_run *kvm_run = vcpu->run;
5127	DECLARE_KERNEL_FPU_ONSTACK32(fpu);
5128	int rc;
5129
5130	/*
5131	 * Running a VM while dumping always has the potential to
5132	 * produce inconsistent dump data. But for PV vcpus a SIE
5133	 * entry while dumping could also lead to a fatal validity
5134	 * intercept which we absolutely want to avoid.
5135	 */
5136	if (vcpu->kvm->arch.pv.dumping)
5137		return -EINVAL;
5138
5139	if (!vcpu->wants_to_run)
5140		return -EINTR;
5141
5142	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
5143	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
5144		return -EINVAL;
5145
5146	vcpu_load(vcpu);
5147
5148	if (guestdbg_exit_pending(vcpu)) {
5149		kvm_s390_prepare_debug_exit(vcpu);
5150		rc = 0;
5151		goto out;
5152	}
5153
5154	kvm_sigset_activate(vcpu);
5155
5156	/*
5157	 * no need to check the return value of vcpu_start as it can only fail
5158	 * for protvirt, but protvirt means user-controlled cpu state
5159	 */
5160	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
5161		kvm_s390_vcpu_start(vcpu);
5162	} else if (is_vcpu_stopped(vcpu)) {
5163		pr_err_ratelimited("can't run stopped vcpu %d\n",
5164				   vcpu->vcpu_id);
5165		rc = -EINVAL;
5166		goto out;
5167	}
5168
5169	kernel_fpu_begin(&fpu, KERNEL_FPC | KERNEL_VXR);
5170	sync_regs(vcpu);
5171	enable_cpu_timer_accounting(vcpu);
5172
5173	might_fault();
5174	rc = __vcpu_run(vcpu);
5175
5176	if (signal_pending(current) && !rc) {
5177		kvm_run->exit_reason = KVM_EXIT_INTR;
5178		rc = -EINTR;
5179	}
5180
5181	if (guestdbg_exit_pending(vcpu) && !rc)  {
5182		kvm_s390_prepare_debug_exit(vcpu);
5183		rc = 0;
5184	}
5185
5186	if (rc == -EREMOTE) {
5187		/* userspace support is needed, kvm_run has been prepared */
5188		rc = 0;
5189	}
5190
5191	disable_cpu_timer_accounting(vcpu);
5192	store_regs(vcpu);
5193	kernel_fpu_end(&fpu, KERNEL_FPC | KERNEL_VXR);
5194
5195	kvm_sigset_deactivate(vcpu);
5196
5197	vcpu->stat.exit_userspace++;
5198out:
5199	vcpu_put(vcpu);
5200	return rc;
5201}
5202
5203/*
5204 * store status at address
5205 * we have two special cases:
5206 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
5207 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
5208 */
5209int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
5210{
5211	unsigned char archmode = 1;
5212	freg_t fprs[NUM_FPRS];
5213	unsigned int px;
5214	u64 clkcomp, cputm;
5215	int rc;
5216
5217	px = kvm_s390_get_prefix(vcpu);
5218	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
5219		if (write_guest_abs(vcpu, 163, &archmode, 1))
5220			return -EFAULT;
5221		gpa = 0;
5222	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
5223		if (write_guest_real(vcpu, 163, &archmode, 1))
5224			return -EFAULT;
5225		gpa = px;
5226	} else
5227		gpa -= __LC_FPREGS_SAVE_AREA;
5228
5229	/* manually convert vector registers if necessary */
5230	if (cpu_has_vx()) {
5231		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
5232		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5233				     fprs, 128);
5234	} else {
5235		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5236				     vcpu->run->s.regs.fprs, 128);
5237	}
5238	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
5239			      vcpu->run->s.regs.gprs, 128);
5240	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
5241			      &vcpu->arch.sie_block->gpsw, 16);
5242	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
5243			      &px, 4);
5244	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
5245			      &vcpu->run->s.regs.fpc, 4);
5246	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
5247			      &vcpu->arch.sie_block->todpr, 4);
5248	cputm = kvm_s390_get_cpu_timer(vcpu);
5249	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
5250			      &cputm, 8);
5251	clkcomp = vcpu->arch.sie_block->ckc >> 8;
5252	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
5253			      &clkcomp, 8);
5254	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
5255			      &vcpu->run->s.regs.acrs, 64);
5256	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
5257			      &vcpu->arch.sie_block->gcr, 128);
5258	return rc ? -EFAULT : 0;
5259}
5260
5261int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
5262{
5263	/*
5264	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
5265	 * switch in the run ioctl. Let's update our copies before we save
5266	 * them into the save area
5267	 */
5268	kvm_s390_fpu_store(vcpu->run);
5269	save_access_regs(vcpu->run->s.regs.acrs);
5270
5271	return kvm_s390_store_status_unloaded(vcpu, addr);
5272}
5273
5274static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5275{
5276	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
5277	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
5278}
5279
5280static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
5281{
5282	unsigned long i;
5283	struct kvm_vcpu *vcpu;
5284
5285	kvm_for_each_vcpu(i, vcpu, kvm) {
5286		__disable_ibs_on_vcpu(vcpu);
5287	}
5288}
5289
5290static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5291{
5292	if (!sclp.has_ibs)
5293		return;
5294	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
5295	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
5296}
5297
5298int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
5299{
5300	int i, online_vcpus, r = 0, started_vcpus = 0;
5301
5302	if (!is_vcpu_stopped(vcpu))
5303		return 0;
5304
5305	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
5306	/* Only one cpu at a time may enter/leave the STOPPED state. */
5307	spin_lock(&vcpu->kvm->arch.start_stop_lock);
5308	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5309
5310	/* Let's tell the UV that we want to change into the operating state */
5311	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5312		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
5313		if (r) {
5314			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5315			return r;
5316		}
5317	}
5318
5319	for (i = 0; i < online_vcpus; i++) {
5320		if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
5321			started_vcpus++;
5322	}
5323
5324	if (started_vcpus == 0) {
5325		/* we're the only active VCPU -> speed it up */
5326		__enable_ibs_on_vcpu(vcpu);
5327	} else if (started_vcpus == 1) {
5328		/*
5329		 * As we are starting a second VCPU, we have to disable
5330		 * the IBS facility on all VCPUs to remove potentially
5331		 * outstanding ENABLE requests.
5332		 */
5333		__disable_ibs_on_all_vcpus(vcpu->kvm);
5334	}
5335
5336	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
5337	/*
5338	 * The real PSW might have changed due to a RESTART interpreted by the
5339	 * ultravisor. We block all interrupts and let the next sie exit
5340	 * refresh our view.
5341	 */
5342	if (kvm_s390_pv_cpu_is_protected(vcpu))
5343		vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
5344	/*
5345	 * Another VCPU might have used IBS while we were offline.
5346	 * Let's play safe and flush the VCPU at startup.
5347	 */
5348	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
5349	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5350	return 0;
5351}
5352
5353int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
5354{
5355	int i, online_vcpus, r = 0, started_vcpus = 0;
5356	struct kvm_vcpu *started_vcpu = NULL;
5357
5358	if (is_vcpu_stopped(vcpu))
5359		return 0;
5360
5361	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
5362	/* Only one cpu at a time may enter/leave the STOPPED state. */
5363	spin_lock(&vcpu->kvm->arch.start_stop_lock);
5364	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5365
5366	/* Let's tell the UV that we want to change into the stopped state */
5367	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5368		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
5369		if (r) {
5370			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5371			return r;
5372		}
5373	}
5374
5375	/*
5376	 * Set the VCPU to STOPPED and THEN clear the interrupt flag,
5377	 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
5378	 * have been fully processed. This will ensure that the VCPU
5379	 * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
5380	 */
5381	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
5382	kvm_s390_clear_stop_irq(vcpu);
5383
5384	__disable_ibs_on_vcpu(vcpu);
5385
5386	for (i = 0; i < online_vcpus; i++) {
5387		struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);
5388
5389		if (!is_vcpu_stopped(tmp)) {
5390			started_vcpus++;
5391			started_vcpu = tmp;
5392		}
5393	}
5394
5395	if (started_vcpus == 1) {
5396		/*
5397		 * As we only have one VCPU left, we want to enable the
5398		 * IBS facility for that VCPU to speed it up.
5399		 */
5400		__enable_ibs_on_vcpu(started_vcpu);
5401	}
5402
5403	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5404	return 0;
5405}
5406
5407static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
5408				     struct kvm_enable_cap *cap)
5409{
5410	int r;
5411
5412	if (cap->flags)
5413		return -EINVAL;
5414
5415	switch (cap->cap) {
5416	case KVM_CAP_S390_CSS_SUPPORT:
5417		if (!vcpu->kvm->arch.css_support) {
5418			vcpu->kvm->arch.css_support = 1;
5419			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
5420			trace_kvm_s390_enable_css(vcpu->kvm);
5421		}
5422		r = 0;
5423		break;
5424	default:
5425		r = -EINVAL;
5426		break;
5427	}
5428	return r;
5429}
5430
5431static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
5432				  struct kvm_s390_mem_op *mop)
5433{
5434	void __user *uaddr = (void __user *)mop->buf;
5435	void *sida_addr;
5436	int r = 0;
5437
5438	if (mop->flags || !mop->size)
5439		return -EINVAL;
5440	if (mop->size + mop->sida_offset < mop->size)
5441		return -EINVAL;
5442	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
5443		return -E2BIG;
5444	if (!kvm_s390_pv_cpu_is_protected(vcpu))
5445		return -EINVAL;
5446
5447	sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;
5448
5449	switch (mop->op) {
5450	case KVM_S390_MEMOP_SIDA_READ:
5451		if (copy_to_user(uaddr, sida_addr, mop->size))
5452			r = -EFAULT;
5453
5454		break;
5455	case KVM_S390_MEMOP_SIDA_WRITE:
5456		if (copy_from_user(sida_addr, uaddr, mop->size))
5457			r = -EFAULT;
5458		break;
5459	}
5460	return r;
5461}
5462
5463static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu,
5464				 struct kvm_s390_mem_op *mop)
5465{
5466	void __user *uaddr = (void __user *)mop->buf;
5467	enum gacc_mode acc_mode;
5468	void *tmpbuf = NULL;
5469	int r;
5470
5471	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_INJECT_EXCEPTION |
5472					KVM_S390_MEMOP_F_CHECK_ONLY |
5473					KVM_S390_MEMOP_F_SKEY_PROTECTION);
5474	if (r)
5475		return r;
5476	if (mop->ar >= NUM_ACRS)
5477		return -EINVAL;
5478	if (kvm_s390_pv_cpu_is_protected(vcpu))
5479		return -EINVAL;
5480	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
5481		tmpbuf = vmalloc(mop->size);
5482		if (!tmpbuf)
5483			return -ENOMEM;
5484	}
5485
5486	acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE;
5487	if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
5488		r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
5489				    acc_mode, mop->key);
5490		goto out_inject;
5491	}
5492	if (acc_mode == GACC_FETCH) {
5493		r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5494					mop->size, mop->key);
5495		if (r)
5496			goto out_inject;
5497		if (copy_to_user(uaddr, tmpbuf, mop->size)) {
5498			r = -EFAULT;
5499			goto out_free;
5500		}
5501	} else {
5502		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
5503			r = -EFAULT;
5504			goto out_free;
5505		}
5506		r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5507					 mop->size, mop->key);
5508	}
5509
5510out_inject:
5511	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
5512		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
5513
5514out_free:
5515	vfree(tmpbuf);
5516	return r;
5517}
5518
5519static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu,
5520				     struct kvm_s390_mem_op *mop)
5521{
5522	int r, srcu_idx;
5523
5524	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5525
5526	switch (mop->op) {
5527	case KVM_S390_MEMOP_LOGICAL_READ:
5528	case KVM_S390_MEMOP_LOGICAL_WRITE:
5529		r = kvm_s390_vcpu_mem_op(vcpu, mop);
5530		break;
5531	case KVM_S390_MEMOP_SIDA_READ:
5532	case KVM_S390_MEMOP_SIDA_WRITE:
5533		/* we are locked against sida going away by the vcpu->mutex */
5534		r = kvm_s390_vcpu_sida_op(vcpu, mop);
5535		break;
5536	default:
5537		r = -EINVAL;
5538	}
5539
5540	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
5541	return r;
5542}
5543
5544long kvm_arch_vcpu_async_ioctl(struct file *filp,
5545			       unsigned int ioctl, unsigned long arg)
5546{
5547	struct kvm_vcpu *vcpu = filp->private_data;
5548	void __user *argp = (void __user *)arg;
5549	int rc;
5550
5551	switch (ioctl) {
5552	case KVM_S390_IRQ: {
5553		struct kvm_s390_irq s390irq;
5554
5555		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
5556			return -EFAULT;
5557		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
5558		break;
5559	}
5560	case KVM_S390_INTERRUPT: {
5561		struct kvm_s390_interrupt s390int;
5562		struct kvm_s390_irq s390irq = {};
5563
5564		if (copy_from_user(&s390int, argp, sizeof(s390int)))
5565			return -EFAULT;
5566		if (s390int_to_s390irq(&s390int, &s390irq))
5567			return -EINVAL;
5568		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
5569		break;
5570	}
5571	default:
5572		rc = -ENOIOCTLCMD;
5573		break;
5574	}
5575
5576	/*
5577	 * To simplify single stepping of userspace-emulated instructions,
5578	 * KVM_EXIT_S390_SIEIC exit sets KVM_GUESTDBG_EXIT_PENDING (see
5579	 * should_handle_per_ifetch()). However, if userspace emulation injects
5580	 * an interrupt, it needs to be cleared, so that KVM_EXIT_DEBUG happens
5581	 * after (and not before) the interrupt delivery.
5582	 */
5583	if (!rc)
5584		vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
5585
5586	return rc;
5587}
5588
5589static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
5590					struct kvm_pv_cmd *cmd)
5591{
5592	struct kvm_s390_pv_dmp dmp;
5593	void *data;
5594	int ret;
5595
5596	/* Dump initialization is a prerequisite */
5597	if (!vcpu->kvm->arch.pv.dumping)
5598		return -EINVAL;
5599
5600	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
5601		return -EFAULT;
5602
5603	/* We only handle this subcmd right now */
5604	if (dmp.subcmd != KVM_PV_DUMP_CPU)
5605		return -EINVAL;
5606
5607	/* CPU dump length is the same as create cpu storage donation. */
5608	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
5609		return -EINVAL;
5610
5611	data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
5612	if (!data)
5613		return -ENOMEM;
5614
5615	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);
5616
5617	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
5618		   vcpu->vcpu_id, cmd->rc, cmd->rrc);
5619
5620	if (ret)
5621		ret = -EINVAL;
5622
5623	/* On success copy over the dump data */
5624	if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
5625		ret = -EFAULT;
5626
5627	kvfree(data);
5628	return ret;
5629}
5630
5631long kvm_arch_vcpu_ioctl(struct file *filp,
5632			 unsigned int ioctl, unsigned long arg)
5633{
5634	struct kvm_vcpu *vcpu = filp->private_data;
5635	void __user *argp = (void __user *)arg;
5636	int idx;
5637	long r;
5638	u16 rc, rrc;
5639
5640	vcpu_load(vcpu);
5641
5642	switch (ioctl) {
5643	case KVM_S390_STORE_STATUS:
5644		idx = srcu_read_lock(&vcpu->kvm->srcu);
5645		r = kvm_s390_store_status_unloaded(vcpu, arg);
5646		srcu_read_unlock(&vcpu->kvm->srcu, idx);
5647		break;
5648	case KVM_S390_SET_INITIAL_PSW: {
5649		psw_t psw;
5650
5651		r = -EFAULT;
5652		if (copy_from_user(&psw, argp, sizeof(psw)))
5653			break;
5654		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
5655		break;
5656	}
5657	case KVM_S390_CLEAR_RESET:
5658		r = 0;
5659		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
5660		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5661			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5662					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
5663			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
5664				   rc, rrc);
5665		}
5666		break;
5667	case KVM_S390_INITIAL_RESET:
5668		r = 0;
5669		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
5670		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5671			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5672					  UVC_CMD_CPU_RESET_INITIAL,
5673					  &rc, &rrc);
5674			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
5675				   rc, rrc);
5676		}
5677		break;
5678	case KVM_S390_NORMAL_RESET:
5679		r = 0;
5680		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
5681		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5682			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5683					  UVC_CMD_CPU_RESET, &rc, &rrc);
5684			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
5685				   rc, rrc);
5686		}
5687		break;
5688	case KVM_SET_ONE_REG:
5689	case KVM_GET_ONE_REG: {
5690		struct kvm_one_reg reg;
5691		r = -EINVAL;
5692		if (kvm_s390_pv_cpu_is_protected(vcpu))
5693			break;
5694		r = -EFAULT;
5695		if (copy_from_user(&reg, argp, sizeof(reg)))
5696			break;
5697		if (ioctl == KVM_SET_ONE_REG)
5698			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
5699		else
5700			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
5701		break;
5702	}
5703#ifdef CONFIG_KVM_S390_UCONTROL
5704	case KVM_S390_UCAS_MAP: {
5705		struct kvm_s390_ucas_mapping ucasmap;
5706
5707		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
5708			r = -EFAULT;
5709			break;
5710		}
5711
5712		if (!kvm_is_ucontrol(vcpu->kvm)) {
5713			r = -EINVAL;
5714			break;
5715		}
5716
5717		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
5718				     ucasmap.vcpu_addr, ucasmap.length);
5719		break;
5720	}
5721	case KVM_S390_UCAS_UNMAP: {
5722		struct kvm_s390_ucas_mapping ucasmap;
5723
5724		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
5725			r = -EFAULT;
5726			break;
5727		}
5728
5729		if (!kvm_is_ucontrol(vcpu->kvm)) {
5730			r = -EINVAL;
5731			break;
5732		}
5733
5734		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
5735			ucasmap.length);
5736		break;
5737	}
5738#endif
5739	case KVM_S390_VCPU_FAULT: {
5740		r = gmap_fault(vcpu->arch.gmap, arg, 0);
5741		break;
5742	}
5743	case KVM_ENABLE_CAP:
5744	{
5745		struct kvm_enable_cap cap;
5746		r = -EFAULT;
5747		if (copy_from_user(&cap, argp, sizeof(cap)))
5748			break;
5749		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
5750		break;
5751	}
5752	case KVM_S390_MEM_OP: {
5753		struct kvm_s390_mem_op mem_op;
5754
5755		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
5756			r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
5757		else
5758			r = -EFAULT;
5759		break;
5760	}
5761	case KVM_S390_SET_IRQ_STATE: {
5762		struct kvm_s390_irq_state irq_state;
5763
5764		r = -EFAULT;
5765		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5766			break;
5767		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
5768		    irq_state.len == 0 ||
5769		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
5770			r = -EINVAL;
5771			break;
5772		}
5773		/* do not use irq_state.flags, it will break old QEMUs */
5774		r = kvm_s390_set_irq_state(vcpu,
5775					   (void __user *) irq_state.buf,
5776					   irq_state.len);
5777		break;
5778	}
5779	case KVM_S390_GET_IRQ_STATE: {
5780		struct kvm_s390_irq_state irq_state;
5781
5782		r = -EFAULT;
5783		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5784			break;
5785		if (irq_state.len == 0) {
5786			r = -EINVAL;
5787			break;
5788		}
5789		/* do not use irq_state.flags, it will break old QEMUs */
5790		r = kvm_s390_get_irq_state(vcpu,
5791					   (__u8 __user *)  irq_state.buf,
5792					   irq_state.len);
5793		break;
5794	}
5795	case KVM_S390_PV_CPU_COMMAND: {
5796		struct kvm_pv_cmd cmd;
5797
5798		r = -EINVAL;
5799		if (!is_prot_virt_host())
5800			break;
5801
5802		r = -EFAULT;
5803		if (copy_from_user(&cmd, argp, sizeof(cmd)))
5804			break;
5805
5806		r = -EINVAL;
5807		if (cmd.flags)
5808			break;
5809
5810		/* We only handle this cmd right now */
5811		if (cmd.cmd != KVM_PV_DUMP)
5812			break;
5813
5814		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);
5815
5816		/* Always copy over UV rc / rrc data */
5817		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
5818				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
5819			r = -EFAULT;
5820		break;
5821	}
5822	default:
5823		r = -ENOTTY;
5824	}
5825
5826	vcpu_put(vcpu);
5827	return r;
5828}
5829
5830vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
5831{
5832#ifdef CONFIG_KVM_S390_UCONTROL
5833	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
5834		 && (kvm_is_ucontrol(vcpu->kvm))) {
5835		vmf->page = virt_to_page(vcpu->arch.sie_block);
5836		get_page(vmf->page);
5837		return 0;
5838	}
5839#endif
5840	return VM_FAULT_SIGBUS;
5841}
5842
5843bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
5844{
5845	return true;
5846}
5847
5848/* Section: memory related */
5849int kvm_arch_prepare_memory_region(struct kvm *kvm,
5850				   const struct kvm_memory_slot *old,
5851				   struct kvm_memory_slot *new,
5852				   enum kvm_mr_change change)
5853{
5854	gpa_t size;
5855
5856	if (kvm_is_ucontrol(kvm))
5857		return -EINVAL;
5858
5859	/* When we are protected, we should not change the memory slots */
5860	if (kvm_s390_pv_get_handle(kvm))
5861		return -EINVAL;
5862
5863	if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
5864		/*
5865		 * A few sanity checks. We can have memory slots which have to be
5866		 * located/ended at a segment boundary (1MB). The memory in userland is
5867		 * ok to be fragmented into various different vmas. It is okay to mmap()
5868		 * and munmap() stuff in this slot after doing this call at any time
5869		 */
5870
5871		if (new->userspace_addr & 0xffffful)
5872			return -EINVAL;
5873
5874		size = new->npages * PAGE_SIZE;
5875		if (size & 0xffffful)
5876			return -EINVAL;
5877
5878		if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
5879			return -EINVAL;
5880	}
5881
5882	if (!kvm->arch.migration_mode)
5883		return 0;
5884
5885	/*
5886	 * Turn off migration mode when:
5887	 * - userspace creates a new memslot with dirty logging off,
5888	 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
5889	 *   dirty logging is turned off.
5890	 * Migration mode expects dirty page logging being enabled to store
5891	 * its dirty bitmap.
5892	 */
5893	if (change != KVM_MR_DELETE &&
5894	    !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
5895		WARN(kvm_s390_vm_stop_migration(kvm),
5896		     "Failed to stop migration mode");
5897
5898	return 0;
5899}
5900
5901void kvm_arch_commit_memory_region(struct kvm *kvm,
5902				struct kvm_memory_slot *old,
5903				const struct kvm_memory_slot *new,
5904				enum kvm_mr_change change)
5905{
5906	int rc = 0;
5907
5908	switch (change) {
5909	case KVM_MR_DELETE:
5910		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5911					old->npages * PAGE_SIZE);
5912		break;
5913	case KVM_MR_MOVE:
5914		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5915					old->npages * PAGE_SIZE);
5916		if (rc)
5917			break;
5918		fallthrough;
5919	case KVM_MR_CREATE:
5920		rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
5921				      new->base_gfn * PAGE_SIZE,
5922				      new->npages * PAGE_SIZE);
5923		break;
5924	case KVM_MR_FLAGS_ONLY:
5925		break;
5926	default:
5927		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
5928	}
5929	if (rc)
5930		pr_warn("failed to commit memory region\n");
5931	return;
5932}
5933
5934static inline unsigned long nonhyp_mask(int i)
5935{
5936	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
5937
5938	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
5939}
5940
5941static int __init kvm_s390_init(void)
5942{
5943	int i, r;
5944
5945	if (!sclp.has_sief2) {
5946		pr_info("SIE is not available\n");
5947		return -ENODEV;
5948	}
5949
5950	if (nested && hpage) {
5951		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
5952		return -EINVAL;
5953	}
5954
5955	for (i = 0; i < 16; i++)
5956		kvm_s390_fac_base[i] |=
5957			stfle_fac_list[i] & nonhyp_mask(i);
5958
5959	r = __kvm_s390_init();
5960	if (r)
5961		return r;
5962
5963	r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
5964	if (r) {
5965		__kvm_s390_exit();
5966		return r;
5967	}
5968	return 0;
5969}
5970
5971static void __exit kvm_s390_exit(void)
5972{
5973	kvm_exit();
5974
5975	__kvm_s390_exit();
5976}
5977
5978module_init(kvm_s390_init);
5979module_exit(kvm_s390_exit);
5980
5981/*
5982 * Enable autoloading of the kvm module.
5983 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
5984 * since x86 takes a different approach.
5985 */
5986#include <linux/miscdevice.h>
5987MODULE_ALIAS_MISCDEV(KVM_MINOR);
5988MODULE_ALIAS("devname:kvm");