// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>
#include <linux/mmu_notifier.h>

#include <asm/access-regs.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/asm.h>
#include <asm/fpu.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "pci.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))
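/*
 * Sizing note (presumably why the bound above holds): a vcpu can have one
 * pending SIGP emergency per possible sending CPU, bounded by KVM_MAX_VCPUS,
 * plus a fixed set of at most LOCAL_IRQS other local interrupts, so a buffer
 * of VCPU_IRQS_MAX_BUF bytes can always hold a complete
 * KVM_S390_GET_IRQ_STATE snapshot.
 */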

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio),
	STATS_DESC_COUNTER(VM, aen_forward),
	STATS_DESC_COUNTER(VM, gmap_shadow_reuse),
	STATS_DESC_COUNTER(VM, gmap_shadow_create),
	STATS_DESC_COUNTER(VM, gmap_shadow_r1_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_r2_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_r3_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_sg_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_pg_entry),
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_lpswey),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
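/*
 * Orientation sketch (generic KVM binary stats layout, not specific to this
 * file): the stats file exposed via KVM_GET_STATS_FD starts with the header,
 * followed by the id string, the descriptor array and the data values, which
 * is why id_offset, desc_offset and data_offset above are simple running
 * sums of the preceding sizes.
 */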

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling. >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");

/*
 * allow asynchronous deinit for protected guests; enable by default since
 * the feature is opt-in anyway
 */
static int async_destroy = 1;
module_param(async_destroy, int, 0444);
MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };
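/*
 * Rough semantics, as used when a VM is created later (sketch, based on the
 * two arrays above): the facility mask, seeded from FACILITIES_KVM |
 * FACILITIES_KVM_CPUMODEL and limited by the host's STFLE bits, bounds what
 * a CPU model may ever enable, while the facility list is the default
 * guest-visible set.
 */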

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

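/*
 * Worked example for the epoch arithmetic below (illustrative): if the host
 * TOD jumps forward by delta, the guest epoch must shrink by delta so that
 * host TOD + epoch stays constant for the guest. Adding -delta may wrap the
 * 64-bit epoch; with the multiple-epoch facility the carry or borrow is then
 * propagated into epdx by hand, mimicking a 128-bit signed addition.
 */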
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

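/*
 * PERFORM LOCKED OPERATION acts as a "test bit" query when bit 0x100 is set
 * in the function code: condition code 0 means the function denoted by the
 * low byte is installed, and the parameter registers are not inspected in
 * this mode.
 */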
static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc)
		: [function] "d" (function)
		: CC_CLOBBER_LIST("0"));
	return CC_TRANSFORM(cc) == 0;
}

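/*
 * The raw .insn encodings below avoid depending on assembler support for the
 * (comparatively new) mnemonics; in all three helpers, general register 0
 * set to zero selects the query subfunction, which stores the bitmap of
 * installed subfunctions at the operand address.
 */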
static __always_inline void pfcr_query(u8 (*query)[16])
{
	asm volatile(
		"	lghi	0,0\n"
		"	.insn	rsy,0xeb0000000016,0,0,%[query]\n"
		: [query] "=QS" (*query)
		:
		: "cc", "0");
}

static __always_inline void __sortl_query(u8 (*query)[32])
{
	asm volatile(
		"	lghi	0,0\n"
		"	la	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rre,0xb9380000,2,4\n"
		: [query] "=R" (*query)
		:
		: "cc", "0", "1");
}

static __always_inline void __dfltcc_query(u8 (*query)[32])
{
	asm volatile(
		"	lghi	0,0\n"
		"	la	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,0xb9390000,2,4,6,0\n"
		: [query] "=R" (*query)
		:
		: "cc", "0", "1");
}

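/*
 * Example of the query bitmap encoding used below: the query blocks are
 * MSB-first bit strings, so installed function i ends up as bit (7 - i % 8)
 * of byte i / 8, which is exactly what "plo[i >> 3] |= 0x80 >> (i & 7)"
 * computes.
 */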
static void __init kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__sortl_query(&kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__dfltcc_query(&kvm_s390_available_subfunc.dfltcc);

	if (test_facility(201)) /* PFCR */
		pfcr_query(&kvm_s390_available_subfunc.pfcr);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

static int __init __kvm_s390_init(void)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto err_kvm_uv;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto err_debug_view;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto err_flic;
	}

	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
		rc = kvm_s390_pci_init();
		if (rc) {
			pr_err("Unable to allocate AIFT for PCI\n");
			goto err_pci;
		}
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto err_gib;

	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);

	return 0;

err_gib:
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
err_pci:
err_flic:
err_debug_view:
	debug_unregister(kvm_s390_dbf_uv);
err_kvm_uv:
	debug_unregister(kvm_s390_dbf);
	return rc;
}

static void __kvm_s390_exit(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);

	kvm_s390_gib_destroy();
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
	case KVM_CAP_IRQFD_RESAMPLE:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !(kvm && kvm_is_ucontrol(kvm)))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_S390_MEM_OP_EXTENSION:
		/*
		 * Flag bits indicating which extensions are supported.
		 * If r > 0, the base extension must also be supported/indicated,
		 * in order to maintain backwards compatibility.
		 */
		r = KVM_S390_MEMOP_EXTENSION_CAP_BASE |
		    KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		if (ext == KVM_CAP_NR_VCPUS)
			r = min_t(unsigned int, num_online_cpus(), r);
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = test_facility(129);
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
		r = async_destroy && is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED_DUMP: {
		u64 pv_cmds_dump[] = {
			BIT_UVC_CMD_DUMP_INIT,
			BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
			BIT_UVC_CMD_DUMP_CPU,
			BIT_UVC_CMD_DUMP_COMPLETE,
		};
		int i;

		r = is_prot_virt_host();

		for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
			if (!test_bit_inv(pv_cmds_dump[i],
					  (unsigned long *)&uv_info.inst_calls_list)) {
				r = 0;
				break;
			}
		}
		break;
	}
	case KVM_CAP_S390_ZPCI_OP:
		r = kvm_s390_pci_interp_allowed();
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = test_facility(11);
		break;
	default:
		r = 0;
	}
	return r;
}

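/*
 * Dirty bits are collected from the gmap one segment at a time
 * (_PAGE_ENTRIES 4k pages per 1m segment), matching the pmd granularity of
 * gmap_sync_dirty_log_pmd(); cond_resched() keeps huge memslots from
 * hogging the CPU in this loop.
 */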
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (cpu_has_vx()) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			if (test_facility(192)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 192);
				set_kvm_facility(kvm->arch.model.fac_list, 192);
			}
			if (test_facility(198)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 198);
				set_kvm_facility(kvm->arch.model.fac_list, 198);
			}
			if (test_facility(199)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 199);
				set_kvm_facility(kvm->arch.model.fac_list, 199);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(11)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 11);
			set_kvm_facility(kvm->arch.model.fac_list, 11);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s",
			 r ? "(not available)" : "(success)");
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
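/*
 * Illustrative user space sketch (not part of this file): enabling one of
 * the flag-style capabilities handled above boils down to
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 *
 * on the VM file descriptor, with flags and args left zero.
 */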

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
{
	/* Only set the ECB bits after guest requests zPCI interpretation */
	if (!vcpu->kvm->arch.use_zpci_interp)
		return;

	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
}

void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	lockdep_assert_held(&kvm->lock);

	if (!kvm_s390_pci_interp_allowed())
		return;

	/*
	 * If host is configured for PCI and the necessary facilities are
	 * available, turn on interpretation for the life of this guest
	 */
	kvm->arch.use_zpci_interp = 1;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_pci_setup(vcpu);
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	unsigned long cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int bkt;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || kvm_memslots_empty(slots))
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	kvm_for_each_memslot(ms, bkt, slots) {
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	__kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	__kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	/*
	 * For protected guests, the TOD is managed by the ultravisor, so trying
	 * to change it will never bring the expected results.
	 */
	if (kvm_s390_pv_is_protected(kvm)) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

out_unlock:
	mutex_unlock(&kvm->lock);
	return ret;
}

static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	union tod_clock clk;

	preempt_disable();

	store_tod_clock_ext(&clk);

	gtod->tod = clk.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
		if (gtod->tod < clk.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}
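/*
 * Illustrative: the addition above mirrors a 128-bit add of (epdx:epoch)
 * to (ei:tod); if the low 64 bits wrap around (gtod->tod < clk.tod), the
 * carry is propagated into the epoch index by hand.
 */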

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}


static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
	VM_EVENT(kvm, 3, "SET: guest PFCR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pfcr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pfcr)[1]);

	return 0;
}

#define KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK	\
(						\
	((struct kvm_s390_vm_cpu_uv_feat){	\
		.ap = 1,			\
		.ap_intr = 1,			\
	})					\
	.feat					\
)
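/*
 * The compound literal above is merely a readable way of building the mask
 * of ultravisor feature bits (AP and AP interrupt interpretation) that user
 * space may hand through to a protected guest; its .feat member is the raw
 * value used for the subset check below.
 */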

static int kvm_s390_set_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr;
	unsigned long data, filter;

	filter = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
	if (get_user(data, &ptr->feat))
		return -EFAULT;
	if (!bitmap_subset(&data, &filter, KVM_S390_VM_CPU_UV_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	kvm->arch.model.uv_feat_guest.feat = data;
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
		ret = kvm_s390_set_uv_feat(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
	       sizeof(stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1784 VM_EVENT(kvm, 3, "GET: guest PFCR subfunc 0x%16.16lx.%16.16lx",
1785 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0],
1786 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]);
1787
1788 return 0;
1789}
1790
1791static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1792 struct kvm_device_attr *attr)
1793{
1794 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1795 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1796 return -EFAULT;
1797
1798 VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1799 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
1800 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
1801 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
1802 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
1803 VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
1804 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
1805 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
1806 VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
1807 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
1808 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
1809 VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
1810 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
1811 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
1812 VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
1813 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
1814 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
1815 VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
1816 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
1817 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
1818 VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
1819 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
1820 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
1821 VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
1822 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
1823 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
1824 VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
1825 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
1826 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
1827 VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
1828 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
1829 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
1830 VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
1831 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
1832 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
1833 VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
1834 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
1835 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
1836 VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
1837 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
1838 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
1839 VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
1840 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
1841 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
1842 VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
1843 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
1844 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
1845 VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1846 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1847 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1848 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1849 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
1850 VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1851 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
1852 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
1853 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
1854 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
1855 VM_EVENT(kvm, 3, "GET: host PFCR subfunc 0x%16.16lx.%16.16lx",
1856 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0],
1857 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]);
1858
1859 return 0;
1860}
1861
1862static int kvm_s390_get_processor_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1863{
1864 struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
1865 unsigned long feat = kvm->arch.model.uv_feat_guest.feat;
1866
1867 if (put_user(feat, &dst->feat))
1868 return -EFAULT;
1869 VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);
1870
1871 return 0;
1872}
1873
1874static int kvm_s390_get_machine_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1875{
1876 struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
1877 unsigned long feat;
1878
1879 BUILD_BUG_ON(sizeof(*dst) != sizeof(uv_info.uv_feature_indications));
1880
1881 feat = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
1882 if (put_user(feat, &dst->feat))
1883 return -EFAULT;
1884 VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);
1885
1886 return 0;
1887}
1888
1889static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1890{
1891 int ret = -ENXIO;
1892
1893 switch (attr->attr) {
1894 case KVM_S390_VM_CPU_PROCESSOR:
1895 ret = kvm_s390_get_processor(kvm, attr);
1896 break;
1897 case KVM_S390_VM_CPU_MACHINE:
1898 ret = kvm_s390_get_machine(kvm, attr);
1899 break;
1900 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1901 ret = kvm_s390_get_processor_feat(kvm, attr);
1902 break;
1903 case KVM_S390_VM_CPU_MACHINE_FEAT:
1904 ret = kvm_s390_get_machine_feat(kvm, attr);
1905 break;
1906 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1907 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1908 break;
1909 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1910 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1911 break;
1912 case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
1913 ret = kvm_s390_get_processor_uv_feat(kvm, attr);
1914 break;
1915 case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
1916 ret = kvm_s390_get_machine_uv_feat(kvm, attr);
1917 break;
1918 }
1919 return ret;
1920}
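
/*
 * Illustrative only (not taken from this file): the getters above are
 * reached through the KVM_GET_DEVICE_ATTR vm ioctl. A minimal userspace
 * sketch, assuming <linux/kvm.h> and an open VM file descriptor vm_fd:
 *
 *	struct kvm_s390_vm_cpu_machine mach;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CPU_MODEL,
 *		.attr  = KVM_S390_VM_CPU_MACHINE,
 *		.addr  = (__u64)(unsigned long)&mach,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr))
 *		perror("KVM_GET_DEVICE_ATTR");
 *
 * The KVM_S390_VM_CPU_PROCESSOR* attributes follow the same pattern and
 * return the guest-visible values instead of the machine maxima.
 */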
1921
/**
 * kvm_s390_update_topology_change_report - update CPU topology change report
 * @kvm: guest KVM description
 * @val: set or clear the MTCR bit
 *
 * Updates the Multiprocessor Topology-Change-Report bit to signal
 * a topology change to the guest.
 * This is only relevant if the topology facility is present.
 *
 * The SCA format, bsca or esca, doesn't matter as the offset of the utility
 * field is the same in both.
 */
1933static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
1934{
1935 union sca_utility new, old;
1936 struct bsca_block *sca;
1937
1938 read_lock(&kvm->arch.sca_lock);
1939 sca = kvm->arch.sca;
1940 old = READ_ONCE(sca->utility);
1941 do {
1942 new = old;
1943 new.mtcr = val;
1944 } while (!try_cmpxchg(&sca->utility.val, &old.val, new.val));
1945 read_unlock(&kvm->arch.sca_lock);
1946}
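
/*
 * Note on the update above: holding the read lock keeps the SCA pointer
 * stable (the write lock is only taken when switching from the basic to
 * the extended SCA), while the try_cmpxchg() retry loop makes sure that a
 * concurrent update of another bit in the utility word is not lost.
 */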
1947
1948static int kvm_s390_set_topo_change_indication(struct kvm *kvm,
1949 struct kvm_device_attr *attr)
1950{
1951 if (!test_kvm_facility(kvm, 11))
1952 return -ENXIO;
1953
1954 kvm_s390_update_topology_change_report(kvm, !!attr->attr);
1955 return 0;
1956}
1957
1958static int kvm_s390_get_topo_change_indication(struct kvm *kvm,
1959 struct kvm_device_attr *attr)
1960{
1961 u8 topo;
1962
1963 if (!test_kvm_facility(kvm, 11))
1964 return -ENXIO;
1965
1966 read_lock(&kvm->arch.sca_lock);
1967 topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr;
1968 read_unlock(&kvm->arch.sca_lock);
1969
1970 return put_user(topo, (u8 __user *)attr->addr);
1971}
1972
1973static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1974{
1975 int ret;
1976
1977 switch (attr->group) {
1978 case KVM_S390_VM_MEM_CTRL:
1979 ret = kvm_s390_set_mem_control(kvm, attr);
1980 break;
1981 case KVM_S390_VM_TOD:
1982 ret = kvm_s390_set_tod(kvm, attr);
1983 break;
1984 case KVM_S390_VM_CPU_MODEL:
1985 ret = kvm_s390_set_cpu_model(kvm, attr);
1986 break;
1987 case KVM_S390_VM_CRYPTO:
1988 ret = kvm_s390_vm_set_crypto(kvm, attr);
1989 break;
1990 case KVM_S390_VM_MIGRATION:
1991 ret = kvm_s390_vm_set_migration(kvm, attr);
1992 break;
1993 case KVM_S390_VM_CPU_TOPOLOGY:
1994 ret = kvm_s390_set_topo_change_indication(kvm, attr);
1995 break;
1996 default:
1997 ret = -ENXIO;
1998 break;
1999 }
2000
2001 return ret;
2002}
2003
2004static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
2005{
2006 int ret;
2007
2008 switch (attr->group) {
2009 case KVM_S390_VM_MEM_CTRL:
2010 ret = kvm_s390_get_mem_control(kvm, attr);
2011 break;
2012 case KVM_S390_VM_TOD:
2013 ret = kvm_s390_get_tod(kvm, attr);
2014 break;
2015 case KVM_S390_VM_CPU_MODEL:
2016 ret = kvm_s390_get_cpu_model(kvm, attr);
2017 break;
2018 case KVM_S390_VM_MIGRATION:
2019 ret = kvm_s390_vm_get_migration(kvm, attr);
2020 break;
2021 case KVM_S390_VM_CPU_TOPOLOGY:
2022 ret = kvm_s390_get_topo_change_indication(kvm, attr);
2023 break;
2024 default:
2025 ret = -ENXIO;
2026 break;
2027 }
2028
2029 return ret;
2030}
2031
2032static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
2033{
2034 int ret;
2035
2036 switch (attr->group) {
2037 case KVM_S390_VM_MEM_CTRL:
2038 switch (attr->attr) {
2039 case KVM_S390_VM_MEM_ENABLE_CMMA:
2040 case KVM_S390_VM_MEM_CLR_CMMA:
2041 ret = sclp.has_cmma ? 0 : -ENXIO;
2042 break;
2043 case KVM_S390_VM_MEM_LIMIT_SIZE:
2044 ret = 0;
2045 break;
2046 default:
2047 ret = -ENXIO;
2048 break;
2049 }
2050 break;
2051 case KVM_S390_VM_TOD:
2052 switch (attr->attr) {
2053 case KVM_S390_VM_TOD_LOW:
2054 case KVM_S390_VM_TOD_HIGH:
2055 ret = 0;
2056 break;
2057 default:
2058 ret = -ENXIO;
2059 break;
2060 }
2061 break;
2062 case KVM_S390_VM_CPU_MODEL:
2063 switch (attr->attr) {
2064 case KVM_S390_VM_CPU_PROCESSOR:
2065 case KVM_S390_VM_CPU_MACHINE:
2066 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
2067 case KVM_S390_VM_CPU_MACHINE_FEAT:
2068 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
2069 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
2070 case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
2071 case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
2072 ret = 0;
2073 break;
2074 default:
2075 ret = -ENXIO;
2076 break;
2077 }
2078 break;
2079 case KVM_S390_VM_CRYPTO:
2080 switch (attr->attr) {
2081 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
2082 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
2083 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
2084 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
2085 ret = 0;
2086 break;
2087 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
2088 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
2089 ret = ap_instructions_available() ? 0 : -ENXIO;
2090 break;
2091 default:
2092 ret = -ENXIO;
2093 break;
2094 }
2095 break;
2096 case KVM_S390_VM_MIGRATION:
2097 ret = 0;
2098 break;
2099 case KVM_S390_VM_CPU_TOPOLOGY:
2100 ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;
2101 break;
2102 default:
2103 ret = -ENXIO;
2104 break;
2105 }
2106
2107 return ret;
2108}
2109
2110static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
2111{
2112 uint8_t *keys;
2113 uint64_t hva;
2114 int srcu_idx, i, r = 0;
2115
2116 if (args->flags != 0)
2117 return -EINVAL;
2118
2119 /* Is this guest using storage keys? */
2120 if (!mm_uses_skeys(current->mm))
2121 return KVM_S390_GET_SKEYS_NONE;
2122
2123 /* Enforce sane limit on memory allocation */
2124 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
2125 return -EINVAL;
2126
2127 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
2128 if (!keys)
2129 return -ENOMEM;
2130
2131 mmap_read_lock(current->mm);
2132 srcu_idx = srcu_read_lock(&kvm->srcu);
2133 for (i = 0; i < args->count; i++) {
2134 hva = gfn_to_hva(kvm, args->start_gfn + i);
2135 if (kvm_is_error_hva(hva)) {
2136 r = -EFAULT;
2137 break;
2138 }
2139
2140 r = get_guest_storage_key(current->mm, hva, &keys[i]);
2141 if (r)
2142 break;
2143 }
2144 srcu_read_unlock(&kvm->srcu, srcu_idx);
2145 mmap_read_unlock(current->mm);
2146
2147 if (!r) {
2148 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
2149 sizeof(uint8_t) * args->count);
2150 if (r)
2151 r = -EFAULT;
2152 }
2153
2154 kvfree(keys);
2155 return r;
2156}
2157
2158static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
2159{
2160 uint8_t *keys;
2161 uint64_t hva;
2162 int srcu_idx, i, r = 0;
2163 bool unlocked;
2164
2165 if (args->flags != 0)
2166 return -EINVAL;
2167
2168 /* Enforce sane limit on memory allocation */
2169 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
2170 return -EINVAL;
2171
2172 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
2173 if (!keys)
2174 return -ENOMEM;
2175
2176 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
2177 sizeof(uint8_t) * args->count);
2178 if (r) {
2179 r = -EFAULT;
2180 goto out;
2181 }
2182
2183 /* Enable storage key handling for the guest */
2184 r = s390_enable_skey();
2185 if (r)
2186 goto out;
2187
2188 i = 0;
2189 mmap_read_lock(current->mm);
2190 srcu_idx = srcu_read_lock(&kvm->srcu);
2191 while (i < args->count) {
2192 unlocked = false;
2193 hva = gfn_to_hva(kvm, args->start_gfn + i);
2194 if (kvm_is_error_hva(hva)) {
2195 r = -EFAULT;
2196 break;
2197 }
2198
2199 /* Lowest order bit is reserved */
2200 if (keys[i] & 0x01) {
2201 r = -EINVAL;
2202 break;
2203 }
2204
2205 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
2206 if (r) {
2207 r = fixup_user_fault(current->mm, hva,
2208 FAULT_FLAG_WRITE, &unlocked);
2209 if (r)
2210 break;
2211 }
2212 if (!r)
2213 i++;
2214 }
2215 srcu_read_unlock(&kvm->srcu, srcu_idx);
2216 mmap_read_unlock(current->mm);
2217out:
2218 kvfree(keys);
2219 return r;
2220}
2221
/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to also send some clean data, as long as the run of clean
 * values is shorter than the size of two longs.
 */
2227#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
2228/* for consistency */
2229#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
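
/*
 * Worked example for the trade-off above, assuming 8-byte longs: opening a
 * new block costs two longs (base address and length), i.e. 16 bytes. A gap
 * of up to 16 clean pages adds at most 16 filler bytes (one result byte per
 * page), which is never worse than a new block, so kvm_s390_get_cmma()
 * keeps streaming across such gaps and only stops once the next dirty page
 * is more than KVM_S390_MAX_BIT_DISTANCE pages away.
 */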
2230
2231static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2232 u8 *res, unsigned long bufsize)
2233{
2234 unsigned long pgstev, hva, cur_gfn = args->start_gfn;
2235
2236 args->count = 0;
2237 while (args->count < bufsize) {
2238 hva = gfn_to_hva(kvm, cur_gfn);
2239 /*
2240 * We return an error if the first value was invalid, but we
2241 * return successfully if at least one value was copied.
2242 */
2243 if (kvm_is_error_hva(hva))
2244 return args->count ? 0 : -EFAULT;
2245 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2246 pgstev = 0;
2247 res[args->count++] = (pgstev >> 24) & 0x43;
2248 cur_gfn++;
2249 }
2250
2251 return 0;
2252}
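
/*
 * On the extraction above: shifting the PGSTE right by 24 bits lines the
 * guest usage state and the NODAT indication up in the low byte, and the
 * 0x43 mask keeps exactly those bits; compare _PGSTE_GPS_USAGE_MASK and
 * _PGSTE_GPS_NODAT, which are applied at the same shift in
 * kvm_s390_set_cmma_bits() below.
 */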
2253
2254static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
2255 gfn_t gfn)
2256{
2257 return ____gfn_to_memslot(slots, gfn, true);
2258}
2259
2260static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
2261 unsigned long cur_gfn)
2262{
2263 struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
2264 unsigned long ofs = cur_gfn - ms->base_gfn;
2265 struct rb_node *mnode = &ms->gfn_node[slots->node_idx];
2266
2267 if (ms->base_gfn + ms->npages <= cur_gfn) {
2268 mnode = rb_next(mnode);
2269 /* If we are above the highest slot, wrap around */
2270 if (!mnode)
2271 mnode = rb_first(&slots->gfn_tree);
2272
2273 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2274 ofs = 0;
2275 }
2276
2277 if (cur_gfn < ms->base_gfn)
2278 ofs = 0;
2279
2280 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
2281 while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
2282 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2283 ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
2284 }
2285 return ms->base_gfn + ofs;
2286}
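
/*
 * Summary of the walk above: locate the memslot containing cur_gfn (or the
 * closest one, via the approximate lookup), scan its per-memslot CMMA dirty
 * bitmap from the matching offset, and advance slot by slot through the gfn
 * tree until a set bit is found, wrapping around to the lowest slot after
 * the highest one. The caller bounds the result with the memory-end and
 * buffer-size checks in kvm_s390_get_cmma().
 */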
2287
2288static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2289 u8 *res, unsigned long bufsize)
2290{
2291 unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
2292 struct kvm_memslots *slots = kvm_memslots(kvm);
2293 struct kvm_memory_slot *ms;
2294
2295 if (unlikely(kvm_memslots_empty(slots)))
2296 return 0;
2297
2298 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2299 ms = gfn_to_memslot(kvm, cur_gfn);
2300 args->count = 0;
2301 args->start_gfn = cur_gfn;
2302 if (!ms)
2303 return 0;
2304 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2305 mem_end = kvm_s390_get_gfn_end(slots);
2306
2307 while (args->count < bufsize) {
2308 hva = gfn_to_hva(kvm, cur_gfn);
2309 if (kvm_is_error_hva(hva))
2310 return 0;
2311 /* Decrement only if we actually flipped the bit to 0 */
2312 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2313 atomic64_dec(&kvm->arch.cmma_dirty_pages);
2314 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2315 pgstev = 0;
2316 /* Save the value */
2317 res[args->count++] = (pgstev >> 24) & 0x43;
2318 /* If the next bit is too far away, stop. */
2319 if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2320 return 0;
2321 /* If we reached the previous "next", find the next one */
2322 if (cur_gfn == next_gfn)
2323 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2324 /* Reached the end of memory or of the buffer, stop */
2325 if ((next_gfn >= mem_end) ||
2326 (next_gfn - args->start_gfn >= bufsize))
2327 return 0;
2328 cur_gfn++;
2329 /* Reached the end of the current memslot, take the next one. */
2330 if (cur_gfn - ms->base_gfn >= ms->npages) {
2331 ms = gfn_to_memslot(kvm, cur_gfn);
2332 if (!ms)
2333 return 0;
2334 }
2335 }
2336 return 0;
2337}
2338
2339/*
2340 * This function searches for the next page with dirty CMMA attributes, and
2341 * saves the attributes in the buffer up to either the end of the buffer or
2342 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2343 * no trailing clean bytes are saved.
2344 * In case no dirty bits were found, or if CMMA was not enabled or used, the
2345 * output buffer will indicate 0 as length.
2346 */
2347static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2348 struct kvm_s390_cmma_log *args)
2349{
2350 unsigned long bufsize;
2351 int srcu_idx, peek, ret;
2352 u8 *values;
2353
2354 if (!kvm->arch.use_cmma)
2355 return -ENXIO;
2356 /* Invalid/unsupported flags were specified */
2357 if (args->flags & ~KVM_S390_CMMA_PEEK)
2358 return -EINVAL;
2359 /* Migration mode query, and we are not doing a migration */
2360 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
2361 if (!peek && !kvm->arch.migration_mode)
2362 return -EINVAL;
2363 /* CMMA is disabled or was not used, or the buffer has length zero */
2364 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
2365 if (!bufsize || !kvm->mm->context.uses_cmm) {
2366 memset(args, 0, sizeof(*args));
2367 return 0;
2368 }
2369 /* We are not peeking, and there are no dirty pages */
2370 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2371 memset(args, 0, sizeof(*args));
2372 return 0;
2373 }
2374
2375 values = vmalloc(bufsize);
2376 if (!values)
2377 return -ENOMEM;
2378
2379 mmap_read_lock(kvm->mm);
2380 srcu_idx = srcu_read_lock(&kvm->srcu);
2381 if (peek)
2382 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2383 else
2384 ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
2385 srcu_read_unlock(&kvm->srcu, srcu_idx);
2386 mmap_read_unlock(kvm->mm);
2387
2388 if (kvm->arch.migration_mode)
2389 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2390 else
2391 args->remaining = 0;
2392
2393 if (copy_to_user((void __user *)args->values, values, args->count))
2394 ret = -EFAULT;
2395
2396 vfree(values);
2397 return ret;
2398}
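
/*
 * Illustrative only: userspace drives this through the
 * KVM_S390_GET_CMMA_BITS vm ioctl. A sketch, assuming an open vm_fd and a
 * u8 buffer buf (use flags = 0 instead of KVM_S390_CMMA_PEEK while in
 * migration mode):
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = sizeof(buf),
 *		.flags = KVM_S390_CMMA_PEEK,
 *		.values = (__u64)(unsigned long)buf,
 *	};
 *
 *	ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 *
 * On return, start_gfn and count describe the block that was actually
 * written; in migration mode, remaining holds the number of pages that are
 * still dirty.
 */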
2399
2400/*
2401 * This function sets the CMMA attributes for the given pages. If the input
2402 * buffer has zero length, no action is taken, otherwise the attributes are
2403 * set and the mm->context.uses_cmm flag is set.
2404 */
2405static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2406 const struct kvm_s390_cmma_log *args)
2407{
2408 unsigned long hva, mask, pgstev, i;
2409 uint8_t *bits;
2410 int srcu_idx, r = 0;
2411
2412 mask = args->mask;
2413
2414 if (!kvm->arch.use_cmma)
2415 return -ENXIO;
2416 /* invalid/unsupported flags */
2417 if (args->flags != 0)
2418 return -EINVAL;
2419 /* Enforce sane limit on memory allocation */
2420 if (args->count > KVM_S390_CMMA_SIZE_MAX)
2421 return -EINVAL;
2422 /* Nothing to do */
2423 if (args->count == 0)
2424 return 0;
2425
2426 bits = vmalloc(array_size(sizeof(*bits), args->count));
2427 if (!bits)
2428 return -ENOMEM;
2429
2430 r = copy_from_user(bits, (void __user *)args->values, args->count);
2431 if (r) {
2432 r = -EFAULT;
2433 goto out;
2434 }
2435
2436 mmap_read_lock(kvm->mm);
2437 srcu_idx = srcu_read_lock(&kvm->srcu);
2438 for (i = 0; i < args->count; i++) {
2439 hva = gfn_to_hva(kvm, args->start_gfn + i);
2440 if (kvm_is_error_hva(hva)) {
2441 r = -EFAULT;
2442 break;
2443 }
2444
2445 pgstev = bits[i];
2446 pgstev = pgstev << 24;
2447 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
2448 set_pgste_bits(kvm->mm, hva, mask, pgstev);
2449 }
2450 srcu_read_unlock(&kvm->srcu, srcu_idx);
2451 mmap_read_unlock(kvm->mm);
2452
2453 if (!kvm->mm->context.uses_cmm) {
2454 mmap_write_lock(kvm->mm);
2455 kvm->mm->context.uses_cmm = 1;
2456 mmap_write_unlock(kvm->mm);
2457 }
2458out:
2459 vfree(bits);
2460 return r;
2461}
2462
/**
 * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
 * non-protected.
 * @kvm: the VM whose protected vCPUs are to be converted
 * @rc: return value for the RC field of the UVC (in case of error)
 * @rrc: return value for the RRC field of the UVC (in case of error)
 *
 * Does not stop in case of error, tries to convert as many
 * CPUs as possible. In case of error, the RC and RRC of the first error are
 * returned.
 *
 * Return: 0 in case of success, otherwise -EIO
 */
2476int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2477{
2478 struct kvm_vcpu *vcpu;
2479 unsigned long i;
2480 u16 _rc, _rrc;
2481 int ret = 0;
2482
2483 /*
2484 * We ignore failures and try to destroy as many CPUs as possible.
2485 * At the same time we must not free the assigned resources when
	 * this fails, as the ultravisor still has access to that memory.
2487 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
2488 * behind.
2489 * We want to return the first failure rc and rrc, though.
2490 */
2491 kvm_for_each_vcpu(i, vcpu, kvm) {
2492 mutex_lock(&vcpu->mutex);
2493 if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
2494 *rc = _rc;
2495 *rrc = _rrc;
2496 ret = -EIO;
2497 }
2498 mutex_unlock(&vcpu->mutex);
2499 }
2500 /* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
2501 if (use_gisa)
2502 kvm_s390_gisa_enable(kvm);
2503 return ret;
2504}
2505
2506/**
2507 * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
2508 * to protected.
2509 * @kvm: the VM whose protected vCPUs are to be converted
2510 * @rc: return value for the RC field of the UVC (in case of error)
2511 * @rrc: return value for the RRC field of the UVC (in case of error)
2512 *
2513 * Tries to undo the conversion in case of error.
2514 *
2515 * Return: 0 in case of success, otherwise -EIO
2516 */
2517static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2518{
2519 unsigned long i;
2520 int r = 0;
2521 u16 dummy;
2522
2523 struct kvm_vcpu *vcpu;
2524
2525 /* Disable the GISA if the ultravisor does not support AIV. */
2526 if (!uv_has_feature(BIT_UV_FEAT_AIV))
2527 kvm_s390_gisa_disable(kvm);
2528
2529 kvm_for_each_vcpu(i, vcpu, kvm) {
2530 mutex_lock(&vcpu->mutex);
2531 r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
2532 mutex_unlock(&vcpu->mutex);
2533 if (r)
2534 break;
2535 }
2536 if (r)
2537 kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
2538 return r;
2539}
2540
2541/*
2542 * Here we provide user space with a direct interface to query UV
2543 * related data like UV maxima and available features as well as
2544 * feature specific data.
2545 *
2546 * To facilitate future extension of the data structures we'll try to
2547 * write data up to the maximum requested length.
2548 */
2549static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info)
2550{
2551 ssize_t len_min;
2552
2553 switch (info->header.id) {
2554 case KVM_PV_INFO_VM: {
2555 len_min = sizeof(info->header) + sizeof(info->vm);
2556
2557 if (info->header.len_max < len_min)
2558 return -EINVAL;
2559
2560 memcpy(info->vm.inst_calls_list,
2561 uv_info.inst_calls_list,
2562 sizeof(uv_info.inst_calls_list));
2563
2564 /* It's max cpuid not max cpus, so it's off by one */
2565 info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
2566 info->vm.max_guests = uv_info.max_num_sec_conf;
2567 info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
2568 info->vm.feature_indication = uv_info.uv_feature_indications;
2569
2570 return len_min;
2571 }
2572 case KVM_PV_INFO_DUMP: {
2573 len_min = sizeof(info->header) + sizeof(info->dump);
2574
2575 if (info->header.len_max < len_min)
2576 return -EINVAL;
2577
2578 info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len;
2579 info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len;
2580 info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len;
2581 return len_min;
2582 }
2583 default:
2584 return -EINVAL;
2585 }
2586}
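
/*
 * The length handshake above keeps this interface extensible: userspace
 * states in len_max how many bytes it can accept, the handler rejects
 * anything smaller than the current minimum (header plus per-id payload),
 * and the number of bytes actually produced is reported back through
 * len_written. If, hypothetically, the vm info grew a new trailing field,
 * old callers with a smaller but still sufficient len_max would keep
 * working and could use len_written to tell which fields were filled in.
 */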
2587
2588static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
2589 struct kvm_s390_pv_dmp dmp)
2590{
2591 int r = -EINVAL;
2592 void __user *result_buff = (void __user *)dmp.buff_addr;
2593
2594 switch (dmp.subcmd) {
2595 case KVM_PV_DUMP_INIT: {
2596 if (kvm->arch.pv.dumping)
2597 break;
2598
2599 /*
2600 * Block SIE entry as concurrent dump UVCs could lead
		 * to validity interceptions.
2602 */
2603 kvm_s390_vcpu_block_all(kvm);
2604
2605 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2606 UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc);
2607 KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x",
2608 cmd->rc, cmd->rrc);
2609 if (!r) {
2610 kvm->arch.pv.dumping = true;
2611 } else {
2612 kvm_s390_vcpu_unblock_all(kvm);
2613 r = -EINVAL;
2614 }
2615 break;
2616 }
2617 case KVM_PV_DUMP_CONFIG_STOR_STATE: {
2618 if (!kvm->arch.pv.dumping)
2619 break;
2620
2621 /*
2622 * gaddr is an output parameter since we might stop
2623 * early. As dmp will be copied back in our caller, we
2624 * don't need to do it ourselves.
2625 */
2626 r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len,
2627 &cmd->rc, &cmd->rrc);
2628 break;
2629 }
2630 case KVM_PV_DUMP_COMPLETE: {
2631 if (!kvm->arch.pv.dumping)
2632 break;
2633
2634 r = -EINVAL;
2635 if (dmp.buff_len < uv_info.conf_dump_finalize_len)
2636 break;
2637
2638 r = kvm_s390_pv_dump_complete(kvm, result_buff,
2639 &cmd->rc, &cmd->rrc);
2640 break;
2641 }
2642 default:
2643 r = -ENOTTY;
2644 break;
2645 }
2646
2647 return r;
2648}
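
/*
 * The dumping flag above enforces the expected dump sequence:
 * KVM_PV_DUMP_INIT exactly once, then KVM_PV_DUMP_CONFIG_STOR_STATE as
 * often as needed to walk the configuration storage state, and finally
 * KVM_PV_DUMP_COMPLETE, whose result buffer must hold at least
 * uv_info.conf_dump_finalize_len bytes.
 */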
2649
2650static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
2651{
2652 const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
2653 void __user *argp = (void __user *)cmd->data;
2654 int r = 0;
2655 u16 dummy;
2656
2657 if (need_lock)
2658 mutex_lock(&kvm->lock);
2659
2660 switch (cmd->cmd) {
2661 case KVM_PV_ENABLE: {
2662 r = -EINVAL;
2663 if (kvm_s390_pv_is_protected(kvm))
2664 break;
2665
2666 /*
2667 * FMT 4 SIE needs esca. As we never switch back to bsca from
2668 * esca, we need no cleanup in the error cases below
2669 */
2670 r = sca_switch_to_extended(kvm);
2671 if (r)
2672 break;
2673
2674 r = s390_disable_cow_sharing();
2675 if (r)
2676 break;
2677
2678 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
2679 if (r)
2680 break;
2681
2682 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
2683 if (r)
2684 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
2685
2686 /* we need to block service interrupts from now on */
2687 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2688 break;
2689 }
2690 case KVM_PV_ASYNC_CLEANUP_PREPARE:
2691 r = -EINVAL;
2692 if (!kvm_s390_pv_is_protected(kvm) || !async_destroy)
2693 break;
2694
2695 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2696 /*
2697 * If a CPU could not be destroyed, destroy VM will also fail.
2698 * There is no point in trying to destroy it. Instead return
2699 * the rc and rrc from the first CPU that failed destroying.
2700 */
2701 if (r)
2702 break;
2703 r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc);
2704
2705 /* no need to block service interrupts any more */
2706 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2707 break;
2708 case KVM_PV_ASYNC_CLEANUP_PERFORM:
2709 r = -EINVAL;
2710 if (!async_destroy)
2711 break;
2712 /* kvm->lock must not be held; this is asserted inside the function. */
2713 r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc);
2714 break;
2715 case KVM_PV_DISABLE: {
2716 r = -EINVAL;
2717 if (!kvm_s390_pv_is_protected(kvm))
2718 break;
2719
2720 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2721 /*
2722 * If a CPU could not be destroyed, destroy VM will also fail.
2723 * There is no point in trying to destroy it. Instead return
2724 * the rc and rrc from the first CPU that failed destroying.
2725 */
2726 if (r)
2727 break;
2728 r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc);
2729
2730 /* no need to block service interrupts any more */
2731 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2732 break;
2733 }
2734 case KVM_PV_SET_SEC_PARMS: {
2735 struct kvm_s390_pv_sec_parm parms = {};
2736 void *hdr;
2737
2738 r = -EINVAL;
2739 if (!kvm_s390_pv_is_protected(kvm))
2740 break;
2741
2742 r = -EFAULT;
2743 if (copy_from_user(&parms, argp, sizeof(parms)))
2744 break;
2745
2746 /* Currently restricted to 8KB */
2747 r = -EINVAL;
2748 if (parms.length > PAGE_SIZE * 2)
2749 break;
2750
2751 r = -ENOMEM;
2752 hdr = vmalloc(parms.length);
2753 if (!hdr)
2754 break;
2755
2756 r = -EFAULT;
2757 if (!copy_from_user(hdr, (void __user *)parms.origin,
2758 parms.length))
2759 r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
2760 &cmd->rc, &cmd->rrc);
2761
2762 vfree(hdr);
2763 break;
2764 }
2765 case KVM_PV_UNPACK: {
2766 struct kvm_s390_pv_unp unp = {};
2767
2768 r = -EINVAL;
2769 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
2770 break;
2771
2772 r = -EFAULT;
2773 if (copy_from_user(&unp, argp, sizeof(unp)))
2774 break;
2775
2776 r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
2777 &cmd->rc, &cmd->rrc);
2778 break;
2779 }
2780 case KVM_PV_VERIFY: {
2781 r = -EINVAL;
2782 if (!kvm_s390_pv_is_protected(kvm))
2783 break;
2784
2785 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2786 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
2787 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
2788 cmd->rrc);
2789 break;
2790 }
2791 case KVM_PV_PREP_RESET: {
2792 r = -EINVAL;
2793 if (!kvm_s390_pv_is_protected(kvm))
2794 break;
2795
2796 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2797 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2798 KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2799 cmd->rc, cmd->rrc);
2800 break;
2801 }
2802 case KVM_PV_UNSHARE_ALL: {
2803 r = -EINVAL;
2804 if (!kvm_s390_pv_is_protected(kvm))
2805 break;
2806
2807 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2808 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2809 KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2810 cmd->rc, cmd->rrc);
2811 break;
2812 }
2813 case KVM_PV_INFO: {
2814 struct kvm_s390_pv_info info = {};
2815 ssize_t data_len;
2816
2817 /*
2818 * No need to check the VM protection here.
2819 *
2820 * Maybe user space wants to query some of the data
2821 * when the VM is still unprotected. If we see the
2822 * need to fence a new data command we can still
2823 * return an error in the info handler.
2824 */
2825
2826 r = -EFAULT;
2827 if (copy_from_user(&info, argp, sizeof(info.header)))
2828 break;
2829
2830 r = -EINVAL;
2831 if (info.header.len_max < sizeof(info.header))
2832 break;
2833
2834 data_len = kvm_s390_handle_pv_info(&info);
2835 if (data_len < 0) {
2836 r = data_len;
2837 break;
2838 }
2839 /*
2840 * If a data command struct is extended (multiple
2841 * times) this can be used to determine how much of it
2842 * is valid.
2843 */
2844 info.header.len_written = data_len;
2845
2846 r = -EFAULT;
2847 if (copy_to_user(argp, &info, data_len))
2848 break;
2849
2850 r = 0;
2851 break;
2852 }
2853 case KVM_PV_DUMP: {
2854 struct kvm_s390_pv_dmp dmp;
2855
2856 r = -EINVAL;
2857 if (!kvm_s390_pv_is_protected(kvm))
2858 break;
2859
2860 r = -EFAULT;
2861 if (copy_from_user(&dmp, argp, sizeof(dmp)))
2862 break;
2863
2864 r = kvm_s390_pv_dmp(kvm, cmd, dmp);
2865 if (r)
2866 break;
2867
2868 if (copy_to_user(argp, &dmp, sizeof(dmp))) {
2869 r = -EFAULT;
2870 break;
2871 }
2872
2873 break;
2874 }
2875 default:
2876 r = -ENOTTY;
2877 }
2878 if (need_lock)
2879 mutex_unlock(&kvm->lock);
2880
2881 return r;
2882}
2883
2884static int mem_op_validate_common(struct kvm_s390_mem_op *mop, u64 supported_flags)
2885{
2886 if (mop->flags & ~supported_flags || !mop->size)
2887 return -EINVAL;
2888 if (mop->size > MEM_OP_MAX_SIZE)
2889 return -E2BIG;
2890 if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
2891 if (mop->key > 0xf)
2892 return -EINVAL;
2893 } else {
2894 mop->key = 0;
2895 }
2896 return 0;
2897}
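
/*
 * The key check above mirrors the architecture: storage keys carry a 4-bit
 * access-control field, so a supplied access key must fit into the low
 * four bits. Without KVM_S390_MEMOP_F_SKEY_PROTECTION the key is
 * normalized to 0 so that the access routines can rely on its value.
 */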
2898
2899static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2900{
2901 void __user *uaddr = (void __user *)mop->buf;
2902 enum gacc_mode acc_mode;
2903 void *tmpbuf = NULL;
2904 int r, srcu_idx;
2905
2906 r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION |
2907 KVM_S390_MEMOP_F_CHECK_ONLY);
2908 if (r)
2909 return r;
2910
2911 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2912 tmpbuf = vmalloc(mop->size);
2913 if (!tmpbuf)
2914 return -ENOMEM;
2915 }
2916
2917 srcu_idx = srcu_read_lock(&kvm->srcu);
2918
2919 if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
2920 r = PGM_ADDRESSING;
2921 goto out_unlock;
2922 }
2923
2924 acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE;
2925 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2926 r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key);
2927 goto out_unlock;
2928 }
2929 if (acc_mode == GACC_FETCH) {
2930 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2931 mop->size, GACC_FETCH, mop->key);
2932 if (r)
2933 goto out_unlock;
2934 if (copy_to_user(uaddr, tmpbuf, mop->size))
2935 r = -EFAULT;
2936 } else {
2937 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2938 r = -EFAULT;
2939 goto out_unlock;
2940 }
2941 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2942 mop->size, GACC_STORE, mop->key);
2943 }
2944
2945out_unlock:
2946 srcu_read_unlock(&kvm->srcu, srcu_idx);
2947
2948 vfree(tmpbuf);
2949 return r;
2950}
2951
2952static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2953{
2954 void __user *uaddr = (void __user *)mop->buf;
2955 void __user *old_addr = (void __user *)mop->old_addr;
2956 union {
2957 __uint128_t quad;
2958 char raw[sizeof(__uint128_t)];
	} old = { .quad = 0 }, new = { .quad = 0 };
2960 unsigned int off_in_quad = sizeof(new) - mop->size;
2961 int r, srcu_idx;
2962 bool success;
2963
2964 r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION);
2965 if (r)
2966 return r;
2967 /*
2968 * This validates off_in_quad. Checking that size is a power
2969 * of two is not necessary, as cmpxchg_guest_abs_with_key
2970 * takes care of that
2971 */
2972 if (mop->size > sizeof(new))
2973 return -EINVAL;
2974 if (copy_from_user(&new.raw[off_in_quad], uaddr, mop->size))
2975 return -EFAULT;
2976 if (copy_from_user(&old.raw[off_in_quad], old_addr, mop->size))
2977 return -EFAULT;
2978
2979 srcu_idx = srcu_read_lock(&kvm->srcu);
2980
2981 if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
2982 r = PGM_ADDRESSING;
2983 goto out_unlock;
2984 }
2985
2986 r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad,
2987 new.quad, mop->key, &success);
2988 if (!success && copy_to_user(old_addr, &old.raw[off_in_quad], mop->size))
2989 r = -EFAULT;
2990
2991out_unlock:
2992 srcu_read_unlock(&kvm->srcu, srcu_idx);
2993 return r;
2994}
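
/*
 * Worked example for off_in_quad above: old and new are kept right-aligned
 * within the 16-byte quad, so a 4-byte operation copies the user values
 * into raw[12..15], which on big-endian s390 are the least significant
 * bytes of the __uint128_t passed to cmpxchg_guest_abs_with_key().
 */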
2995
2996static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2997{
2998 /*
	 * This is technically a heuristic only; if the kvm->lock is not
	 * taken, it is not guaranteed that the vm is/remains non-protected.
	 * This is ok from a kernel perspective: wrongdoing is detected
	 * on the access, -EFAULT is returned and the vm may crash the
	 * next time it accesses the memory in question.
	 * There is no sane use case for doing the protection switch and a
	 * memop on two different CPUs at the same time.
3006 */
3007 if (kvm_s390_pv_get_handle(kvm))
3008 return -EINVAL;
3009
3010 switch (mop->op) {
3011 case KVM_S390_MEMOP_ABSOLUTE_READ:
3012 case KVM_S390_MEMOP_ABSOLUTE_WRITE:
3013 return kvm_s390_vm_mem_op_abs(kvm, mop);
3014 case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
3015 return kvm_s390_vm_mem_op_cmpxchg(kvm, mop);
3016 default:
3017 return -EINVAL;
3018 }
3019}
3020
3021int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
3022{
3023 struct kvm *kvm = filp->private_data;
3024 void __user *argp = (void __user *)arg;
3025 struct kvm_device_attr attr;
3026 int r;
3027
3028 switch (ioctl) {
3029 case KVM_S390_INTERRUPT: {
3030 struct kvm_s390_interrupt s390int;
3031
3032 r = -EFAULT;
3033 if (copy_from_user(&s390int, argp, sizeof(s390int)))
3034 break;
3035 r = kvm_s390_inject_vm(kvm, &s390int);
3036 break;
3037 }
3038 case KVM_CREATE_IRQCHIP: {
3039 r = -EINVAL;
3040 if (kvm->arch.use_irqchip)
3041 r = 0;
3042 break;
3043 }
3044 case KVM_SET_DEVICE_ATTR: {
3045 r = -EFAULT;
3046 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3047 break;
3048 r = kvm_s390_vm_set_attr(kvm, &attr);
3049 break;
3050 }
3051 case KVM_GET_DEVICE_ATTR: {
3052 r = -EFAULT;
3053 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3054 break;
3055 r = kvm_s390_vm_get_attr(kvm, &attr);
3056 break;
3057 }
3058 case KVM_HAS_DEVICE_ATTR: {
3059 r = -EFAULT;
3060 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3061 break;
3062 r = kvm_s390_vm_has_attr(kvm, &attr);
3063 break;
3064 }
3065 case KVM_S390_GET_SKEYS: {
3066 struct kvm_s390_skeys args;
3067
3068 r = -EFAULT;
3069 if (copy_from_user(&args, argp,
3070 sizeof(struct kvm_s390_skeys)))
3071 break;
3072 r = kvm_s390_get_skeys(kvm, &args);
3073 break;
3074 }
3075 case KVM_S390_SET_SKEYS: {
3076 struct kvm_s390_skeys args;
3077
3078 r = -EFAULT;
3079 if (copy_from_user(&args, argp,
3080 sizeof(struct kvm_s390_skeys)))
3081 break;
3082 r = kvm_s390_set_skeys(kvm, &args);
3083 break;
3084 }
3085 case KVM_S390_GET_CMMA_BITS: {
3086 struct kvm_s390_cmma_log args;
3087
3088 r = -EFAULT;
3089 if (copy_from_user(&args, argp, sizeof(args)))
3090 break;
3091 mutex_lock(&kvm->slots_lock);
3092 r = kvm_s390_get_cmma_bits(kvm, &args);
3093 mutex_unlock(&kvm->slots_lock);
3094 if (!r) {
3095 r = copy_to_user(argp, &args, sizeof(args));
3096 if (r)
3097 r = -EFAULT;
3098 }
3099 break;
3100 }
3101 case KVM_S390_SET_CMMA_BITS: {
3102 struct kvm_s390_cmma_log args;
3103
3104 r = -EFAULT;
3105 if (copy_from_user(&args, argp, sizeof(args)))
3106 break;
3107 mutex_lock(&kvm->slots_lock);
3108 r = kvm_s390_set_cmma_bits(kvm, &args);
3109 mutex_unlock(&kvm->slots_lock);
3110 break;
3111 }
3112 case KVM_S390_PV_COMMAND: {
3113 struct kvm_pv_cmd args;
3114
3115 /* protvirt means user cpu state */
3116 kvm_s390_set_user_cpu_state_ctrl(kvm);
3117 r = 0;
3118 if (!is_prot_virt_host()) {
3119 r = -EINVAL;
3120 break;
3121 }
3122 if (copy_from_user(&args, argp, sizeof(args))) {
3123 r = -EFAULT;
3124 break;
3125 }
3126 if (args.flags) {
3127 r = -EINVAL;
3128 break;
3129 }
3130 /* must be called without kvm->lock */
3131 r = kvm_s390_handle_pv(kvm, &args);
3132 if (copy_to_user(argp, &args, sizeof(args))) {
3133 r = -EFAULT;
3134 break;
3135 }
3136 break;
3137 }
3138 case KVM_S390_MEM_OP: {
3139 struct kvm_s390_mem_op mem_op;
3140
3141 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3142 r = kvm_s390_vm_mem_op(kvm, &mem_op);
3143 else
3144 r = -EFAULT;
3145 break;
3146 }
3147 case KVM_S390_ZPCI_OP: {
3148 struct kvm_s390_zpci_op args;
3149
3150 r = -EINVAL;
3151 if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3152 break;
3153 if (copy_from_user(&args, argp, sizeof(args))) {
3154 r = -EFAULT;
3155 break;
3156 }
3157 r = kvm_s390_pci_zpci_op(kvm, &args);
3158 break;
3159 }
3160 default:
3161 r = -ENOTTY;
3162 }
3163
3164 return r;
3165}
3166
3167static int kvm_s390_apxa_installed(void)
3168{
3169 struct ap_config_info info;
3170
3171 if (ap_instructions_available()) {
3172 if (ap_qci(&info) == 0)
3173 return info.apxa;
3174 }
3175
3176 return 0;
3177}
3178
3179/*
3180 * The format of the crypto control block (CRYCB) is specified in the 3 low
3181 * order bits of the CRYCB designation (CRYCBD) field as follows:
 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 *	     AP extended addressing (APXA) facility is installed.
 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 * Format 2: Both the APXA and MSAX3 facilities are installed.
3186 */
3187static void kvm_s390_set_crycb_format(struct kvm *kvm)
3188{
3189 kvm->arch.crypto.crycbd = virt_to_phys(kvm->arch.crypto.crycb);
3190
3191 /* Clear the CRYCB format bits - i.e., set format 0 by default */
3192 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
3193
3194 /* Check whether MSAX3 is installed */
3195 if (!test_kvm_facility(kvm, 76))
3196 return;
3197
3198 if (kvm_s390_apxa_installed())
3199 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
3200 else
3201 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
3202}
3203
3204/*
3205 * kvm_arch_crypto_set_masks
3206 *
3207 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3208 * to be set.
3209 * @apm: the mask identifying the accessible AP adapters
3210 * @aqm: the mask identifying the accessible AP domains
3211 * @adm: the mask identifying the accessible AP control domains
3212 *
3213 * Set the masks that identify the adapters, domains and control domains to
3214 * which the KVM guest is granted access.
3215 *
3216 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3217 * function.
3218 */
3219void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
3220 unsigned long *aqm, unsigned long *adm)
3221{
3222 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
3223
3224 kvm_s390_vcpu_block_all(kvm);
3225
3226 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
3228 memcpy(crycb->apcb1.apm, apm, 32);
3229 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
3230 apm[0], apm[1], apm[2], apm[3]);
3231 memcpy(crycb->apcb1.aqm, aqm, 32);
3232 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
3233 aqm[0], aqm[1], aqm[2], aqm[3]);
3234 memcpy(crycb->apcb1.adm, adm, 32);
3235 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
3236 adm[0], adm[1], adm[2], adm[3]);
3237 break;
3238 case CRYCB_FORMAT1:
	case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
3240 memcpy(crycb->apcb0.apm, apm, 8);
3241 memcpy(crycb->apcb0.aqm, aqm, 2);
3242 memcpy(crycb->apcb0.adm, adm, 2);
3243 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
3244 apm[0], *((unsigned short *)aqm),
3245 *((unsigned short *)adm));
3246 break;
	default:	/* Cannot happen */
3248 break;
3249 }
3250
3251 /* recreate the shadow crycb for each vcpu */
3252 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3253 kvm_s390_vcpu_unblock_all(kvm);
3254}
3255EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
3256
3257/*
3258 * kvm_arch_crypto_clear_masks
3259 *
3260 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3261 * to be cleared.
3262 *
3263 * Clear the masks that identify the adapters, domains and control domains to
3264 * which the KVM guest is granted access.
3265 *
3266 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3267 * function.
3268 */
3269void kvm_arch_crypto_clear_masks(struct kvm *kvm)
3270{
3271 kvm_s390_vcpu_block_all(kvm);
3272
3273 memset(&kvm->arch.crypto.crycb->apcb0, 0,
3274 sizeof(kvm->arch.crypto.crycb->apcb0));
3275 memset(&kvm->arch.crypto.crycb->apcb1, 0,
3276 sizeof(kvm->arch.crypto.crycb->apcb1));
3277
3278 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
3279 /* recreate the shadow crycb for each vcpu */
3280 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3281 kvm_s390_vcpu_unblock_all(kvm);
3282}
3283EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
3284
3285static u64 kvm_s390_get_initial_cpuid(void)
3286{
3287 struct cpuid cpuid;
3288
3289 get_cpu_id(&cpuid);
3290 cpuid.version = 0xff;
3291 return *((u64 *) &cpuid);
3292}
3293
3294static void kvm_s390_crypto_init(struct kvm *kvm)
3295{
3296 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
3297 kvm_s390_set_crycb_format(kvm);
3298 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
3299
3300 if (!test_kvm_facility(kvm, 76))
3301 return;
3302
3303 /* Enable AES/DEA protected key functions by default */
3304 kvm->arch.crypto.aes_kw = 1;
3305 kvm->arch.crypto.dea_kw = 1;
3306 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
3307 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
3308 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
3309 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
3310}
3311
3312static void sca_dispose(struct kvm *kvm)
3313{
3314 if (kvm->arch.use_esca)
3315 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
3316 else
3317 free_page((unsigned long)(kvm->arch.sca));
3318 kvm->arch.sca = NULL;
3319}
3320
3321void kvm_arch_free_vm(struct kvm *kvm)
3322{
3323 if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3324 kvm_s390_pci_clear_list(kvm);
3325
3326 __kvm_arch_free_vm(kvm);
3327}
3328
3329int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
3330{
3331 gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
3332 int i, rc;
3333 char debug_name[16];
3334 static unsigned long sca_offset;
3335
3336 rc = -EINVAL;
3337#ifdef CONFIG_KVM_S390_UCONTROL
3338 if (type & ~KVM_VM_S390_UCONTROL)
3339 goto out_err;
3340 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
3341 goto out_err;
3342#else
3343 if (type)
3344 goto out_err;
3345#endif
3346
3347 rc = s390_enable_sie();
3348 if (rc)
3349 goto out_err;
3350
3351 rc = -ENOMEM;
3352
3353 if (!sclp.has_64bscao)
3354 alloc_flags |= GFP_DMA;
3355 rwlock_init(&kvm->arch.sca_lock);
3356 /* start with basic SCA */
3357 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
3358 if (!kvm->arch.sca)
3359 goto out_err;
3360 mutex_lock(&kvm_lock);
3361 sca_offset += 16;
3362 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
3363 sca_offset = 0;
3364 kvm->arch.sca = (struct bsca_block *)
3365 ((char *) kvm->arch.sca + sca_offset);
3366 mutex_unlock(&kvm_lock);
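
	/*
	 * Note on the staggering above: each new VM places its basic SCA at
	 * a different 16-byte offset within its own page, wrapping before
	 * the block would cross a page boundary; presumably this spreads the
	 * frequently touched SCA fields of different VMs across distinct
	 * cache lines.
	 */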
3367
3368 sprintf(debug_name, "kvm-%u", current->pid);
3369
3370 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
3371 if (!kvm->arch.dbf)
3372 goto out_err;
3373
3374 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
3375 kvm->arch.sie_page2 =
3376 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
3377 if (!kvm->arch.sie_page2)
3378 goto out_err;
3379
3380 kvm->arch.sie_page2->kvm = kvm;
3381 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
3382
3383 for (i = 0; i < kvm_s390_fac_size(); i++) {
3384 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
3385 (kvm_s390_fac_base[i] |
3386 kvm_s390_fac_ext[i]);
3387 kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
3388 kvm_s390_fac_base[i];
3389 }
3390 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
3391
3392 /* we are always in czam mode - even on pre z14 machines */
3393 set_kvm_facility(kvm->arch.model.fac_mask, 138);
3394 set_kvm_facility(kvm->arch.model.fac_list, 138);
3395 /* we emulate STHYI in kvm */
3396 set_kvm_facility(kvm->arch.model.fac_mask, 74);
3397 set_kvm_facility(kvm->arch.model.fac_list, 74);
3398 if (MACHINE_HAS_TLB_GUEST) {
3399 set_kvm_facility(kvm->arch.model.fac_mask, 147);
3400 set_kvm_facility(kvm->arch.model.fac_list, 147);
3401 }
3402
3403 if (css_general_characteristics.aiv && test_facility(65))
3404 set_kvm_facility(kvm->arch.model.fac_mask, 65);
3405
3406 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
3407 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
3408
3409 kvm->arch.model.uv_feat_guest.feat = 0;
3410
3411 kvm_s390_crypto_init(kvm);
3412
3413 if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
3414 mutex_lock(&kvm->lock);
3415 kvm_s390_pci_init_list(kvm);
3416 kvm_s390_vcpu_pci_enable_interp(kvm);
3417 mutex_unlock(&kvm->lock);
3418 }
3419
3420 mutex_init(&kvm->arch.float_int.ais_lock);
3421 spin_lock_init(&kvm->arch.float_int.lock);
3422 for (i = 0; i < FIRQ_LIST_COUNT; i++)
3423 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
3424 init_waitqueue_head(&kvm->arch.ipte_wq);
3425 mutex_init(&kvm->arch.ipte_mutex);
3426
3427 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
3428 VM_EVENT(kvm, 3, "vm created with type %lu", type);
3429
3430 if (type & KVM_VM_S390_UCONTROL) {
3431 kvm->arch.gmap = NULL;
3432 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
3433 } else {
3434 if (sclp.hamax == U64_MAX)
3435 kvm->arch.mem_limit = TASK_SIZE_MAX;
3436 else
3437 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
3438 sclp.hamax + 1);
3439 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
3440 if (!kvm->arch.gmap)
3441 goto out_err;
3442 kvm->arch.gmap->private = kvm;
3443 kvm->arch.gmap->pfault_enabled = 0;
3444 }
3445
3446 kvm->arch.use_pfmfi = sclp.has_pfmfi;
3447 kvm->arch.use_skf = sclp.has_skey;
3448 spin_lock_init(&kvm->arch.start_stop_lock);
3449 kvm_s390_vsie_init(kvm);
3450 if (use_gisa)
3451 kvm_s390_gisa_init(kvm);
3452 INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
3453 kvm->arch.pv.set_aside = NULL;
3454 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
3455
3456 return 0;
3457out_err:
3458 free_page((unsigned long)kvm->arch.sie_page2);
3459 debug_unregister(kvm->arch.dbf);
3460 sca_dispose(kvm);
3461 KVM_EVENT(3, "creation of vm failed: %d", rc);
3462 return rc;
3463}
3464
3465void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
3466{
3467 u16 rc, rrc;
3468
3469 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
3470 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
3471 kvm_s390_clear_local_irqs(vcpu);
3472 kvm_clear_async_pf_completion_queue(vcpu);
3473 if (!kvm_is_ucontrol(vcpu->kvm))
3474 sca_del_vcpu(vcpu);
3475 kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3476
3477 if (kvm_is_ucontrol(vcpu->kvm))
3478 gmap_remove(vcpu->arch.gmap);
3479
3480 if (vcpu->kvm->arch.use_cmma)
3481 kvm_s390_vcpu_unsetup_cmma(vcpu);
	/* We cannot hold the vcpu mutex here; we are already dying */
3483 if (kvm_s390_pv_cpu_get_handle(vcpu))
3484 kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
3485 free_page((unsigned long)(vcpu->arch.sie_block));
3486}
3487
3488void kvm_arch_destroy_vm(struct kvm *kvm)
3489{
3490 u16 rc, rrc;
3491
3492 kvm_destroy_vcpus(kvm);
3493 sca_dispose(kvm);
3494 kvm_s390_gisa_destroy(kvm);
3495 /*
3496 * We are already at the end of life and kvm->lock is not taken.
3497 * This is ok as the file descriptor is closed by now and nobody
3498 * can mess with the pv state.
3499 */
3500 kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc);
3501 /*
3502 * Remove the mmu notifier only when the whole KVM VM is torn down,
3503 * and only if one was registered to begin with. If the VM is
	 * currently not protected, but has previously been protected,
3505 * then it's possible that the notifier is still registered.
3506 */
3507 if (kvm->arch.pv.mmu_notifier.ops)
3508 mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);
3509
3510 debug_unregister(kvm->arch.dbf);
3511 free_page((unsigned long)kvm->arch.sie_page2);
3512 if (!kvm_is_ucontrol(kvm))
3513 gmap_remove(kvm->arch.gmap);
3514 kvm_s390_destroy_adapters(kvm);
3515 kvm_s390_clear_float_irqs(kvm);
3516 kvm_s390_vsie_destroy(kvm);
3517 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
3518}
3519
3520/* Section: vcpu related */
3521static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
3522{
3523 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
3524 if (!vcpu->arch.gmap)
3525 return -ENOMEM;
3526 vcpu->arch.gmap->private = vcpu->kvm;
3527
3528 return 0;
3529}
3530
3531static void sca_del_vcpu(struct kvm_vcpu *vcpu)
3532{
3533 if (!kvm_s390_use_sca_entries())
3534 return;
3535 read_lock(&vcpu->kvm->arch.sca_lock);
3536 if (vcpu->kvm->arch.use_esca) {
3537 struct esca_block *sca = vcpu->kvm->arch.sca;
3538
3539 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
3540 sca->cpu[vcpu->vcpu_id].sda = 0;
3541 } else {
3542 struct bsca_block *sca = vcpu->kvm->arch.sca;
3543
3544 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3545 sca->cpu[vcpu->vcpu_id].sda = 0;
3546 }
3547 read_unlock(&vcpu->kvm->arch.sca_lock);
3548}
3549
3550static void sca_add_vcpu(struct kvm_vcpu *vcpu)
3551{
3552 if (!kvm_s390_use_sca_entries()) {
3553 phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca);
3554
3555 /* we still need the basic sca for the ipte control */
3556 vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3557 vcpu->arch.sie_block->scaol = sca_phys;
3558 return;
3559 }
3560 read_lock(&vcpu->kvm->arch.sca_lock);
3561 if (vcpu->kvm->arch.use_esca) {
3562 struct esca_block *sca = vcpu->kvm->arch.sca;
3563 phys_addr_t sca_phys = virt_to_phys(sca);
3564
3565 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3566 vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3567 vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
3568 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3569 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
3570 } else {
3571 struct bsca_block *sca = vcpu->kvm->arch.sca;
3572 phys_addr_t sca_phys = virt_to_phys(sca);
3573
3574 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3575 vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3576 vcpu->arch.sie_block->scaol = sca_phys;
3577 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3578 }
3579 read_unlock(&vcpu->kvm->arch.sca_lock);
3580}
3581
3582/* Basic SCA to Extended SCA data copy routines */
3583static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
3584{
3585 d->sda = s->sda;
3586 d->sigp_ctrl.c = s->sigp_ctrl.c;
3587 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
3588}
3589
3590static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
3591{
3592 int i;
3593
3594 d->ipte_control = s->ipte_control;
3595 d->mcn[0] = s->mcn;
3596 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
3597 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
3598}
3599
3600static int sca_switch_to_extended(struct kvm *kvm)
3601{
3602 struct bsca_block *old_sca = kvm->arch.sca;
3603 struct esca_block *new_sca;
3604 struct kvm_vcpu *vcpu;
3605 unsigned long vcpu_idx;
3606 u32 scaol, scaoh;
3607 phys_addr_t new_sca_phys;
3608
3609 if (kvm->arch.use_esca)
3610 return 0;
3611
3612 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
3613 if (!new_sca)
3614 return -ENOMEM;
3615
3616 new_sca_phys = virt_to_phys(new_sca);
3617 scaoh = new_sca_phys >> 32;
3618 scaol = new_sca_phys & ESCA_SCAOL_MASK;
3619
3620 kvm_s390_vcpu_block_all(kvm);
3621 write_lock(&kvm->arch.sca_lock);
3622
3623 sca_copy_b_to_e(new_sca, old_sca);
3624
3625 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
3626 vcpu->arch.sie_block->scaoh = scaoh;
3627 vcpu->arch.sie_block->scaol = scaol;
3628 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3629 }
3630 kvm->arch.sca = new_sca;
3631 kvm->arch.use_esca = 1;
3632
3633 write_unlock(&kvm->arch.sca_lock);
3634 kvm_s390_vcpu_unblock_all(kvm);
3635
3636 free_page((unsigned long)old_sca);
3637
3638 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
3639 old_sca, kvm->arch.sca);
3640 return 0;
3641}
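
/*
 * Note on the sequence above (added summary): all VCPUs are blocked and
 * kicked out of SIE first, so no CPU can enter SIE with the old SCA while
 * it is copied; the copy and the pointer/ECB2 updates happen under the
 * sca_lock write lock, so sca_add_vcpu()/sca_del_vcpu() see either the old
 * or the new block, never a mix. Only then is the old basic SCA freed.
 */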
3642
3643static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
3644{
3645 int rc;
3646
	if (!kvm_s390_use_sca_entries())
		return id < KVM_MAX_VCPUS;
3652 if (id < KVM_S390_BSCA_CPU_SLOTS)
3653 return true;
3654 if (!sclp.has_esca || !sclp.has_64bscao)
3655 return false;
3656
3657 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
3658
3659 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
3660}
3661
3662/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3663static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3664{
3665 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
3666 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3667 vcpu->arch.cputm_start = get_tod_clock_fast();
3668 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3669}
3670
3671/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3672static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3673{
3674 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
3675 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3676 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3677 vcpu->arch.cputm_start = 0;
3678 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3679}
3680
3681/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3682static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3683{
3684 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3685 vcpu->arch.cputm_enabled = true;
3686 __start_cpu_timer_accounting(vcpu);
3687}
3688
3689/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3690static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3691{
3692 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3693 __stop_cpu_timer_accounting(vcpu);
3694 vcpu->arch.cputm_enabled = false;
3695}
3696
3697static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3698{
3699 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3700 __enable_cpu_timer_accounting(vcpu);
3701 preempt_enable();
3702}
3703
3704static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3705{
3706 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3707 __disable_cpu_timer_accounting(vcpu);
3708 preempt_enable();
3709}
3710
3711/* set the cpu timer - may only be called from the VCPU thread itself */
3712void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3713{
3714 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3715 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3716 if (vcpu->arch.cputm_enabled)
3717 vcpu->arch.cputm_start = get_tod_clock_fast();
3718 vcpu->arch.sie_block->cputm = cputm;
3719 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3720 preempt_enable();
3721}
3722
3723/* update and get the cpu timer - can also be called from other VCPU threads */
3724__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3725{
3726 unsigned int seq;
3727 __u64 value;
3728
3729 if (unlikely(!vcpu->arch.cputm_enabled))
3730 return vcpu->arch.sie_block->cputm;
3731
3732 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3733 do {
3734 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3735 /*
3736 * If the writer would ever execute a read in the critical
3737 * section, e.g. in irq context, we have a deadlock.
3738 */
3739 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3740 value = vcpu->arch.sie_block->cputm;
3741 /* if cputm_start is 0, accounting is being started/stopped */
3742 if (likely(vcpu->arch.cputm_start))
3743 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3744 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3745 preempt_enable();
3746 return value;
3747}
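
/*
 * Reader-side note (added): raw_read_seqcount() does not wait for a writer,
 * so the snapshot may be odd. Passing "seq & ~1" to read_seqcount_retry()
 * makes an odd snapshot always compare unequal, turning "started during a
 * write" into one more loop iteration instead of a blocking wait. The
 * WARN_ON_ONCE flags the only case that cannot make progress: reading on
 * the very CPU that sits inside the write-side critical section, e.g. from
 * irq context.
 */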
3748
3749void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3750{
3752 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
3753 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3754 __start_cpu_timer_accounting(vcpu);
3755 vcpu->cpu = cpu;
3756}
3757
3758void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3759{
3760 vcpu->cpu = -1;
3761 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3762 __stop_cpu_timer_accounting(vcpu);
3763 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
3765}
3766
3767void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
3768{
3769 mutex_lock(&vcpu->kvm->lock);
3770 preempt_disable();
3771 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
3772 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
3773 preempt_enable();
3774 mutex_unlock(&vcpu->kvm->lock);
3775 if (!kvm_is_ucontrol(vcpu->kvm)) {
3776 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
3777 sca_add_vcpu(vcpu);
3778 }
3779 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3780 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3781}
3782
3783static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3784{
3785 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3786 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3787 return true;
3788 return false;
3789}
3790
3791static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3792{
3793 /* At least one ECC subfunction must be present */
3794 return kvm_has_pckmo_subfunc(kvm, 32) ||
3795 kvm_has_pckmo_subfunc(kvm, 33) ||
3796 kvm_has_pckmo_subfunc(kvm, 34) ||
3797 kvm_has_pckmo_subfunc(kvm, 40) ||
3798 kvm_has_pckmo_subfunc(kvm, 41);
3800}
3801
3802static bool kvm_has_pckmo_hmac(struct kvm *kvm)
3803{
3804 /* At least one HMAC subfunction must be present */
3805 return kvm_has_pckmo_subfunc(kvm, 118) ||
3806 kvm_has_pckmo_subfunc(kvm, 122);
3807}
3808
3809static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3810{
3811 /*
3812 * If the AP instructions are not being interpreted and the MSAX3
3813 * facility is not configured for the guest, there is nothing to set up.
3814 */
3815 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
3816 return;
3817
3818 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
3819 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
3820 vcpu->arch.sie_block->eca &= ~ECA_APIE;
3821 vcpu->arch.sie_block->ecd &= ~(ECD_ECC | ECD_HMAC);
3822
3823 if (vcpu->kvm->arch.crypto.apie)
3824 vcpu->arch.sie_block->eca |= ECA_APIE;
3825
3826 /* Set up protected key support */
3827 if (vcpu->kvm->arch.crypto.aes_kw) {
3828 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
3829 /* ecc/hmac is also wrapped with AES key */
3830 if (kvm_has_pckmo_ecc(vcpu->kvm))
3831 vcpu->arch.sie_block->ecd |= ECD_ECC;
3832 if (kvm_has_pckmo_hmac(vcpu->kvm))
3833 vcpu->arch.sie_block->ecd |= ECD_HMAC;
3834 }
3835
3836 if (vcpu->kvm->arch.crypto.dea_kw)
3837 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
3838}
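
/*
 * Note (added summary, not upstream text): kvm_s390_vcpu_crypto_setup()
 * first clears all crypto-related ECB3/ECA/ECD bits and then re-derives
 * them from the VM-wide state, so a change to the VM's key-wrapping or
 * APIE configuration takes effect the next time this runs for a VCPU.
 */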
3839
3840void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3841{
3842 free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
3843 vcpu->arch.sie_block->cbrlo = 0;
3844}
3845
3846int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3847{
3848 void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3849
3850 if (!cbrlo_page)
3851 return -ENOMEM;
3852
3853 vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);
3854 return 0;
3855}
3856
3857static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3858{
3859 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3860
3861 vcpu->arch.sie_block->ibc = model->ibc;
3862 if (test_kvm_facility(vcpu->kvm, 7))
3863 vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
3864}
3865
3866static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3867{
3868 int rc = 0;
3869 u16 uvrc, uvrrc;
3870
3871 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3872 CPUSTAT_SM |
3873 CPUSTAT_STOPPED);
3874
3875 if (test_kvm_facility(vcpu->kvm, 78))
3876 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
3877 else if (test_kvm_facility(vcpu->kvm, 8))
3878 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
3879
3880 kvm_s390_vcpu_setup_model(vcpu);
3881
3882 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3883 if (MACHINE_HAS_ESOP)
3884 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
3885 if (test_kvm_facility(vcpu->kvm, 9))
3886 vcpu->arch.sie_block->ecb |= ECB_SRSI;
3887 if (test_kvm_facility(vcpu->kvm, 11))
3888 vcpu->arch.sie_block->ecb |= ECB_PTF;
3889 if (test_kvm_facility(vcpu->kvm, 73))
3890 vcpu->arch.sie_block->ecb |= ECB_TE;
3891 if (!kvm_is_ucontrol(vcpu->kvm))
3892 vcpu->arch.sie_block->ecb |= ECB_SPECI;
3893
3894 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
3895 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
3896 if (test_kvm_facility(vcpu->kvm, 130))
3897 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3898 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
3899 if (sclp.has_cei)
3900 vcpu->arch.sie_block->eca |= ECA_CEI;
3901 if (sclp.has_ib)
3902 vcpu->arch.sie_block->eca |= ECA_IB;
3903 if (sclp.has_siif)
3904 vcpu->arch.sie_block->eca |= ECA_SII;
3905 if (sclp.has_sigpif)
3906 vcpu->arch.sie_block->eca |= ECA_SIGPI;
3907 if (test_kvm_facility(vcpu->kvm, 129)) {
3908 vcpu->arch.sie_block->eca |= ECA_VX;
3909 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3910 }
3911 if (test_kvm_facility(vcpu->kvm, 139))
3912 vcpu->arch.sie_block->ecd |= ECD_MEF;
3913 if (test_kvm_facility(vcpu->kvm, 156))
3914 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3915 if (vcpu->arch.sie_block->gd) {
3916 vcpu->arch.sie_block->eca |= ECA_AIV;
3917 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3918 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3919 }
3920 vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
3921 vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);
3922
3923 if (sclp.has_kss)
3924 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
3925 else
3926 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
3927
3928 if (vcpu->kvm->arch.use_cmma) {
3929 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3930 if (rc)
3931 return rc;
3932 }
3933 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3934 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
3935
3936 vcpu->arch.sie_block->hpid = HPID_KVM;
3937
3938 kvm_s390_vcpu_crypto_setup(vcpu);
3939
3940 kvm_s390_vcpu_pci_setup(vcpu);
3941
3942 mutex_lock(&vcpu->kvm->lock);
3943 if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3944 rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3945 if (rc)
3946 kvm_s390_vcpu_unsetup_cmma(vcpu);
3947 }
3948 mutex_unlock(&vcpu->kvm->lock);
3949
3950 return rc;
3951}
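
/*
 * Overview (added summary): kvm_s390_vcpu_setup() translates machine
 * facilities and SCLP capabilities into SIE interpretation controls, e.g.
 * facility 73 -> ECB_TE, facility 129 -> ECA_VX, facility 139 -> ECD_MEF.
 * A control that stays off leaves the feature either invisible to the
 * guest or intercepted for emulation by KVM.
 */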
3952
3953int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3954{
3955 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3956 return -EINVAL;
3957 return 0;
3958}
3959
3960int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
3961{
3962 struct sie_page *sie_page;
3963 int rc;
3964
3965 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
3966 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
3967 if (!sie_page)
3968 return -ENOMEM;
3969
3970 vcpu->arch.sie_block = &sie_page->sie_block;
3971 vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);
3972
3973 /* the real guest size will always be smaller than msl */
3974 vcpu->arch.sie_block->mso = 0;
3975 vcpu->arch.sie_block->msl = sclp.hamax;
3976
3977 vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
3978 spin_lock_init(&vcpu->arch.local_int.lock);
3979 vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
3980 seqcount_init(&vcpu->arch.cputm_seqcount);
3981
3982 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3983 kvm_clear_async_pf_completion_queue(vcpu);
3984 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3985 KVM_SYNC_GPRS |
3986 KVM_SYNC_ACRS |
3987 KVM_SYNC_CRS |
3988 KVM_SYNC_ARCH0 |
3989 KVM_SYNC_PFAULT |
3990 KVM_SYNC_DIAG318;
3991 vcpu->arch.acrs_loaded = false;
3992 kvm_s390_set_prefix(vcpu, 0);
3993 if (test_kvm_facility(vcpu->kvm, 64))
3994 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3995 if (test_kvm_facility(vcpu->kvm, 82))
3996 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3997 if (test_kvm_facility(vcpu->kvm, 133))
3998 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3999 if (test_kvm_facility(vcpu->kvm, 156))
4000 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
	/*
	 * fprs can be synchronized via vrs, even if the guest has no vx. With
	 * cpu_has_vx(), (load|store)_fpu_regs() will work with vrs format.
	 */
4004 if (cpu_has_vx())
4005 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
4006 else
4007 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
4008
4009 if (kvm_is_ucontrol(vcpu->kvm)) {
4010 rc = __kvm_ucontrol_vcpu_init(vcpu);
4011 if (rc)
4012 goto out_free_sie_block;
4013 }
4014
4015 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
4016 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
4017 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
4018
4019 rc = kvm_s390_vcpu_setup(vcpu);
4020 if (rc)
4021 goto out_ucontrol_uninit;
4022
4023 kvm_s390_update_topology_change_report(vcpu->kvm, 1);
4024 return 0;
4025
4026out_ucontrol_uninit:
4027 if (kvm_is_ucontrol(vcpu->kvm))
4028 gmap_remove(vcpu->arch.gmap);
4029out_free_sie_block:
4030 free_page((unsigned long)(vcpu->arch.sie_block));
4031 return rc;
4032}
4033
4034int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
4035{
4036 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
4037 return kvm_s390_vcpu_has_irq(vcpu, 0);
4038}
4039
4040bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
4041{
4042 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
4043}
4044
4045void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
4046{
4047 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
4048 exit_sie(vcpu);
4049}
4050
4051void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
4052{
4053 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
4054}
4055
4056static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
4057{
4058 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
4059 exit_sie(vcpu);
4060}
4061
4062bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
4063{
4064 return atomic_read(&vcpu->arch.sie_block->prog20) &
4065 (PROG_BLOCK_SIE | PROG_REQUEST);
4066}
4067
4068static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
4069{
4070 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
4071}
4072
/*
 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
4077void exit_sie(struct kvm_vcpu *vcpu)
4078{
4079 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
4080 kvm_s390_vsie_kick(vcpu);
4081 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
4082 cpu_relax();
4083}
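
/*
 * Mechanism note (added): CPUSTAT_STOP_INT makes SIE exit at the next
 * interruptible point, kvm_s390_vsie_kick() additionally covers a CPU that
 * currently runs a nested (vsie) guest, and the cpu_relax() loop waits for
 * the PROG_IN_SIE bit in prog0c to clear. A caller thus knows the CPU has
 * really left SIE when this returns, not merely that it was asked to leave.
 */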
4084
4085/* Kick a guest cpu out of SIE to process a request synchronously */
4086void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
4087{
4088 __kvm_make_request(req, vcpu);
4089 kvm_s390_vcpu_request(vcpu);
4090}
4091
4092static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
4093 unsigned long end)
4094{
4095 struct kvm *kvm = gmap->private;
4096 struct kvm_vcpu *vcpu;
4097 unsigned long prefix;
4098 unsigned long i;
4099
4100 trace_kvm_s390_gmap_notifier(start, end, gmap_is_shadow(gmap));
4101
4102 if (gmap_is_shadow(gmap))
4103 return;
4104 if (start >= 1UL << 31)
4105 /* We are only interested in prefix pages */
4106 return;
4107 kvm_for_each_vcpu(i, vcpu, kvm) {
4108 /* match against both prefix pages */
4109 prefix = kvm_s390_get_prefix(vcpu);
4110 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
4111 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
4112 start, end);
4113 kvm_s390_sync_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
4114 }
4115 }
4116}
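
/*
 * Worked example (added): the prefix area covers two pages, so a
 * notification range [start, end] intersects it iff
 *
 *	prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1
 *
 * With prefix = 0x8000 and 4K pages the guarded range is 0x8000..0x9fff;
 * an invalidation of 0x9000..0x9fff therefore matches and triggers
 * KVM_REQ_REFRESH_GUEST_PREFIX for that VCPU.
 */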
4117
4118bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
4119{
4120 /* do not poll with more than halt_poll_max_steal percent of steal time */
4121 if (get_lowcore()->avg_steal_timer * 100 / (TICK_USEC << 12) >=
4122 READ_ONCE(halt_poll_max_steal)) {
4123 vcpu->stat.halt_no_poll_steal++;
4124 return true;
4125 }
4126 return false;
4127}
4128
4129int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
4130{
4131 /* kvm common code refers to this, but never calls it */
4132 BUG();
4133 return 0;
4134}
4135
4136static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
4137 struct kvm_one_reg *reg)
4138{
4139 int r = -EINVAL;
4140
4141 switch (reg->id) {
4142 case KVM_REG_S390_TODPR:
4143 r = put_user(vcpu->arch.sie_block->todpr,
4144 (u32 __user *)reg->addr);
4145 break;
4146 case KVM_REG_S390_EPOCHDIFF:
4147 r = put_user(vcpu->arch.sie_block->epoch,
4148 (u64 __user *)reg->addr);
4149 break;
4150 case KVM_REG_S390_CPU_TIMER:
4151 r = put_user(kvm_s390_get_cpu_timer(vcpu),
4152 (u64 __user *)reg->addr);
4153 break;
4154 case KVM_REG_S390_CLOCK_COMP:
4155 r = put_user(vcpu->arch.sie_block->ckc,
4156 (u64 __user *)reg->addr);
4157 break;
4158 case KVM_REG_S390_PFTOKEN:
4159 r = put_user(vcpu->arch.pfault_token,
4160 (u64 __user *)reg->addr);
4161 break;
4162 case KVM_REG_S390_PFCOMPARE:
4163 r = put_user(vcpu->arch.pfault_compare,
4164 (u64 __user *)reg->addr);
4165 break;
4166 case KVM_REG_S390_PFSELECT:
4167 r = put_user(vcpu->arch.pfault_select,
4168 (u64 __user *)reg->addr);
4169 break;
4170 case KVM_REG_S390_PP:
4171 r = put_user(vcpu->arch.sie_block->pp,
4172 (u64 __user *)reg->addr);
4173 break;
4174 case KVM_REG_S390_GBEA:
4175 r = put_user(vcpu->arch.sie_block->gbea,
4176 (u64 __user *)reg->addr);
4177 break;
4178 default:
4179 break;
4180 }
4181
4182 return r;
4183}
4184
4185static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
4186 struct kvm_one_reg *reg)
4187{
4188 int r = -EINVAL;
4189 __u64 val;
4190
4191 switch (reg->id) {
4192 case KVM_REG_S390_TODPR:
4193 r = get_user(vcpu->arch.sie_block->todpr,
4194 (u32 __user *)reg->addr);
4195 break;
4196 case KVM_REG_S390_EPOCHDIFF:
4197 r = get_user(vcpu->arch.sie_block->epoch,
4198 (u64 __user *)reg->addr);
4199 break;
4200 case KVM_REG_S390_CPU_TIMER:
4201 r = get_user(val, (u64 __user *)reg->addr);
4202 if (!r)
4203 kvm_s390_set_cpu_timer(vcpu, val);
4204 break;
4205 case KVM_REG_S390_CLOCK_COMP:
4206 r = get_user(vcpu->arch.sie_block->ckc,
4207 (u64 __user *)reg->addr);
4208 break;
4209 case KVM_REG_S390_PFTOKEN:
4210 r = get_user(vcpu->arch.pfault_token,
4211 (u64 __user *)reg->addr);
4212 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4213 kvm_clear_async_pf_completion_queue(vcpu);
4214 break;
4215 case KVM_REG_S390_PFCOMPARE:
4216 r = get_user(vcpu->arch.pfault_compare,
4217 (u64 __user *)reg->addr);
4218 break;
4219 case KVM_REG_S390_PFSELECT:
4220 r = get_user(vcpu->arch.pfault_select,
4221 (u64 __user *)reg->addr);
4222 break;
4223 case KVM_REG_S390_PP:
4224 r = get_user(vcpu->arch.sie_block->pp,
4225 (u64 __user *)reg->addr);
4226 break;
4227 case KVM_REG_S390_GBEA:
4228 r = get_user(vcpu->arch.sie_block->gbea,
4229 (u64 __user *)reg->addr);
4230 break;
4231 default:
4232 break;
4233 }
4234
4235 return r;
4236}
4237
4238static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
4239{
4240 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
4241 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
4242 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
4243
4244 kvm_clear_async_pf_completion_queue(vcpu);
4245 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
4246 kvm_s390_vcpu_stop(vcpu);
4247 kvm_s390_clear_local_irqs(vcpu);
4248}
4249
4250static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
4251{
4252 /* Initial reset is a superset of the normal reset */
4253 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
4254
	/*
	 * This equals the initial cpu reset in the POP, but we don't switch
	 * to ESA. We do not only reset the internal data, but also ...
	 */
4259 vcpu->arch.sie_block->gpsw.mask = 0;
4260 vcpu->arch.sie_block->gpsw.addr = 0;
4261 kvm_s390_set_prefix(vcpu, 0);
4262 kvm_s390_set_cpu_timer(vcpu, 0);
4263 vcpu->arch.sie_block->ckc = 0;
4264 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
4265 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
4266 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
4267
4268 /* ... the data in sync regs */
4269 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
4270 vcpu->run->s.regs.ckc = 0;
4271 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
4272 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
4273 vcpu->run->psw_addr = 0;
4274 vcpu->run->psw_mask = 0;
4275 vcpu->run->s.regs.todpr = 0;
4276 vcpu->run->s.regs.cputm = 0;
4277 vcpu->run->s.regs.ckc = 0;
4278 vcpu->run->s.regs.pp = 0;
4279 vcpu->run->s.regs.gbea = 1;
4280 vcpu->run->s.regs.fpc = 0;
4281 /*
4282 * Do not reset these registers in the protected case, as some of
4283 * them are overlaid and they are not accessible in this case
4284 * anyway.
4285 */
4286 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4287 vcpu->arch.sie_block->gbea = 1;
4288 vcpu->arch.sie_block->pp = 0;
4289 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4290 vcpu->arch.sie_block->todpr = 0;
4291 }
4292}
4293
4294static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
4295{
4296 struct kvm_sync_regs *regs = &vcpu->run->s.regs;
4297
4298 /* Clear reset is a superset of the initial reset */
4299 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4300
	memset(&regs->gprs, 0, sizeof(regs->gprs));
	memset(&regs->vrs, 0, sizeof(regs->vrs));
	memset(&regs->acrs, 0, sizeof(regs->acrs));
	memset(&regs->gscb, 0, sizeof(regs->gscb));
4305
4306 regs->etoken = 0;
4307 regs->etoken_extension = 0;
4308}
4309
4310int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4311{
4312 vcpu_load(vcpu);
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
4314 vcpu_put(vcpu);
4315 return 0;
4316}
4317
4318int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4319{
4320 vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
4322 vcpu_put(vcpu);
4323 return 0;
4324}
4325
4326int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4327 struct kvm_sregs *sregs)
4328{
4329 vcpu_load(vcpu);
4330
4331 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
4332 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
4333
4334 vcpu_put(vcpu);
4335 return 0;
4336}
4337
4338int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4339 struct kvm_sregs *sregs)
4340{
4341 vcpu_load(vcpu);
4342
4343 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
4344 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
4345
4346 vcpu_put(vcpu);
4347 return 0;
4348}
4349
4350int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4351{
4352 int ret = 0;
4353
4354 vcpu_load(vcpu);
4355
4356 vcpu->run->s.regs.fpc = fpu->fpc;
4357 if (cpu_has_vx())
4358 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
4359 (freg_t *) fpu->fprs);
4360 else
4361 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
4362
4363 vcpu_put(vcpu);
4364 return ret;
4365}
4366
4367int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4368{
4369 vcpu_load(vcpu);
4370
4371 if (cpu_has_vx())
4372 convert_vx_to_fp((freg_t *) fpu->fprs,
4373 (__vector128 *) vcpu->run->s.regs.vrs);
4374 else
4375 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
4376 fpu->fpc = vcpu->run->s.regs.fpc;
4377
4378 vcpu_put(vcpu);
4379 return 0;
4380}
4381
4382static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
4383{
4384 int rc = 0;
4385
4386 if (!is_vcpu_stopped(vcpu))
4387 rc = -EBUSY;
4388 else {
4389 vcpu->run->psw_mask = psw.mask;
4390 vcpu->run->psw_addr = psw.addr;
4391 }
4392 return rc;
4393}
4394
4395int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4396 struct kvm_translation *tr)
4397{
4398 return -EINVAL; /* not implemented yet */
4399}
4400
4401#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
4402 KVM_GUESTDBG_USE_HW_BP | \
4403 KVM_GUESTDBG_ENABLE)
4404
4405int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
4406 struct kvm_guest_debug *dbg)
4407{
4408 int rc = 0;
4409
4410 vcpu_load(vcpu);
4411
4412 vcpu->guest_debug = 0;
4413 kvm_s390_clear_bp_data(vcpu);
4414
4415 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
4416 rc = -EINVAL;
4417 goto out;
4418 }
4419 if (!sclp.has_gpere) {
4420 rc = -EINVAL;
4421 goto out;
4422 }
4423
4424 if (dbg->control & KVM_GUESTDBG_ENABLE) {
4425 vcpu->guest_debug = dbg->control;
4426 /* enforce guest PER */
4427 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
4428
4429 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
4430 rc = kvm_s390_import_bp_data(vcpu, dbg);
4431 } else {
4432 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4433 vcpu->arch.guestdbg.last_bp = 0;
4434 }
4435
4436 if (rc) {
4437 vcpu->guest_debug = 0;
4438 kvm_s390_clear_bp_data(vcpu);
4439 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4440 }
4441
4442out:
4443 vcpu_put(vcpu);
4444 return rc;
4445}
4446
4447int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4448 struct kvm_mp_state *mp_state)
4449{
4450 int ret;
4451
4452 vcpu_load(vcpu);
4453
4454 /* CHECK_STOP and LOAD are not supported yet */
4455 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
4456 KVM_MP_STATE_OPERATING;
4457
4458 vcpu_put(vcpu);
4459 return ret;
4460}
4461
4462int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
4463 struct kvm_mp_state *mp_state)
4464{
4465 int rc = 0;
4466
4467 vcpu_load(vcpu);
4468
4469 /* user space knows about this interface - let it control the state */
4470 kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);
4471
4472 switch (mp_state->mp_state) {
4473 case KVM_MP_STATE_STOPPED:
4474 rc = kvm_s390_vcpu_stop(vcpu);
4475 break;
4476 case KVM_MP_STATE_OPERATING:
4477 rc = kvm_s390_vcpu_start(vcpu);
4478 break;
4479 case KVM_MP_STATE_LOAD:
4480 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4481 rc = -ENXIO;
4482 break;
4483 }
4484 rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
4485 break;
4486 case KVM_MP_STATE_CHECK_STOP:
4487 fallthrough; /* CHECK_STOP and LOAD are not supported yet */
4488 default:
4489 rc = -ENXIO;
4490 }
4491
4492 vcpu_put(vcpu);
4493 return rc;
4494}
4495
4496static bool ibs_enabled(struct kvm_vcpu *vcpu)
4497{
4498 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
4499}
4500
4501static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
4502{
4503retry:
4504 kvm_s390_vcpu_request_handled(vcpu);
4505 if (!kvm_request_pending(vcpu))
4506 return 0;
	/*
	 * If the guest prefix changed, re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
4514 if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
4515 int rc;
4516 rc = gmap_mprotect_notify(vcpu->arch.gmap,
4517 kvm_s390_get_prefix(vcpu),
4518 PAGE_SIZE * 2, PROT_WRITE);
4519 if (rc) {
4520 kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
4521 return rc;
4522 }
4523 goto retry;
4524 }
4525
4526 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
4527 vcpu->arch.sie_block->ihcpu = 0xffff;
4528 goto retry;
4529 }
4530
4531 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
4532 if (!ibs_enabled(vcpu)) {
4533 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
4534 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
4535 }
4536 goto retry;
4537 }
4538
4539 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
4540 if (ibs_enabled(vcpu)) {
4541 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
4542 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
4543 }
4544 goto retry;
4545 }
4546
4547 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
4548 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
4549 goto retry;
4550 }
4551
4552 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
4553 /*
4554 * Disable CMM virtualization; we will emulate the ESSA
4555 * instruction manually, in order to provide additional
4556 * functionalities needed for live migration.
4557 */
4558 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
4559 goto retry;
4560 }
4561
4562 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
4563 /*
4564 * Re-enable CMM virtualization if CMMA is available and
4565 * CMM has been used.
4566 */
4567 if ((vcpu->kvm->arch.use_cmma) &&
4568 (vcpu->kvm->mm->context.uses_cmm))
4569 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
4570 goto retry;
4571 }
4572
4573 /* we left the vsie handler, nothing to do, just clear the request */
4574 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
4575
4576 return 0;
4577}
4578
4579static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4580{
4581 struct kvm_vcpu *vcpu;
4582 union tod_clock clk;
4583 unsigned long i;
4584
4585 preempt_disable();
4586
4587 store_tod_clock_ext(&clk);
4588
4589 kvm->arch.epoch = gtod->tod - clk.tod;
4590 kvm->arch.epdx = 0;
4591 if (test_kvm_facility(kvm, 139)) {
4592 kvm->arch.epdx = gtod->epoch_idx - clk.ei;
4593 if (kvm->arch.epoch > gtod->tod)
4594 kvm->arch.epdx -= 1;
4595 }
4596
4597 kvm_s390_vcpu_block_all(kvm);
4598 kvm_for_each_vcpu(i, vcpu, kvm) {
4599 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
4600 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
4601 }
4602
4603 kvm_s390_vcpu_unblock_all(kvm);
4604 preempt_enable();
4605}
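
/*
 * Arithmetic note (added): the guest TOD is host TOD + epoch, so the epoch
 * is gtod->tod - clk.tod in modulo-2^64 arithmetic. With the multiple-epoch
 * facility (139) the epoch index gets the analogous difference, and
 * "epoch > gtod->tod" detects that the 64-bit subtraction wrapped, in which
 * case one borrow is propagated into epdx.
 */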
4606
4607int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4608{
4609 if (!mutex_trylock(&kvm->lock))
4610 return 0;
4611 __kvm_s390_set_tod_clock(kvm, gtod);
4612 mutex_unlock(&kvm->lock);
4613 return 1;
4614}
4615
4616static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
4617 unsigned long token)
4618{
4619 struct kvm_s390_interrupt inti;
4620 struct kvm_s390_irq irq;
4621
4622 if (start_token) {
4623 irq.u.ext.ext_params2 = token;
4624 irq.type = KVM_S390_INT_PFAULT_INIT;
4625 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
4626 } else {
4627 inti.type = KVM_S390_INT_PFAULT_DONE;
4628 inti.parm64 = token;
4629 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
4630 }
4631}
4632
4633bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
4634 struct kvm_async_pf *work)
4635{
4636 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
4637 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
4638
4639 return true;
4640}
4641
4642void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
4643 struct kvm_async_pf *work)
4644{
4645 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
4646 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
4647}
4648
4649void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
4650 struct kvm_async_pf *work)
4651{
4652 /* s390 will always inject the page directly */
4653}
4654
4655bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
4656{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to clean up
	 */
4661 return true;
4662}
4663
4664static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
4665{
4666 hva_t hva;
4667 struct kvm_arch_async_pf arch;
4668
4669 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4670 return false;
4671 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
4672 vcpu->arch.pfault_compare)
4673 return false;
4674 if (psw_extint_disabled(vcpu))
4675 return false;
4676 if (kvm_s390_vcpu_has_irq(vcpu, 0))
4677 return false;
4678 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
4679 return false;
4680 if (!vcpu->arch.gmap->pfault_enabled)
4681 return false;
4682
4683 hva = gfn_to_hva(vcpu->kvm, current->thread.gmap_teid.addr);
4684 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
4685 return false;
4686
4687 return kvm_setup_async_pf(vcpu, current->thread.gmap_teid.addr * PAGE_SIZE, hva, &arch);
4688}
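
/*
 * Summary (added): async pfault is only armed when every precondition
 * holds: userspace installed a valid token, the guest PSW matches the
 * pfault compare/select mask, external interrupts and the service signal
 * subclass are enabled, no other interrupt is pending and the gmap has
 * pfault enabled. Otherwise the caller falls back to a synchronous fault,
 * see vcpu_post_run_handle_fault().
 */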
4689
4690static int vcpu_pre_run(struct kvm_vcpu *vcpu)
4691{
4692 int rc, cpuflags;
4693
	/*
	 * On s390, notifications for arriving pages will be delivered
	 * directly to the guest, but the housekeeping for completed pfaults
	 * is handled outside the worker.
	 */
4699 kvm_check_async_pf_completion(vcpu);
4700
4701 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
4702 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
4703
4704 if (need_resched())
4705 schedule();
4706
4707 if (!kvm_is_ucontrol(vcpu->kvm)) {
4708 rc = kvm_s390_deliver_pending_interrupts(vcpu);
4709 if (rc || guestdbg_exit_pending(vcpu))
4710 return rc;
4711 }
4712
4713 rc = kvm_s390_handle_requests(vcpu);
4714 if (rc)
4715 return rc;
4716
4717 if (guestdbg_enabled(vcpu)) {
4718 kvm_s390_backup_guest_per_regs(vcpu);
4719 kvm_s390_patch_guest_per_regs(vcpu);
4720 }
4721
4722 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
4723
4724 vcpu->arch.sie_block->icptcode = 0;
4725 current->thread.gmap_int_code = 0;
4726 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4727 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
4728 trace_kvm_s390_sie_enter(vcpu, cpuflags);
4729
4730 return 0;
4731}
4732
4733static int vcpu_post_run_addressing_exception(struct kvm_vcpu *vcpu)
4734{
4735 struct kvm_s390_pgm_info pgm_info = {
4736 .code = PGM_ADDRESSING,
4737 };
4738 u8 opcode, ilen;
4739 int rc;
4740
4741 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4742 trace_kvm_s390_sie_fault(vcpu);
4743
4744 /*
4745 * We want to inject an addressing exception, which is defined as a
4746 * suppressing or terminating exception. However, since we came here
4747 * by a DAT access exception, the PSW still points to the faulting
4748 * instruction since DAT exceptions are nullifying. So we've got
4749 * to look up the current opcode to get the length of the instruction
4750 * to be able to forward the PSW.
4751 */
4752 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
4753 ilen = insn_length(opcode);
4754 if (rc < 0) {
4755 return rc;
4756 } else if (rc) {
4757 /* Instruction-Fetching Exceptions - we can't detect the ilen.
4758 * Forward by arbitrary ilc, injection will take care of
4759 * nullification if necessary.
4760 */
4761 pgm_info = vcpu->arch.pgm;
4762 ilen = 4;
4763 }
4764 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
4765 kvm_s390_forward_psw(vcpu, ilen);
4766 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
4767}
4768
4769static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
4770{
4771 unsigned int flags = 0;
4772 unsigned long gaddr;
4773 int rc = 0;
4774
4775 gaddr = current->thread.gmap_teid.addr * PAGE_SIZE;
4776 if (kvm_s390_cur_gmap_fault_is_write())
4777 flags = FAULT_FLAG_WRITE;
4778
4779 switch (current->thread.gmap_int_code & PGM_INT_CODE_MASK) {
4780 case 0:
4781 vcpu->stat.exit_null++;
4782 break;
4783 case PGM_NON_SECURE_STORAGE_ACCESS:
4784 KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
4785 "Unexpected program interrupt 0x%x, TEID 0x%016lx",
4786 current->thread.gmap_int_code, current->thread.gmap_teid.val);
4787 /*
4788 * This is normal operation; a page belonging to a protected
4789 * guest has not been imported yet. Try to import the page into
4790 * the protected guest.
4791 */
4792 if (gmap_convert_to_secure(vcpu->arch.gmap, gaddr) == -EINVAL)
4793 send_sig(SIGSEGV, current, 0);
4794 break;
4795 case PGM_SECURE_STORAGE_ACCESS:
4796 case PGM_SECURE_STORAGE_VIOLATION:
4797 KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
4798 "Unexpected program interrupt 0x%x, TEID 0x%016lx",
4799 current->thread.gmap_int_code, current->thread.gmap_teid.val);
4800 /*
4801 * This can happen after a reboot with asynchronous teardown;
4802 * the new guest (normal or protected) will run on top of the
4803 * previous protected guest. The old pages need to be destroyed
4804 * so the new guest can use them.
4805 */
4806 if (gmap_destroy_page(vcpu->arch.gmap, gaddr)) {
4807 /*
4808 * Either KVM messed up the secure guest mapping or the
4809 * same page is mapped into multiple secure guests.
4810 *
4811 * This exception is only triggered when a guest 2 is
4812 * running and can therefore never occur in kernel
4813 * context.
4814 */
4815 pr_warn_ratelimited("Secure storage violation (%x) in task: %s, pid %d\n",
4816 current->thread.gmap_int_code, current->comm,
4817 current->pid);
4818 send_sig(SIGSEGV, current, 0);
4819 }
4820 break;
4821 case PGM_PROTECTION:
4822 case PGM_SEGMENT_TRANSLATION:
4823 case PGM_PAGE_TRANSLATION:
4824 case PGM_ASCE_TYPE:
4825 case PGM_REGION_FIRST_TRANS:
4826 case PGM_REGION_SECOND_TRANS:
4827 case PGM_REGION_THIRD_TRANS:
4828 KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
4829 "Unexpected program interrupt 0x%x, TEID 0x%016lx",
4830 current->thread.gmap_int_code, current->thread.gmap_teid.val);
4831 if (vcpu->arch.gmap->pfault_enabled) {
4832 rc = gmap_fault(vcpu->arch.gmap, gaddr, flags | FAULT_FLAG_RETRY_NOWAIT);
4833 if (rc == -EFAULT)
4834 return vcpu_post_run_addressing_exception(vcpu);
4835 if (rc == -EAGAIN) {
4836 trace_kvm_s390_major_guest_pfault(vcpu);
4837 if (kvm_arch_setup_async_pf(vcpu))
4838 return 0;
4839 vcpu->stat.pfault_sync++;
4840 } else {
4841 return rc;
4842 }
4843 }
4844 rc = gmap_fault(vcpu->arch.gmap, gaddr, flags);
4845 if (rc == -EFAULT) {
4846 if (kvm_is_ucontrol(vcpu->kvm)) {
4847 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4848 vcpu->run->s390_ucontrol.trans_exc_code = gaddr;
4849 vcpu->run->s390_ucontrol.pgm_code = 0x10;
4850 return -EREMOTE;
4851 }
4852 return vcpu_post_run_addressing_exception(vcpu);
4853 }
4854 break;
4855 default:
4856 KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx",
4857 current->thread.gmap_int_code, current->thread.gmap_teid.val);
4858 send_sig(SIGSEGV, current, 0);
4859 break;
4860 }
4861 return rc;
4862}
4863
4864static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
4865{
4866 struct mcck_volatile_info *mcck_info;
4867 struct sie_page *sie_page;
4868 int rc;
4869
4870 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
4871 vcpu->arch.sie_block->icptcode);
4872 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4873
4874 if (guestdbg_enabled(vcpu))
4875 kvm_s390_restore_guest_per_regs(vcpu);
4876
4877 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4878 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
4879
4880 if (exit_reason == -EINTR) {
4881 VCPU_EVENT(vcpu, 3, "%s", "machine check");
4882 sie_page = container_of(vcpu->arch.sie_block,
4883 struct sie_page, sie_block);
4884 mcck_info = &sie_page->mcck_info;
4885 kvm_s390_reinject_machine_check(vcpu, mcck_info);
4886 return 0;
4887 }
4888
4889 if (vcpu->arch.sie_block->icptcode > 0) {
4890 rc = kvm_handle_sie_intercept(vcpu);
4891
4892 if (rc != -EOPNOTSUPP)
4893 return rc;
4894 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
4895 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4896 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4897 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4898 return -EREMOTE;
4899 }
4900
4901 return vcpu_post_run_handle_fault(vcpu);
4902}
4903
4904#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
4905static int __vcpu_run(struct kvm_vcpu *vcpu)
4906{
4907 int rc, exit_reason;
4908 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
4909
	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when
	 * running the guest), so that memslots (and other stuff) are
	 * protected.
	 */
4914 kvm_vcpu_srcu_read_lock(vcpu);
4915
4916 do {
4917 rc = vcpu_pre_run(vcpu);
4918 if (rc || guestdbg_exit_pending(vcpu))
4919 break;
4920
4921 kvm_vcpu_srcu_read_unlock(vcpu);
		/*
		 * As PF_VCPU will be used in the fault handler, there
		 * must be no uaccess between guest_enter and guest_exit.
		 */
4926 local_irq_disable();
4927 guest_enter_irqoff();
4928 __disable_cpu_timer_accounting(vcpu);
4929 local_irq_enable();
4930 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4931 memcpy(sie_page->pv_grregs,
4932 vcpu->run->s.regs.gprs,
4933 sizeof(sie_page->pv_grregs));
4934 }
4935 exit_reason = sie64a(vcpu->arch.sie_block,
4936 vcpu->run->s.regs.gprs,
4937 vcpu->arch.gmap->asce);
4938 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4939 memcpy(vcpu->run->s.regs.gprs,
4940 sie_page->pv_grregs,
4941 sizeof(sie_page->pv_grregs));
4942 /*
4943 * We're not allowed to inject interrupts on intercepts
4944 * that leave the guest state in an "in-between" state
4945 * where the next SIE entry will do a continuation.
4946 * Fence interrupts in our "internal" PSW.
4947 */
4948 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4949 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4950 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4951 }
4952 }
4953 local_irq_disable();
4954 __enable_cpu_timer_accounting(vcpu);
4955 guest_exit_irqoff();
4956 local_irq_enable();
4957 kvm_vcpu_srcu_read_lock(vcpu);
4958
4959 rc = vcpu_post_run(vcpu, exit_reason);
4960 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
4961
4962 kvm_vcpu_srcu_read_unlock(vcpu);
4963 return rc;
4964}
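
/*
 * Locking note (added): kvm->srcu is dropped exactly around the sie64a()
 * call, because the guest may run for a long time and holding the SRCU
 * read lock across it would stall memslot updates. It is re-taken before
 * vcpu_post_run() so intercept handling can safely translate guest
 * addresses again.
 */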
4965
4966static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
4967{
4968 struct kvm_run *kvm_run = vcpu->run;
4969 struct runtime_instr_cb *riccb;
4970 struct gs_cb *gscb;
4971
4972 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
4973 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
4974 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4975 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
4976 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4977 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4978 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4979 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4980 }
4981 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4982 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4983 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4984 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
4985 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4986 kvm_clear_async_pf_completion_queue(vcpu);
4987 }
4988 if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
4989 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
4990 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
4991 VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
4992 }
4993 /*
4994 * If userspace sets the riccb (e.g. after migration) to a valid state,
4995 * we should enable RI here instead of doing the lazy enablement.
4996 */
4997 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
4998 test_kvm_facility(vcpu->kvm, 64) &&
4999 riccb->v &&
5000 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
5001 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
5002 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
5003 }
5004 /*
5005 * If userspace sets the gscb (e.g. after migration) to non-zero,
5006 * we should enable GS here instead of doing the lazy enablement.
5007 */
5008 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
5009 test_kvm_facility(vcpu->kvm, 133) &&
5010 gscb->gssm &&
5011 !vcpu->arch.gs_enabled) {
5012 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
5013 vcpu->arch.sie_block->ecb |= ECB_GS;
5014 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
5015 vcpu->arch.gs_enabled = 1;
5016 }
5017 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
5018 test_kvm_facility(vcpu->kvm, 82)) {
5019 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
5020 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
5021 }
5022 if (MACHINE_HAS_GS) {
5023 preempt_disable();
5024 local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
5025 if (current->thread.gs_cb) {
5026 vcpu->arch.host_gscb = current->thread.gs_cb;
5027 save_gs_cb(vcpu->arch.host_gscb);
5028 }
5029 if (vcpu->arch.gs_enabled) {
5030 current->thread.gs_cb = (struct gs_cb *)
5031 &vcpu->run->s.regs.gscb;
5032 restore_gs_cb(current->thread.gs_cb);
5033 }
5034 preempt_enable();
5035 }
5036 /* SIE will load etoken directly from SDNX and therefore kvm_run */
5037}
5038
5039static void sync_regs(struct kvm_vcpu *vcpu)
5040{
5041 struct kvm_run *kvm_run = vcpu->run;
5042
5043 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
5044 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
5045 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
5046 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
5047 /* some control register changes require a tlb flush */
5048 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
5049 }
5050 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
5051 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
5052 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
5053 }
5054 save_access_regs(vcpu->arch.host_acrs);
5055 restore_access_regs(vcpu->run->s.regs.acrs);
5056 vcpu->arch.acrs_loaded = true;
5057 kvm_s390_fpu_load(vcpu->run);
5058 /* Sync fmt2 only data */
5059 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
5060 sync_regs_fmt2(vcpu);
5061 } else {
		/*
		 * In several places we have to modify our internal view to
		 * not do things that are disallowed by the ultravisor. For
		 * example, we must not inject interrupts after specific exits
		 * (e.g. 112: prefix page not secure). We do this by turning
		 * off the machine check, external and I/O interrupt bits
		 * of our PSW copy. To avoid getting validity intercepts, we
		 * only accept the condition code from userspace.
		 */
5071 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
5072 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
5073 PSW_MASK_CC;
5074 }
5075
5076 kvm_run->kvm_dirty_regs = 0;
5077}
5078
5079static void store_regs_fmt2(struct kvm_vcpu *vcpu)
5080{
5081 struct kvm_run *kvm_run = vcpu->run;
5082
5083 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
5084 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
5085 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
5086 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
5087 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
5088 if (MACHINE_HAS_GS) {
5089 preempt_disable();
5090 local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
5091 if (vcpu->arch.gs_enabled)
5092 save_gs_cb(current->thread.gs_cb);
5093 current->thread.gs_cb = vcpu->arch.host_gscb;
5094 restore_gs_cb(vcpu->arch.host_gscb);
5095 if (!vcpu->arch.host_gscb)
5096 local_ctl_clear_bit(2, CR2_GUARDED_STORAGE_BIT);
5097 vcpu->arch.host_gscb = NULL;
5098 preempt_enable();
5099 }
5100 /* SIE will save etoken directly into SDNX and therefore kvm_run */
5101}
5102
5103static void store_regs(struct kvm_vcpu *vcpu)
5104{
5105 struct kvm_run *kvm_run = vcpu->run;
5106
5107 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
5108 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
5109 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
5110 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
5111 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
5112 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
5113 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
5114 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
5115 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
5116 save_access_regs(vcpu->run->s.regs.acrs);
5117 restore_access_regs(vcpu->arch.host_acrs);
5118 vcpu->arch.acrs_loaded = false;
5119 kvm_s390_fpu_store(vcpu->run);
5120 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
5121 store_regs_fmt2(vcpu);
5122}
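
/*
 * Symmetry note (added): store_regs() mirrors sync_regs(); every field
 * copied from kvm_run into the SIE block or vcpu->arch on entry is written
 * back on exit, and the lazy access-register and FPU switch done in
 * sync_regs() is undone here, so host state is intact when the run ioctl
 * returns to userspace.
 */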
5123
5124int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
5125{
5126 struct kvm_run *kvm_run = vcpu->run;
5127 DECLARE_KERNEL_FPU_ONSTACK32(fpu);
5128 int rc;
5129
5130 /*
5131 * Running a VM while dumping always has the potential to
5132 * produce inconsistent dump data. But for PV vcpus a SIE
5133 * entry while dumping could also lead to a fatal validity
5134 * intercept which we absolutely want to avoid.
5135 */
5136 if (vcpu->kvm->arch.pv.dumping)
5137 return -EINVAL;
5138
5139 if (!vcpu->wants_to_run)
5140 return -EINTR;
5141
5142 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
5143 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
5144 return -EINVAL;
5145
5146 vcpu_load(vcpu);
5147
5148 if (guestdbg_exit_pending(vcpu)) {
5149 kvm_s390_prepare_debug_exit(vcpu);
5150 rc = 0;
5151 goto out;
5152 }
5153
5154 kvm_sigset_activate(vcpu);
5155
	/*
	 * No need to check the return value of vcpu_start: it can only fail
	 * for protvirt, and protvirt implies user-controlled cpu state.
	 */
5160 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
5161 kvm_s390_vcpu_start(vcpu);
5162 } else if (is_vcpu_stopped(vcpu)) {
5163 pr_err_ratelimited("can't run stopped vcpu %d\n",
5164 vcpu->vcpu_id);
5165 rc = -EINVAL;
5166 goto out;
5167 }
5168
5169 kernel_fpu_begin(&fpu, KERNEL_FPC | KERNEL_VXR);
5170 sync_regs(vcpu);
5171 enable_cpu_timer_accounting(vcpu);
5172
5173 might_fault();
5174 rc = __vcpu_run(vcpu);
5175
5176 if (signal_pending(current) && !rc) {
5177 kvm_run->exit_reason = KVM_EXIT_INTR;
5178 rc = -EINTR;
5179 }
5180
5181 if (guestdbg_exit_pending(vcpu) && !rc) {
5182 kvm_s390_prepare_debug_exit(vcpu);
5183 rc = 0;
5184 }
5185
5186 if (rc == -EREMOTE) {
5187 /* userspace support is needed, kvm_run has been prepared */
5188 rc = 0;
5189 }
5190
5191 disable_cpu_timer_accounting(vcpu);
5192 store_regs(vcpu);
5193 kernel_fpu_end(&fpu, KERNEL_FPC | KERNEL_VXR);
5194
5195 kvm_sigset_deactivate(vcpu);
5196
5197 vcpu->stat.exit_userspace++;
5198out:
5199 vcpu_put(vcpu);
5200 return rc;
5201}
5202
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
5209int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
5210{
5211 unsigned char archmode = 1;
5212 freg_t fprs[NUM_FPRS];
5213 unsigned int px;
5214 u64 clkcomp, cputm;
5215 int rc;
5216
5217 px = kvm_s390_get_prefix(vcpu);
5218 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
5219 if (write_guest_abs(vcpu, 163, &archmode, 1))
5220 return -EFAULT;
5221 gpa = 0;
5222 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
5223 if (write_guest_real(vcpu, 163, &archmode, 1))
5224 return -EFAULT;
5225 gpa = px;
5226 } else
5227 gpa -= __LC_FPREGS_SAVE_AREA;
5228
5229 /* manually convert vector registers if necessary */
5230 if (cpu_has_vx()) {
5231 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
5232 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5233 fprs, 128);
5234 } else {
5235 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5236 vcpu->run->s.regs.fprs, 128);
5237 }
5238 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
5239 vcpu->run->s.regs.gprs, 128);
5240 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
5241 &vcpu->arch.sie_block->gpsw, 16);
5242 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
5243 &px, 4);
5244 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
5245 &vcpu->run->s.regs.fpc, 4);
5246 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
5247 &vcpu->arch.sie_block->todpr, 4);
5248 cputm = kvm_s390_get_cpu_timer(vcpu);
5249 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
5250 &cputm, 8);
5251 clkcomp = vcpu->arch.sie_block->ckc >> 8;
5252 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
5253 &clkcomp, 8);
5254 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
5255 &vcpu->run->s.regs.acrs, 64);
5256 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
5257 &vcpu->arch.sie_block->gcr, 128);
5258 return rc ? -EFAULT : 0;
5259}
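
/*
 * Error handling note (added): the individual write_guest_abs() return
 * codes are only OR-ed together to detect "any failure"; the combined
 * value is never returned raw but collapsed to -EFAULT, which is why the
 * rc |= pattern is safe here.
 */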
5260
5261int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
5262{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * switch in the run ioctl. Let's update our copies before we save
	 * them into the save area.
	 */
5268 kvm_s390_fpu_store(vcpu->run);
5269 save_access_regs(vcpu->run->s.regs.acrs);
5270
5271 return kvm_s390_store_status_unloaded(vcpu, addr);
5272}
5273
5274static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5275{
5276 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
5277 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
5278}
5279
5280static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
5281{
5282 unsigned long i;
5283 struct kvm_vcpu *vcpu;
5284
5285 kvm_for_each_vcpu(i, vcpu, kvm) {
5286 __disable_ibs_on_vcpu(vcpu);
5287 }
5288}
5289
5290static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5291{
5292 if (!sclp.has_ibs)
5293 return;
5294 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
5295 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
5296}
5297
5298int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
5299{
5300 int i, online_vcpus, r = 0, started_vcpus = 0;
5301
5302 if (!is_vcpu_stopped(vcpu))
5303 return 0;
5304
5305 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
5306 /* Only one cpu at a time may enter/leave the STOPPED state. */
5307 spin_lock(&vcpu->kvm->arch.start_stop_lock);
5308 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5309
5310 /* Let's tell the UV that we want to change into the operating state */
5311 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5312 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
5313 if (r) {
5314 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5315 return r;
5316 }
5317 }
5318
5319 for (i = 0; i < online_vcpus; i++) {
5320 if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
5321 started_vcpus++;
5322 }
5323
5324 if (started_vcpus == 0) {
5325 /* we're the only active VCPU -> speed it up */
5326 __enable_ibs_on_vcpu(vcpu);
5327 } else if (started_vcpus == 1) {
5328 /*
5329 * As we are starting a second VCPU, we have to disable
5330 * the IBS facility on all VCPUs to remove potentially
5331 * outstanding ENABLE requests.
5332 */
5333 __disable_ibs_on_all_vcpus(vcpu->kvm);
5334 }
5335
5336 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
5337 /*
5338 * The real PSW might have changed due to a RESTART interpreted by the
5339 * ultravisor. We block all interrupts and let the next sie exit
5340 * refresh our view.
5341 */
5342 if (kvm_s390_pv_cpu_is_protected(vcpu))
5343 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
5344 /*
5345 * Another VCPU might have used IBS while we were offline.
5346 * Let's play safe and flush the VCPU at startup.
5347 */
5348 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
5349 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5350 return 0;
5351}
5352
5353int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
5354{
5355 int i, online_vcpus, r = 0, started_vcpus = 0;
5356 struct kvm_vcpu *started_vcpu = NULL;
5357
5358 if (is_vcpu_stopped(vcpu))
5359 return 0;
5360
5361 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
5362 /* Only one cpu at a time may enter/leave the STOPPED state. */
5363 spin_lock(&vcpu->kvm->arch.start_stop_lock);
5364 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5365
5366 /* Let's tell the UV that we want to change into the stopped state */
5367 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5368 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
5369 if (r) {
5370 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5371 return r;
5372 }
5373 }
5374
5375 /*
5376 * Set the VCPU to STOPPED and THEN clear the interrupt flag,
5377 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
5378 * have been fully processed. This will ensure that the VCPU
5379 * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
5380 */
5381 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
5382 kvm_s390_clear_stop_irq(vcpu);
5383
5384 __disable_ibs_on_vcpu(vcpu);
5385
5386 for (i = 0; i < online_vcpus; i++) {
5387 struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);
5388
5389 if (!is_vcpu_stopped(tmp)) {
5390 started_vcpus++;
5391 started_vcpu = tmp;
5392 }
5393 }
5394
5395 if (started_vcpus == 1) {
5396 /*
5397 * As we only have one VCPU left, we want to enable the
5398 * IBS facility for that VCPU to speed it up.
5399 */
5400 __enable_ibs_on_vcpu(started_vcpu);
5401 }
5402
5403 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5404 return 0;
5405}
5406
5407static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
5408 struct kvm_enable_cap *cap)
5409{
5410 int r;
5411
5412 if (cap->flags)
5413 return -EINVAL;
5414
5415 switch (cap->cap) {
5416 case KVM_CAP_S390_CSS_SUPPORT:
5417 if (!vcpu->kvm->arch.css_support) {
5418 vcpu->kvm->arch.css_support = 1;
5419 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
5420 trace_kvm_s390_enable_css(vcpu->kvm);
5421 }
5422 r = 0;
5423 break;
5424 default:
5425 r = -EINVAL;
5426 break;
5427 }
5428 return r;
5429}

static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *sida_addr;
	int r = 0;

	if (mop->flags || !mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset < mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
		return -E2BIG;
	if (!kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;

	sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;

	switch (mop->op) {
	case KVM_S390_MEMOP_SIDA_READ:
		if (copy_to_user(uaddr, sida_addr, mop->size))
			r = -EFAULT;
		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		if (copy_from_user(sida_addr, uaddr, mop->size))
			r = -EFAULT;
		break;
	}
	return r;
}
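
/*
 * The "mop->size + mop->sida_offset < mop->size" test above is the usual
 * unsigned wraparound check: if the addition overflows, the result is
 * smaller than either addend. A self-contained sketch of the pattern:
 *
 *	static bool range_wraps(unsigned int size, unsigned int offset)
 *	{
 *		return size + offset < size;	// true iff the sum wrapped
 *	}
 *
 * e.g. range_wraps(16, UINT_MAX - 7) is true, so such a request is
 * rejected before the sida_size() bounds check even runs.
 */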

static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu,
				 struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	enum gacc_mode acc_mode;
	void *tmpbuf = NULL;
	int r;

	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_INJECT_EXCEPTION |
					KVM_S390_MEMOP_F_CHECK_ONLY |
					KVM_S390_MEMOP_F_SKEY_PROTECTION);
	if (r)
		return r;
	if (mop->ar >= NUM_ACRS)
		return -EINVAL;
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;
	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE;
	if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
		r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
				    acc_mode, mop->key);
		goto out_inject;
	}
	if (acc_mode == GACC_FETCH) {
		r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
					mop->size, mop->key);
		if (r)
			goto out_inject;
		if (copy_to_user(uaddr, tmpbuf, mop->size)) {
			r = -EFAULT;
			goto out_free;
		}
	} else {
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			goto out_free;
		}
		r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
					 mop->size, mop->key);
	}

out_inject:
	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

out_free:
	vfree(tmpbuf);
	return r;
}

static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu,
				     struct kvm_s390_mem_op *mop)
{
	int r, srcu_idx;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		r = kvm_s390_vcpu_mem_op(vcpu, mop);
		break;
	case KVM_S390_MEMOP_SIDA_READ:
	case KVM_S390_MEMOP_SIDA_WRITE:
		/* we are locked against sida going away by the vcpu->mutex */
		r = kvm_s390_vcpu_sida_op(vcpu, mop);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	return r;
}
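
/*
 * Illustrative userspace use of the dispatcher above (a sketch, not part
 * of this file): reading 256 bytes from a guest logical address of a
 * non-protected guest. vcpu_fd is an assumed VCPU file descriptor.
 *
 *	unsigned char data[256];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x10000,			// guest logical address
 *		.size = sizeof(data),
 *		.op = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf = (unsigned long)data,
 *		.ar = 0,				// access register 0
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) < 0)
 *		err(1, "KVM_S390_MEM_OP");
 *
 * A return value > 0 is a program interruption code; with
 * KVM_S390_MEMOP_F_INJECT_EXCEPTION set it is also injected into the
 * guest (see out_inject in kvm_s390_vcpu_mem_op()).
 */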

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int rc;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq = {};

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	default:
		rc = -ENOIOCTLCMD;
		break;
	}

	/*
	 * To simplify single stepping of userspace-emulated instructions,
	 * a KVM_EXIT_S390_SIEIC exit sets KVM_GUESTDBG_EXIT_PENDING (see
	 * should_handle_per_ifetch()). However, if userspace emulation injects
	 * an interrupt, it needs to be cleared, so that KVM_EXIT_DEBUG happens
	 * after (and not before) the interrupt delivery.
	 */
	if (!rc)
		vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;

	return rc;
}
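
/*
 * Illustrative userspace counterpart of KVM_S390_IRQ above (a sketch,
 * not part of this file): injecting an external-call interrupt, with the
 * sending CPU address in the uapi union member for external calls.
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_INT_EXTERNAL_CALL,
 *		.u.extcall.code = 0,		// address of the sending CPU
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_IRQ, &irq) < 0)
 *		err(1, "KVM_S390_IRQ");
 */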

static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
					struct kvm_pv_cmd *cmd)
{
	struct kvm_s390_pv_dmp dmp;
	void *data;
	int ret;

	/* Dump initialization is a prerequisite */
	if (!vcpu->kvm->arch.pv.dumping)
		return -EINVAL;

	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
		return -EFAULT;

	/* We only handle this subcmd right now */
	if (dmp.subcmd != KVM_PV_DUMP_CPU)
		return -EINVAL;

	/* The CPU dump length equals the CPU storage length donated at create time. */
	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
		return -EINVAL;

	data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);

	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
		   vcpu->vcpu_id, cmd->rc, cmd->rrc);

	if (ret)
		ret = -EINVAL;

	/* On success copy over the dump data */
	if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
		ret = -EFAULT;

	kvfree(data);
	return ret;
}
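
/*
 * Illustrative flow for the handler above (a sketch under stated
 * assumptions, not part of this file): dumping the state of one
 * protected VCPU. buf is assumed to point to a buffer of exactly the
 * donated CPU storage length, which userspace can obtain via the UV/PV
 * info interfaces.
 *
 *	struct kvm_s390_pv_dmp dmp = {
 *		.subcmd = KVM_PV_DUMP_CPU,
 *		.buff_addr = (unsigned long)buf,
 *		.buff_len = buf_len,
 *	};
 *	struct kvm_pv_cmd cmd = {
 *		.cmd = KVM_PV_DUMP,
 *		.data = (unsigned long)&dmp,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_PV_CPU_COMMAND, &cmd) < 0)
 *		err(1, "KVM_PV_DUMP rc=%x rrc=%x", cmd.rc, cmd.rrc);
 */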

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;
	u16 rc, rrc;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_store_status_unloaded(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_CLEAR_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_INITIAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_INITIAL,
					  &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_NORMAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EINVAL;
		if (kvm_s390_pv_cpu_is_protected(vcpu))
			break;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_PV_CPU_COMMAND: {
		struct kvm_pv_cmd cmd;

		r = -EINVAL;
		if (!is_prot_virt_host())
			break;

		r = -EFAULT;
		if (copy_from_user(&cmd, argp, sizeof(cmd)))
			break;

		r = -EINVAL;
		if (cmd.flags)
			break;

		/* We only handle this cmd right now */
		if (cmd.cmd != KVM_PV_DUMP)
			break;

		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);

		/* Always copy over UV rc / rrc data */
		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
			r = -EFAULT;
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
	    && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
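
/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * for user-controlled (ucontrol) VMs the fault handler above exposes the
 * SIE control block, which userspace reaches by mmap()ing the VCPU fd at
 * the matching page offset.
 *
 *	long psize = sysconf(_SC_PAGESIZE);
 *	void *sie = mmap(NULL, psize, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * psize);
 *
 *	if (sie == MAP_FAILED)
 *		err(1, "mmap sie block");
 */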

bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
	return true;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	gpa_t size;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	/* When we are protected, we should not change the memory slots */
	if (kvm_s390_pv_get_handle(kvm))
		return -EINVAL;

	if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
		/*
		 * A few sanity checks. Memory slots have to start and end at
		 * a segment boundary (1MB). The backing memory in userland
		 * may be fragmented across several vmas, and it is fine to
		 * mmap() and munmap() ranges within this slot at any time
		 * after this call.
		 */

		if (new->userspace_addr & 0xffffful)
			return -EINVAL;

		size = new->npages * PAGE_SIZE;
		if (size & 0xffffful)
			return -EINVAL;

		if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
			return -EINVAL;
	}

	if (!kvm->arch.migration_mode)
		return 0;

	/*
	 * Turn off migration mode when:
	 * - userspace creates a new memslot with dirty logging off,
	 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
	 *   dirty logging is turned off.
	 * Migration mode expects dirty page logging to be enabled so that
	 * it can store its dirty bitmap.
	 */
	if (change != KVM_MR_DELETE &&
	    !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
		WARN(kvm_s390_vm_stop_migration(kvm),
		     "Failed to stop migration mode");

	return 0;
}
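
/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * a memslot that satisfies the 1MB segment alignment checks above. mmap()
 * only guarantees page alignment, so a real user over-allocates and
 * rounds up to 1MB; that step is omitted here for brevity.
 *
 *	#define SLOT_SIZE (256UL << 20)		// multiple of 1MB
 *
 *	void *mem = mmap(NULL, SLOT_SIZE, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
 *	struct kvm_userspace_memory_region slot = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = SLOT_SIZE,
 *		.userspace_addr = (unsigned long)mem,	// must be 1MB-aligned
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &slot) < 0)
 *		err(1, "KVM_SET_USER_MEMORY_REGION");
 */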

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int rc = 0;

	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
		fallthrough;
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
				      new->base_gfn * PAGE_SIZE,
				      new->npages * PAGE_SIZE);
		break;
	case KVM_MR_FLAGS_ONLY:
		break;
	default:
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
	}
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}
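
/*
 * Worked example for nonhyp_mask() (illustrative; assuming sclp.hmfai
 * packs one 2-bit field per facility doubleword, starting at the most
 * significant bits):
 *
 *	i = 0, hmfai = 0x40000000:
 *		nonhyp_fai = (0x40000000 << 0) >> 30 = 1
 *		mask = 0x0000ffffffffffffUL >> (1 << 4)
 *		     = 0x00000000ffffffffUL
 *
 * A field value of 0 keeps all 48 mask bits, while 3 shifts the mask
 * right by 48 bits and masks out the doubleword entirely.
 */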

static int __init kvm_s390_init(void)
{
	int i, r;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			stfle_fac_list[i] & nonhyp_mask(i);

	r = __kvm_s390_init();
	if (r)
		return r;

	r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r) {
		__kvm_s390_exit();
		return r;
	}
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();

	__kvm_s390_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
1464 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1465 ret = -EFAULT;
1466 kfree(proc);
1467out:
1468 return ret;
1469}
1470
1471static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1472{
1473 struct kvm_s390_vm_cpu_machine *mach;
1474 int ret = 0;
1475
1476 mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
1477 if (!mach) {
1478 ret = -ENOMEM;
1479 goto out;
1480 }
1481 get_cpu_id((struct cpuid *) &mach->cpuid);
1482 mach->ibc = sclp.ibc;
1483 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
1484 S390_ARCH_FAC_LIST_SIZE_BYTE);
1485 memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
1486 sizeof(stfle_fac_list));
1487 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
1488 kvm->arch.model.ibc,
1489 kvm->arch.model.cpuid);
1490 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
1491 mach->fac_mask[0],
1492 mach->fac_mask[1],
1493 mach->fac_mask[2]);
1494 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1495 mach->fac_list[0],
1496 mach->fac_list[1],
1497 mach->fac_list[2]);
1498 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1499 ret = -EFAULT;
1500 kfree(mach);
1501out:
1502 return ret;
1503}
1504
1505static int kvm_s390_get_processor_feat(struct kvm *kvm,
1506 struct kvm_device_attr *attr)
1507{
1508 struct kvm_s390_vm_cpu_feat data;
1509
1510 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1511 KVM_S390_VM_CPU_FEAT_NR_BITS);
1512 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1513 return -EFAULT;
1514 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1515 data.feat[0],
1516 data.feat[1],
1517 data.feat[2]);
1518 return 0;
1519}
1520
1521static int kvm_s390_get_machine_feat(struct kvm *kvm,
1522 struct kvm_device_attr *attr)
1523{
1524 struct kvm_s390_vm_cpu_feat data;
1525
1526 bitmap_copy((unsigned long *) data.feat,
1527 kvm_s390_available_cpu_feat,
1528 KVM_S390_VM_CPU_FEAT_NR_BITS);
1529 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1530 return -EFAULT;
1531 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1532 data.feat[0],
1533 data.feat[1],
1534 data.feat[2]);
1535 return 0;
1536}
1537
1538static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1539 struct kvm_device_attr *attr)
1540{
1541 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1542 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1543 return -EFAULT;
1544
1545 VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1546 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1547 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1548 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1549 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1550 VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
1551 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1552 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1553 VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
1554 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1555 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1556 VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
1557 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1558 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1559 VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
1560 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1561 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1562 VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
1563 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1564 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1565 VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
1566 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1567 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1568 VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
1569 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1570 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1571 VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
1572 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1573 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1574 VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
1575 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1576 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1577 VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
1578 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1579 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1580 VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
1581 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1582 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1583 VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
1584 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1585 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1586 VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
1587 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1588 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1589 VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
1590 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1591 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1592 VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1593 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1594 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1595 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1596 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1597 VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1598 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1599 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1600 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1601 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1602
1603 return 0;
1604}
1605
1606static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1607 struct kvm_device_attr *attr)
1608{
1609 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1610 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1611 return -EFAULT;
1612
1613 VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1614 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
1615 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
1616 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
1617 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
1618 VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
1619 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
1620 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
1621 VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
1622 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
1623 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
1624 VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
1625 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
1626 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
1627 VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
1628 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
1629 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
1630 VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
1631 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
1632 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
1633 VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
1634 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
1635 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
1636 VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
1637 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
1638 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
1639 VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
1640 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
1641 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
1642 VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
1643 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
1644 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
1645 VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
1646 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
1647 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
1648 VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
1649 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
1650 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
1651 VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
1652 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
1653 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
1654 VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
1655 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
1656 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
1657 VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
1658 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
1659 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
1660 VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1661 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1662 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1663 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1664 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
1665 VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1666 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
1667 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
1668 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
1669 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
1670
1671 return 0;
1672}
1673
1674static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1675{
1676 int ret = -ENXIO;
1677
1678 switch (attr->attr) {
1679 case KVM_S390_VM_CPU_PROCESSOR:
1680 ret = kvm_s390_get_processor(kvm, attr);
1681 break;
1682 case KVM_S390_VM_CPU_MACHINE:
1683 ret = kvm_s390_get_machine(kvm, attr);
1684 break;
1685 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1686 ret = kvm_s390_get_processor_feat(kvm, attr);
1687 break;
1688 case KVM_S390_VM_CPU_MACHINE_FEAT:
1689 ret = kvm_s390_get_machine_feat(kvm, attr);
1690 break;
1691 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1692 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1693 break;
1694 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1695 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1696 break;
1697 }
1698 return ret;
1699}
1700
1701static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1702{
1703 int ret;
1704
1705 switch (attr->group) {
1706 case KVM_S390_VM_MEM_CTRL:
1707 ret = kvm_s390_set_mem_control(kvm, attr);
1708 break;
1709 case KVM_S390_VM_TOD:
1710 ret = kvm_s390_set_tod(kvm, attr);
1711 break;
1712 case KVM_S390_VM_CPU_MODEL:
1713 ret = kvm_s390_set_cpu_model(kvm, attr);
1714 break;
1715 case KVM_S390_VM_CRYPTO:
1716 ret = kvm_s390_vm_set_crypto(kvm, attr);
1717 break;
1718 case KVM_S390_VM_MIGRATION:
1719 ret = kvm_s390_vm_set_migration(kvm, attr);
1720 break;
1721 default:
1722 ret = -ENXIO;
1723 break;
1724 }
1725
1726 return ret;
1727}
1728
1729static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1730{
1731 int ret;
1732
1733 switch (attr->group) {
1734 case KVM_S390_VM_MEM_CTRL:
1735 ret = kvm_s390_get_mem_control(kvm, attr);
1736 break;
1737 case KVM_S390_VM_TOD:
1738 ret = kvm_s390_get_tod(kvm, attr);
1739 break;
1740 case KVM_S390_VM_CPU_MODEL:
1741 ret = kvm_s390_get_cpu_model(kvm, attr);
1742 break;
1743 case KVM_S390_VM_MIGRATION:
1744 ret = kvm_s390_vm_get_migration(kvm, attr);
1745 break;
1746 default:
1747 ret = -ENXIO;
1748 break;
1749 }
1750
1751 return ret;
1752}
1753
1754static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1755{
1756 int ret;
1757
1758 switch (attr->group) {
1759 case KVM_S390_VM_MEM_CTRL:
1760 switch (attr->attr) {
1761 case KVM_S390_VM_MEM_ENABLE_CMMA:
1762 case KVM_S390_VM_MEM_CLR_CMMA:
1763 ret = sclp.has_cmma ? 0 : -ENXIO;
1764 break;
1765 case KVM_S390_VM_MEM_LIMIT_SIZE:
1766 ret = 0;
1767 break;
1768 default:
1769 ret = -ENXIO;
1770 break;
1771 }
1772 break;
1773 case KVM_S390_VM_TOD:
1774 switch (attr->attr) {
1775 case KVM_S390_VM_TOD_LOW:
1776 case KVM_S390_VM_TOD_HIGH:
1777 ret = 0;
1778 break;
1779 default:
1780 ret = -ENXIO;
1781 break;
1782 }
1783 break;
1784 case KVM_S390_VM_CPU_MODEL:
1785 switch (attr->attr) {
1786 case KVM_S390_VM_CPU_PROCESSOR:
1787 case KVM_S390_VM_CPU_MACHINE:
1788 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1789 case KVM_S390_VM_CPU_MACHINE_FEAT:
1790 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1791 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1792 ret = 0;
1793 break;
1794 default:
1795 ret = -ENXIO;
1796 break;
1797 }
1798 break;
1799 case KVM_S390_VM_CRYPTO:
1800 switch (attr->attr) {
1801 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1802 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1803 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1804 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1805 ret = 0;
1806 break;
1807 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
1808 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
1809 ret = ap_instructions_available() ? 0 : -ENXIO;
1810 break;
1811 default:
1812 ret = -ENXIO;
1813 break;
1814 }
1815 break;
1816 case KVM_S390_VM_MIGRATION:
1817 ret = 0;
1818 break;
1819 default:
1820 ret = -ENXIO;
1821 break;
1822 }
1823
1824 return ret;
1825}
1826
1827static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1828{
1829 uint8_t *keys;
1830 uint64_t hva;
1831 int srcu_idx, i, r = 0;
1832
1833 if (args->flags != 0)
1834 return -EINVAL;
1835
1836 /* Is this guest using storage keys? */
1837 if (!mm_uses_skeys(current->mm))
1838 return KVM_S390_GET_SKEYS_NONE;
1839
1840 /* Enforce sane limit on memory allocation */
1841 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1842 return -EINVAL;
1843
1844 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
1845 if (!keys)
1846 return -ENOMEM;
1847
1848 mmap_read_lock(current->mm);
1849 srcu_idx = srcu_read_lock(&kvm->srcu);
1850 for (i = 0; i < args->count; i++) {
1851 hva = gfn_to_hva(kvm, args->start_gfn + i);
1852 if (kvm_is_error_hva(hva)) {
1853 r = -EFAULT;
1854 break;
1855 }
1856
1857 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1858 if (r)
1859 break;
1860 }
1861 srcu_read_unlock(&kvm->srcu, srcu_idx);
1862 mmap_read_unlock(current->mm);
1863
1864 if (!r) {
1865 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1866 sizeof(uint8_t) * args->count);
1867 if (r)
1868 r = -EFAULT;
1869 }
1870
1871 kvfree(keys);
1872 return r;
1873}
1874
1875static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1876{
1877 uint8_t *keys;
1878 uint64_t hva;
1879 int srcu_idx, i, r = 0;
1880 bool unlocked;
1881
1882 if (args->flags != 0)
1883 return -EINVAL;
1884
1885 /* Enforce sane limit on memory allocation */
1886 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1887 return -EINVAL;
1888
1889 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
1890 if (!keys)
1891 return -ENOMEM;
1892
1893 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1894 sizeof(uint8_t) * args->count);
1895 if (r) {
1896 r = -EFAULT;
1897 goto out;
1898 }
1899
1900 /* Enable storage key handling for the guest */
1901 r = s390_enable_skey();
1902 if (r)
1903 goto out;
1904
1905 i = 0;
1906 mmap_read_lock(current->mm);
1907 srcu_idx = srcu_read_lock(&kvm->srcu);
1908 while (i < args->count) {
1909 unlocked = false;
1910 hva = gfn_to_hva(kvm, args->start_gfn + i);
1911 if (kvm_is_error_hva(hva)) {
1912 r = -EFAULT;
1913 break;
1914 }
1915
1916 /* Lowest order bit is reserved */
1917 if (keys[i] & 0x01) {
1918 r = -EINVAL;
1919 break;
1920 }
1921
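		/*
		 * If the page is not yet mapped writably, fault it in and
		 * retry; i is only advanced once the key was actually set.
		 */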
1922 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
1923 if (r) {
1924 r = fixup_user_fault(current->mm, hva,
1925 FAULT_FLAG_WRITE, &unlocked);
1926 if (r)
1927 break;
1928 }
1929 if (!r)
1930 i++;
1931 }
1932 srcu_read_unlock(&kvm->srcu, srcu_idx);
1933 mmap_read_unlock(current->mm);
1934out:
1935 kvfree(keys);
1936 return r;
1937}
1938
1939/*
1940 * Base address and length must be sent at the start of each block, therefore
1941 * it's cheaper to send some clean data, as long as it's less than the size of
1942 * two longs.
1943 */
1944#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1945 /* use the same limit as the storage key interface, for consistency */
1946#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
1947
1948/*
1949 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
1950 * address falls in a hole. In that case the index of one of the memslots
1951 * bordering the hole is returned.
1952 */
1953static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
1954{
1955 int start = 0, end = slots->used_slots;
1956 int slot = atomic_read(&slots->lru_slot);
1957 struct kvm_memory_slot *memslots = slots->memslots;
1958
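	/* Fast path: the most recently used slot often matches again */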
1959 if (gfn >= memslots[slot].base_gfn &&
1960 gfn < memslots[slot].base_gfn + memslots[slot].npages)
1961 return slot;
1962
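	/*
	 * The memslots array is sorted by base_gfn in descending order, so
	 * this binary search finds the first index whose base_gfn is <= gfn.
	 */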
1963 while (start < end) {
1964 slot = start + (end - start) / 2;
1965
1966 if (gfn >= memslots[slot].base_gfn)
1967 end = slot;
1968 else
1969 start = slot + 1;
1970 }
1971
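	/* gfn lies below every slot; fall back to the last (lowest) one */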
1972 if (start >= slots->used_slots)
1973 return slots->used_slots - 1;
1974
1975 if (gfn >= memslots[start].base_gfn &&
1976 gfn < memslots[start].base_gfn + memslots[start].npages) {
1977 atomic_set(&slots->lru_slot, start);
1978 }
1979
1980 return start;
1981}
1982
1983static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1984 u8 *res, unsigned long bufsize)
1985{
1986 unsigned long pgstev, hva, cur_gfn = args->start_gfn;
1987
1988 args->count = 0;
1989 while (args->count < bufsize) {
1990 hva = gfn_to_hva(kvm, cur_gfn);
1991 /*
1992 * We return an error if the first value was invalid, but we
1993 * return successfully if at least one value was copied.
1994 */
1995 if (kvm_is_error_hva(hva))
1996 return args->count ? 0 : -EFAULT;
1997 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
1998 pgstev = 0;
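		/* keep only the usage state (bits 0-1) and NODAT (bit 6) */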
1999 res[args->count++] = (pgstev >> 24) & 0x43;
2000 cur_gfn++;
2001 }
2002
2003 return 0;
2004}
2005
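/*
 * Return the guest frame number of the next page with the CMMA dirty bit
 * set, starting the search at cur_gfn. Note that memslots are sorted by
 * descending base_gfn, so decrementing the slot index advances the search
 * towards higher guest addresses.
 */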
2006static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
2007 unsigned long cur_gfn)
2008{
2009 int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
2010 struct kvm_memory_slot *ms = slots->memslots + slotidx;
2011 unsigned long ofs = cur_gfn - ms->base_gfn;
2012
2013 if (ms->base_gfn + ms->npages <= cur_gfn) {
2014 slotidx--;
2015 /* If we are above the highest slot, wrap around */
2016 if (slotidx < 0)
2017 slotidx = slots->used_slots - 1;
2018
2019 ms = slots->memslots + slotidx;
2020 ofs = 0;
2021 }
2022 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
2023 while ((slotidx > 0) && (ofs >= ms->npages)) {
2024 slotidx--;
2025 ms = slots->memslots + slotidx;
2026 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
2027 }
2028 return ms->base_gfn + ofs;
2029}
2030
2031static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2032 u8 *res, unsigned long bufsize)
2033{
2034 unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
2035 struct kvm_memslots *slots = kvm_memslots(kvm);
2036 struct kvm_memory_slot *ms;
2037
2038 if (unlikely(!slots->used_slots))
2039 return 0;
2040
2041 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2042 ms = gfn_to_memslot(kvm, cur_gfn);
2043 args->count = 0;
2044 args->start_gfn = cur_gfn;
2045 if (!ms)
2046 return 0;
2047 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
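	/* slot 0 has the highest base_gfn, so it marks the end of guest memory */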
2048 mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
2049
2050 while (args->count < bufsize) {
2051 hva = gfn_to_hva(kvm, cur_gfn);
2052 if (kvm_is_error_hva(hva))
2053 return 0;
2054 /* Decrement only if we actually flipped the bit to 0 */
2055 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2056 atomic64_dec(&kvm->arch.cmma_dirty_pages);
2057 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2058 pgstev = 0;
2059 /* Save the value */
2060 res[args->count++] = (pgstev >> 24) & 0x43;
2061 /* If the next bit is too far away, stop. */
2062 if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2063 return 0;
2064 /* If we reached the previous "next", find the next one */
2065 if (cur_gfn == next_gfn)
2066 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2067 /* Reached the end of memory or of the buffer, stop */
2068 if ((next_gfn >= mem_end) ||
2069 (next_gfn - args->start_gfn >= bufsize))
2070 return 0;
2071 cur_gfn++;
2072 /* Reached the end of the current memslot, take the next one. */
2073 if (cur_gfn - ms->base_gfn >= ms->npages) {
2074 ms = gfn_to_memslot(kvm, cur_gfn);
2075 if (!ms)
2076 return 0;
2077 }
2078 }
2079 return 0;
2080}
2081
2082/*
2083 * This function searches for the next page with dirty CMMA attributes, and
2084 * saves the attributes in the buffer up to either the end of the buffer or
2085 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2086 * no trailing clean bytes are saved.
2087 * In case no dirty bits were found, or if CMMA was not enabled or used, the
2088 * output buffer will indicate 0 as length.
2089 */
2090static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2091 struct kvm_s390_cmma_log *args)
2092{
2093 unsigned long bufsize;
2094 int srcu_idx, peek, ret;
2095 u8 *values;
2096
2097 if (!kvm->arch.use_cmma)
2098 return -ENXIO;
2099 /* Invalid/unsupported flags were specified */
2100 if (args->flags & ~KVM_S390_CMMA_PEEK)
2101 return -EINVAL;
2102 	/* Without peek, this interface only works while migration mode is on */
2103 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
2104 if (!peek && !kvm->arch.migration_mode)
2105 return -EINVAL;
2106 /* CMMA is disabled or was not used, or the buffer has length zero */
2107 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
2108 if (!bufsize || !kvm->mm->context.uses_cmm) {
2109 memset(args, 0, sizeof(*args));
2110 return 0;
2111 }
2112 /* We are not peeking, and there are no dirty pages */
2113 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2114 memset(args, 0, sizeof(*args));
2115 return 0;
2116 }
2117
2118 values = vmalloc(bufsize);
2119 if (!values)
2120 return -ENOMEM;
2121
2122 mmap_read_lock(kvm->mm);
2123 srcu_idx = srcu_read_lock(&kvm->srcu);
2124 if (peek)
2125 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2126 else
2127 ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
2128 srcu_read_unlock(&kvm->srcu, srcu_idx);
2129 mmap_read_unlock(kvm->mm);
2130
2131 if (kvm->arch.migration_mode)
2132 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2133 else
2134 args->remaining = 0;
2135
2136 if (copy_to_user((void __user *)args->values, values, args->count))
2137 ret = -EFAULT;
2138
2139 vfree(values);
2140 return ret;
2141}
2142
2143/*
2144 * This function sets the CMMA attributes for the given pages. If the input
2145 * buffer has zero length, no action is taken, otherwise the attributes are
2146 * set and the mm->context.uses_cmm flag is set.
2147 */
2148static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2149 const struct kvm_s390_cmma_log *args)
2150{
2151 unsigned long hva, mask, pgstev, i;
2152 uint8_t *bits;
2153 int srcu_idx, r = 0;
2154
2155 mask = args->mask;
2156
2157 if (!kvm->arch.use_cmma)
2158 return -ENXIO;
2159 /* invalid/unsupported flags */
2160 if (args->flags != 0)
2161 return -EINVAL;
2162 /* Enforce sane limit on memory allocation */
2163 if (args->count > KVM_S390_CMMA_SIZE_MAX)
2164 return -EINVAL;
2165 /* Nothing to do */
2166 if (args->count == 0)
2167 return 0;
2168
2169 bits = vmalloc(array_size(sizeof(*bits), args->count));
2170 if (!bits)
2171 return -ENOMEM;
2172
2173 r = copy_from_user(bits, (void __user *)args->values, args->count);
2174 if (r) {
2175 r = -EFAULT;
2176 goto out;
2177 }
2178
2179 mmap_read_lock(kvm->mm);
2180 srcu_idx = srcu_read_lock(&kvm->srcu);
2181 for (i = 0; i < args->count; i++) {
2182 hva = gfn_to_hva(kvm, args->start_gfn + i);
2183 if (kvm_is_error_hva(hva)) {
2184 r = -EFAULT;
2185 break;
2186 }
2187
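		/*
		 * Shift the attribute byte into its PGSTE position; only the
		 * usage state and NODAT bits may be set via this interface.
		 */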
2188 pgstev = bits[i];
2189 pgstev = pgstev << 24;
2190 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
2191 set_pgste_bits(kvm->mm, hva, mask, pgstev);
2192 }
2193 srcu_read_unlock(&kvm->srcu, srcu_idx);
2194 mmap_read_unlock(kvm->mm);
2195
2196 if (!kvm->mm->context.uses_cmm) {
2197 mmap_write_lock(kvm->mm);
2198 kvm->mm->context.uses_cmm = 1;
2199 mmap_write_unlock(kvm->mm);
2200 }
2201out:
2202 vfree(bits);
2203 return r;
2204}
2205
2206static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
2207{
2208 struct kvm_vcpu *vcpu;
2209 u16 rc, rrc;
2210 int ret = 0;
2211 int i;
2212
2213 /*
2214 * We ignore failures and try to destroy as many CPUs as possible.
2215 * At the same time we must not free the assigned resources when
2216 	 * this fails, as the ultravisor still has access to that memory.
2217 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
2218 * behind.
2219 * We want to return the first failure rc and rrc, though.
2220 */
2221 kvm_for_each_vcpu(i, vcpu, kvm) {
2222 mutex_lock(&vcpu->mutex);
2223 if (kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc) && !ret) {
2224 *rcp = rc;
2225 *rrcp = rrc;
2226 ret = -EIO;
2227 }
2228 mutex_unlock(&vcpu->mutex);
2229 }
2230 return ret;
2231}
2232
2233static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2234{
2235 	int i, r = 0;
2236 	u16 dummy;
2237 	struct kvm_vcpu *vcpu;
2239
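	/* Convert every VCPU to protected; roll all of them back on failure. */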
2240 kvm_for_each_vcpu(i, vcpu, kvm) {
2241 mutex_lock(&vcpu->mutex);
2242 r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
2243 mutex_unlock(&vcpu->mutex);
2244 if (r)
2245 break;
2246 }
2247 if (r)
2248 kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
2249 return r;
2250}
2251
2252static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
2253{
2254 int r = 0;
2255 u16 dummy;
2256 void __user *argp = (void __user *)cmd->data;
2257
2258 switch (cmd->cmd) {
2259 case KVM_PV_ENABLE: {
2260 r = -EINVAL;
2261 if (kvm_s390_pv_is_protected(kvm))
2262 break;
2263
2264 /*
2265 		 *  Format 4 SIE needs an ESCA. Since we never switch back from
2266 		 *  ESCA to BSCA, no cleanup is needed in the error cases below.
2267 */
2268 r = sca_switch_to_extended(kvm);
2269 if (r)
2270 break;
2271
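		/*
		 * Secure guest pages must not be merged by KSM, so mark the
		 * whole address space as unmergeable.
		 */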
2272 mmap_write_lock(current->mm);
2273 r = gmap_mark_unmergeable();
2274 mmap_write_unlock(current->mm);
2275 if (r)
2276 break;
2277
2278 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
2279 if (r)
2280 break;
2281
2282 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
2283 if (r)
2284 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
2285
2286 /* we need to block service interrupts from now on */
2287 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2288 break;
2289 }
2290 case KVM_PV_DISABLE: {
2291 r = -EINVAL;
2292 if (!kvm_s390_pv_is_protected(kvm))
2293 break;
2294
2295 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2296 /*
2297 		 * If a CPU could not be destroyed, destroying the VM will also
2298 		 * fail. There is no point in trying; instead, return the rc and
2299 		 * rrc of the first CPU whose destruction failed.
2300 */
2301 if (r)
2302 break;
2303 r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);
2304
2305 /* no need to block service interrupts any more */
2306 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2307 break;
2308 }
2309 case KVM_PV_SET_SEC_PARMS: {
2310 struct kvm_s390_pv_sec_parm parms = {};
2311 void *hdr;
2312
2313 r = -EINVAL;
2314 if (!kvm_s390_pv_is_protected(kvm))
2315 break;
2316
2317 r = -EFAULT;
2318 if (copy_from_user(&parms, argp, sizeof(parms)))
2319 break;
2320
2321 /* Currently restricted to 8KB */
2322 r = -EINVAL;
2323 if (parms.length > PAGE_SIZE * 2)
2324 break;
2325
2326 r = -ENOMEM;
2327 hdr = vmalloc(parms.length);
2328 if (!hdr)
2329 break;
2330
2331 r = -EFAULT;
2332 if (!copy_from_user(hdr, (void __user *)parms.origin,
2333 parms.length))
2334 r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
2335 &cmd->rc, &cmd->rrc);
2336
2337 vfree(hdr);
2338 break;
2339 }
2340 case KVM_PV_UNPACK: {
2341 struct kvm_s390_pv_unp unp = {};
2342
2343 r = -EINVAL;
2344 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
2345 break;
2346
2347 r = -EFAULT;
2348 if (copy_from_user(&unp, argp, sizeof(unp)))
2349 break;
2350
2351 r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
2352 &cmd->rc, &cmd->rrc);
2353 break;
2354 }
2355 case KVM_PV_VERIFY: {
2356 r = -EINVAL;
2357 if (!kvm_s390_pv_is_protected(kvm))
2358 break;
2359
2360 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2361 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
2362 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
2363 cmd->rrc);
2364 break;
2365 }
2366 case KVM_PV_PREP_RESET: {
2367 r = -EINVAL;
2368 if (!kvm_s390_pv_is_protected(kvm))
2369 break;
2370
2371 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2372 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2373 KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2374 cmd->rc, cmd->rrc);
2375 break;
2376 }
2377 case KVM_PV_UNSHARE_ALL: {
2378 r = -EINVAL;
2379 if (!kvm_s390_pv_is_protected(kvm))
2380 break;
2381
2382 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2383 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2384 KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2385 cmd->rc, cmd->rrc);
2386 break;
2387 }
2388 default:
2389 r = -ENOTTY;
2390 }
2391 return r;
2392}
2393
2394long kvm_arch_vm_ioctl(struct file *filp,
2395 unsigned int ioctl, unsigned long arg)
2396{
2397 struct kvm *kvm = filp->private_data;
2398 void __user *argp = (void __user *)arg;
2399 struct kvm_device_attr attr;
2400 int r;
2401
2402 switch (ioctl) {
2403 case KVM_S390_INTERRUPT: {
2404 struct kvm_s390_interrupt s390int;
2405
2406 r = -EFAULT;
2407 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2408 break;
2409 r = kvm_s390_inject_vm(kvm, &s390int);
2410 break;
2411 }
2412 case KVM_CREATE_IRQCHIP: {
2413 struct kvm_irq_routing_entry routing;
2414
2415 r = -EINVAL;
2416 if (kvm->arch.use_irqchip) {
2417 /* Set up dummy routing. */
2418 memset(&routing, 0, sizeof(routing));
2419 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
2420 }
2421 break;
2422 }
2423 case KVM_SET_DEVICE_ATTR: {
2424 r = -EFAULT;
2425 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2426 break;
2427 r = kvm_s390_vm_set_attr(kvm, &attr);
2428 break;
2429 }
2430 case KVM_GET_DEVICE_ATTR: {
2431 r = -EFAULT;
2432 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2433 break;
2434 r = kvm_s390_vm_get_attr(kvm, &attr);
2435 break;
2436 }
2437 case KVM_HAS_DEVICE_ATTR: {
2438 r = -EFAULT;
2439 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2440 break;
2441 r = kvm_s390_vm_has_attr(kvm, &attr);
2442 break;
2443 }
2444 case KVM_S390_GET_SKEYS: {
2445 struct kvm_s390_skeys args;
2446
2447 r = -EFAULT;
2448 if (copy_from_user(&args, argp,
2449 sizeof(struct kvm_s390_skeys)))
2450 break;
2451 r = kvm_s390_get_skeys(kvm, &args);
2452 break;
2453 }
2454 case KVM_S390_SET_SKEYS: {
2455 struct kvm_s390_skeys args;
2456
2457 r = -EFAULT;
2458 if (copy_from_user(&args, argp,
2459 sizeof(struct kvm_s390_skeys)))
2460 break;
2461 r = kvm_s390_set_skeys(kvm, &args);
2462 break;
2463 }
2464 case KVM_S390_GET_CMMA_BITS: {
2465 struct kvm_s390_cmma_log args;
2466
2467 r = -EFAULT;
2468 if (copy_from_user(&args, argp, sizeof(args)))
2469 break;
2470 mutex_lock(&kvm->slots_lock);
2471 r = kvm_s390_get_cmma_bits(kvm, &args);
2472 mutex_unlock(&kvm->slots_lock);
2473 if (!r) {
2474 r = copy_to_user(argp, &args, sizeof(args));
2475 if (r)
2476 r = -EFAULT;
2477 }
2478 break;
2479 }
2480 case KVM_S390_SET_CMMA_BITS: {
2481 struct kvm_s390_cmma_log args;
2482
2483 r = -EFAULT;
2484 if (copy_from_user(&args, argp, sizeof(args)))
2485 break;
2486 mutex_lock(&kvm->slots_lock);
2487 r = kvm_s390_set_cmma_bits(kvm, &args);
2488 mutex_unlock(&kvm->slots_lock);
2489 break;
2490 }
2491 case KVM_S390_PV_COMMAND: {
2492 struct kvm_pv_cmd args;
2493
2494 		/* protected virtualization implies user-space controlled sigp */
2495 kvm->arch.user_cpu_state_ctrl = 1;
2496 r = 0;
2497 if (!is_prot_virt_host()) {
2498 r = -EINVAL;
2499 break;
2500 }
2501 if (copy_from_user(&args, argp, sizeof(args))) {
2502 r = -EFAULT;
2503 break;
2504 }
2505 if (args.flags) {
2506 r = -EINVAL;
2507 break;
2508 }
2509 mutex_lock(&kvm->lock);
2510 r = kvm_s390_handle_pv(kvm, &args);
2511 mutex_unlock(&kvm->lock);
2512 if (copy_to_user(argp, &args, sizeof(args))) {
2513 r = -EFAULT;
2514 break;
2515 }
2516 break;
2517 }
2518 default:
2519 r = -ENOTTY;
2520 }
2521
2522 return r;
2523}
2524
2525static int kvm_s390_apxa_installed(void)
2526{
2527 struct ap_config_info info;
2528
2529 if (ap_instructions_available()) {
2530 if (ap_qci(&info) == 0)
2531 return info.apxa;
2532 }
2533
2534 return 0;
2535}
2536
2537/*
2538 * The format of the crypto control block (CRYCB) is specified in the 3 low
2539 * order bits of the CRYCB designation (CRYCBD) field as follows:
2540 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
2541  *	     AP extended addressing (APXA) facility is installed.
2542  * Format 1: The APXA facility is not installed but the MSAX3 facility is.
2543  * Format 2: Both the APXA and MSAX3 facilities are installed.
2544 */
2545static void kvm_s390_set_crycb_format(struct kvm *kvm)
2546{
2547 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
2548
2549 /* Clear the CRYCB format bits - i.e., set format 0 by default */
2550 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
2551
2552 /* Check whether MSAX3 is installed */
2553 if (!test_kvm_facility(kvm, 76))
2554 return;
2555
2556 if (kvm_s390_apxa_installed())
2557 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
2558 else
2559 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
2560}
2561
2562void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
2563 unsigned long *aqm, unsigned long *adm)
2564{
2565 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
2566
2567 mutex_lock(&kvm->lock);
2568 kvm_s390_vcpu_block_all(kvm);
2569
2570 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
2571 	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
2572 memcpy(crycb->apcb1.apm, apm, 32);
2573 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
2574 apm[0], apm[1], apm[2], apm[3]);
2575 memcpy(crycb->apcb1.aqm, aqm, 32);
2576 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
2577 aqm[0], aqm[1], aqm[2], aqm[3]);
2578 memcpy(crycb->apcb1.adm, adm, 32);
2579 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
2580 adm[0], adm[1], adm[2], adm[3]);
2581 break;
2582 case CRYCB_FORMAT1:
2583 	case CRYCB_FORMAT0: /* fall through - both formats use APCB0 */
2584 memcpy(crycb->apcb0.apm, apm, 8);
2585 memcpy(crycb->apcb0.aqm, aqm, 2);
2586 memcpy(crycb->apcb0.adm, adm, 2);
2587 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
2588 apm[0], *((unsigned short *)aqm),
2589 *((unsigned short *)adm));
2590 break;
2591 	default: /* Cannot happen */
2592 break;
2593 }
2594
2595 /* recreate the shadow crycb for each vcpu */
2596 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2597 kvm_s390_vcpu_unblock_all(kvm);
2598 mutex_unlock(&kvm->lock);
2599}
2600EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
2601
2602void kvm_arch_crypto_clear_masks(struct kvm *kvm)
2603{
2604 mutex_lock(&kvm->lock);
2605 kvm_s390_vcpu_block_all(kvm);
2606
2607 memset(&kvm->arch.crypto.crycb->apcb0, 0,
2608 sizeof(kvm->arch.crypto.crycb->apcb0));
2609 memset(&kvm->arch.crypto.crycb->apcb1, 0,
2610 sizeof(kvm->arch.crypto.crycb->apcb1));
2611
2612 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
2613 /* recreate the shadow crycb for each vcpu */
2614 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2615 kvm_s390_vcpu_unblock_all(kvm);
2616 mutex_unlock(&kvm->lock);
2617}
2618EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
2619
2620static u64 kvm_s390_get_initial_cpuid(void)
2621{
2622 struct cpuid cpuid;
2623
2624 get_cpu_id(&cpuid);
2625 cpuid.version = 0xff;
2626 return *((u64 *) &cpuid);
2627}
2628
2629static void kvm_s390_crypto_init(struct kvm *kvm)
2630{
2631 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
2632 kvm_s390_set_crycb_format(kvm);
2633
2634 if (!test_kvm_facility(kvm, 76))
2635 return;
2636
2637 /* Enable AES/DEA protected key functions by default */
2638 kvm->arch.crypto.aes_kw = 1;
2639 kvm->arch.crypto.dea_kw = 1;
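	/*
	 * Use fresh random wrapping keys for every VM, so that protected
	 * keys are only usable within the VM they were created in.
	 */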
2640 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2641 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2642 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2643 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
2644}
2645
2646static void sca_dispose(struct kvm *kvm)
2647{
2648 if (kvm->arch.use_esca)
2649 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
2650 else
2651 free_page((unsigned long)(kvm->arch.sca));
2652 kvm->arch.sca = NULL;
2653}
2654
2655int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
2656{
2657 gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
2658 int i, rc;
2659 char debug_name[16];
2660 static unsigned long sca_offset;
2661
2662 rc = -EINVAL;
2663#ifdef CONFIG_KVM_S390_UCONTROL
2664 if (type & ~KVM_VM_S390_UCONTROL)
2665 goto out_err;
2666 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2667 goto out_err;
2668#else
2669 if (type)
2670 goto out_err;
2671#endif
2672
2673 rc = s390_enable_sie();
2674 if (rc)
2675 goto out_err;
2676
2677 rc = -ENOMEM;
2678
2679 if (!sclp.has_64bscao)
2680 alloc_flags |= GFP_DMA;
2681 rwlock_init(&kvm->arch.sca_lock);
2682 /* start with basic SCA */
2683 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
2684 if (!kvm->arch.sca)
2685 goto out_err;
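	/*
	 * Stagger the SCAs of different VMs within the page, presumably so
	 * that their heavily used first cache lines do not all collide.
	 */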
2686 mutex_lock(&kvm_lock);
2687 sca_offset += 16;
2688 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
2689 sca_offset = 0;
2690 kvm->arch.sca = (struct bsca_block *)
2691 ((char *) kvm->arch.sca + sca_offset);
2692 mutex_unlock(&kvm_lock);
2693
2694 sprintf(debug_name, "kvm-%u", current->pid);
2695
2696 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
2697 if (!kvm->arch.dbf)
2698 goto out_err;
2699
2700 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
2701 kvm->arch.sie_page2 =
2702 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
2703 if (!kvm->arch.sie_page2)
2704 goto out_err;
2705
2706 kvm->arch.sie_page2->kvm = kvm;
2707 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
2708
2709 for (i = 0; i < kvm_s390_fac_size(); i++) {
2710 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
2711 (kvm_s390_fac_base[i] |
2712 kvm_s390_fac_ext[i]);
2713 kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
2714 kvm_s390_fac_base[i];
2715 }
2716 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
2717
2718 	/* we are always in czam mode - even on pre-z14 machines */
2719 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2720 set_kvm_facility(kvm->arch.model.fac_list, 138);
2721 /* we emulate STHYI in kvm */
2722 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2723 set_kvm_facility(kvm->arch.model.fac_list, 74);
2724 if (MACHINE_HAS_TLB_GUEST) {
2725 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2726 set_kvm_facility(kvm->arch.model.fac_list, 147);
2727 }
2728
2729 if (css_general_characteristics.aiv && test_facility(65))
2730 set_kvm_facility(kvm->arch.model.fac_mask, 65);
2731
2732 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
2733 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
2734
2735 kvm_s390_crypto_init(kvm);
2736
2737 mutex_init(&kvm->arch.float_int.ais_lock);
2738 spin_lock_init(&kvm->arch.float_int.lock);
2739 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2740 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
2741 init_waitqueue_head(&kvm->arch.ipte_wq);
2742 mutex_init(&kvm->arch.ipte_mutex);
2743
2744 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
2745 VM_EVENT(kvm, 3, "vm created with type %lu", type);
2746
2747 if (type & KVM_VM_S390_UCONTROL) {
2748 kvm->arch.gmap = NULL;
2749 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
2750 } else {
2751 if (sclp.hamax == U64_MAX)
2752 kvm->arch.mem_limit = TASK_SIZE_MAX;
2753 else
2754 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
2755 sclp.hamax + 1);
2756 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
2757 if (!kvm->arch.gmap)
2758 goto out_err;
2759 kvm->arch.gmap->private = kvm;
2760 kvm->arch.gmap->pfault_enabled = 0;
2761 }
2762
2763 kvm->arch.use_pfmfi = sclp.has_pfmfi;
2764 kvm->arch.use_skf = sclp.has_skey;
2765 spin_lock_init(&kvm->arch.start_stop_lock);
2766 kvm_s390_vsie_init(kvm);
2767 if (use_gisa)
2768 kvm_s390_gisa_init(kvm);
2769 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
2770
2771 return 0;
2772out_err:
2773 free_page((unsigned long)kvm->arch.sie_page2);
2774 debug_unregister(kvm->arch.dbf);
2775 sca_dispose(kvm);
2776 KVM_EVENT(3, "creation of vm failed: %d", rc);
2777 return rc;
2778}
2779
2780void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2781{
2782 u16 rc, rrc;
2783
2784 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
2785 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
2786 kvm_s390_clear_local_irqs(vcpu);
2787 kvm_clear_async_pf_completion_queue(vcpu);
2788 if (!kvm_is_ucontrol(vcpu->kvm))
2789 sca_del_vcpu(vcpu);
2790
2791 if (kvm_is_ucontrol(vcpu->kvm))
2792 gmap_remove(vcpu->arch.gmap);
2793
2794 if (vcpu->kvm->arch.use_cmma)
2795 kvm_s390_vcpu_unsetup_cmma(vcpu);
2796 	/* We cannot hold the vcpu mutex here, we are already dying */
2797 if (kvm_s390_pv_cpu_get_handle(vcpu))
2798 kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
2799 free_page((unsigned long)(vcpu->arch.sie_block));
2800}
2801
2802static void kvm_free_vcpus(struct kvm *kvm)
2803{
2804 unsigned int i;
2805 struct kvm_vcpu *vcpu;
2806
2807 kvm_for_each_vcpu(i, vcpu, kvm)
2808 kvm_vcpu_destroy(vcpu);
2809
2810 mutex_lock(&kvm->lock);
2811 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2812 kvm->vcpus[i] = NULL;
2813
2814 atomic_set(&kvm->online_vcpus, 0);
2815 mutex_unlock(&kvm->lock);
2816}
2817
2818void kvm_arch_destroy_vm(struct kvm *kvm)
2819{
2820 u16 rc, rrc;
2821
2822 kvm_free_vcpus(kvm);
2823 sca_dispose(kvm);
2824 kvm_s390_gisa_destroy(kvm);
2825 /*
2826 * We are already at the end of life and kvm->lock is not taken.
2827 * This is ok as the file descriptor is closed by now and nobody
2828 * can mess with the pv state. To avoid lockdep_assert_held from
2829 * complaining we do not use kvm_s390_pv_is_protected.
2830 */
2831 if (kvm_s390_pv_get_handle(kvm))
2832 kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
2833 debug_unregister(kvm->arch.dbf);
2834 free_page((unsigned long)kvm->arch.sie_page2);
2835 if (!kvm_is_ucontrol(kvm))
2836 gmap_remove(kvm->arch.gmap);
2837 kvm_s390_destroy_adapters(kvm);
2838 kvm_s390_clear_float_irqs(kvm);
2839 kvm_s390_vsie_destroy(kvm);
2840 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
2841}
2842
2843/* Section: vcpu related */
2844static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2845{
2846 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
2847 if (!vcpu->arch.gmap)
2848 return -ENOMEM;
2849 vcpu->arch.gmap->private = vcpu->kvm;
2850
2851 return 0;
2852}
2853
2854static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2855{
2856 if (!kvm_s390_use_sca_entries())
2857 return;
2858 read_lock(&vcpu->kvm->arch.sca_lock);
2859 if (vcpu->kvm->arch.use_esca) {
2860 struct esca_block *sca = vcpu->kvm->arch.sca;
2861
2862 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
2863 sca->cpu[vcpu->vcpu_id].sda = 0;
2864 } else {
2865 struct bsca_block *sca = vcpu->kvm->arch.sca;
2866
2867 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
2868 sca->cpu[vcpu->vcpu_id].sda = 0;
2869 }
2870 read_unlock(&vcpu->kvm->arch.sca_lock);
2871}
2872
2873static void sca_add_vcpu(struct kvm_vcpu *vcpu)
2874{
2875 if (!kvm_s390_use_sca_entries()) {
2876 struct bsca_block *sca = vcpu->kvm->arch.sca;
2877
2878 /* we still need the basic sca for the ipte control */
2879 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2880 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
2881 return;
2882 }
2883 read_lock(&vcpu->kvm->arch.sca_lock);
2884 if (vcpu->kvm->arch.use_esca) {
2885 struct esca_block *sca = vcpu->kvm->arch.sca;
2886
2887 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
2888 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2889 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
2890 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
2891 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
2892 } else {
2893 struct bsca_block *sca = vcpu->kvm->arch.sca;
2894
2895 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
2896 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2897 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
2898 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
2899 }
2900 read_unlock(&vcpu->kvm->arch.sca_lock);
2901}
2902
2903/* Basic SCA to Extended SCA data copy routines */
2904static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2905{
2906 d->sda = s->sda;
2907 d->sigp_ctrl.c = s->sigp_ctrl.c;
2908 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2909}
2910
2911static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2912{
2913 int i;
2914
2915 d->ipte_control = s->ipte_control;
2916 d->mcn[0] = s->mcn;
2917 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2918 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2919}
2920
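/*
 * Switch a VM from the basic to the extended SCA. All VCPUs are blocked and
 * the SCA lock is held for writing while the SCA pointers are rewritten, so
 * the old block can be freed safely afterwards.
 */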
2921static int sca_switch_to_extended(struct kvm *kvm)
2922{
2923 struct bsca_block *old_sca = kvm->arch.sca;
2924 struct esca_block *new_sca;
2925 struct kvm_vcpu *vcpu;
2926 unsigned int vcpu_idx;
2927 u32 scaol, scaoh;
2928
2929 if (kvm->arch.use_esca)
2930 return 0;
2931
2932 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
2933 if (!new_sca)
2934 return -ENOMEM;
2935
2936 scaoh = (u32)((u64)(new_sca) >> 32);
2937 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2938
2939 kvm_s390_vcpu_block_all(kvm);
2940 write_lock(&kvm->arch.sca_lock);
2941
2942 sca_copy_b_to_e(new_sca, old_sca);
2943
2944 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2945 vcpu->arch.sie_block->scaoh = scaoh;
2946 vcpu->arch.sie_block->scaol = scaol;
2947 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
2948 }
2949 kvm->arch.sca = new_sca;
2950 kvm->arch.use_esca = 1;
2951
2952 write_unlock(&kvm->arch.sca_lock);
2953 kvm_s390_vcpu_unblock_all(kvm);
2954
2955 free_page((unsigned long)old_sca);
2956
2957 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2958 old_sca, kvm->arch.sca);
2959 return 0;
2960}
2961
2962static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2963{
2964 int rc;
2965
2966 if (!kvm_s390_use_sca_entries()) {
2967 if (id < KVM_MAX_VCPUS)
2968 return true;
2969 return false;
2970 }
2971 if (id < KVM_S390_BSCA_CPU_SLOTS)
2972 return true;
2973 if (!sclp.has_esca || !sclp.has_64bscao)
2974 return false;
2975
2976 mutex_lock(&kvm->lock);
2977 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2978 mutex_unlock(&kvm->lock);
2979
2980 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
2981}
2982
2983/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2984static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2985{
2986 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
2987 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
2988 vcpu->arch.cputm_start = get_tod_clock_fast();
2989 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
2990}
2991
2992/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2993static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2994{
2995 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
2996 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
2997 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2998 vcpu->arch.cputm_start = 0;
2999 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3000}
3001
3002/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3003static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3004{
3005 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3006 vcpu->arch.cputm_enabled = true;
3007 __start_cpu_timer_accounting(vcpu);
3008}
3009
3010/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3011static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3012{
3013 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3014 __stop_cpu_timer_accounting(vcpu);
3015 vcpu->arch.cputm_enabled = false;
3016}
3017
3018static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3019{
3020 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3021 __enable_cpu_timer_accounting(vcpu);
3022 preempt_enable();
3023}
3024
3025static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3026{
3027 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3028 __disable_cpu_timer_accounting(vcpu);
3029 preempt_enable();
3030}
3031
3032/* set the cpu timer - may only be called from the VCPU thread itself */
3033void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3034{
3035 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3036 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3037 if (vcpu->arch.cputm_enabled)
3038 vcpu->arch.cputm_start = get_tod_clock_fast();
3039 vcpu->arch.sie_block->cputm = cputm;
3040 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3041 preempt_enable();
3042}
3043
3044/* update and get the cpu timer - can also be called from other VCPU threads */
3045__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3046{
3047 unsigned int seq;
3048 __u64 value;
3049
3050 if (unlikely(!vcpu->arch.cputm_enabled))
3051 return vcpu->arch.sie_block->cputm;
3052
3053 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3054 do {
3055 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3056 /*
3057 * If the writer would ever execute a read in the critical
3058 * section, e.g. in irq context, we have a deadlock.
3059 */
3060 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3061 value = vcpu->arch.sie_block->cputm;
3062 /* if cputm_start is 0, accounting is being started/stopped */
3063 if (likely(vcpu->arch.cputm_start))
3064 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3065 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3066 preempt_enable();
3067 return value;
3068}
3069
3070void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3071{
3073 gmap_enable(vcpu->arch.enabled_gmap);
3074 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
3075 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3076 __start_cpu_timer_accounting(vcpu);
3077 vcpu->cpu = cpu;
3078}
3079
3080void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3081{
3082 vcpu->cpu = -1;
3083 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3084 __stop_cpu_timer_accounting(vcpu);
3085 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
3086 vcpu->arch.enabled_gmap = gmap_get_enabled();
3087 gmap_disable(vcpu->arch.enabled_gmap);
3089}
3090
3091void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
3092{
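	/* Inherit the VM-wide TOD epoch so this VCPU's clock matches the others */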
3093 mutex_lock(&vcpu->kvm->lock);
3094 preempt_disable();
3095 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
3096 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
3097 preempt_enable();
3098 mutex_unlock(&vcpu->kvm->lock);
3099 if (!kvm_is_ucontrol(vcpu->kvm)) {
3100 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
3101 sca_add_vcpu(vcpu);
3102 }
3103 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3104 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3105 /* make vcpu_load load the right gmap on the first trigger */
3106 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
3107}
3108
3109static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3110{
3111 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3112 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3113 return true;
3114 return false;
3115}
3116
3117static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3118{
3119 /* At least one ECC subfunction must be present */
3120 return kvm_has_pckmo_subfunc(kvm, 32) ||
3121 kvm_has_pckmo_subfunc(kvm, 33) ||
3122 kvm_has_pckmo_subfunc(kvm, 34) ||
3123 kvm_has_pckmo_subfunc(kvm, 40) ||
3124 kvm_has_pckmo_subfunc(kvm, 41);
3126}
3127
3128static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3129{
3130 /*
3131 * If the AP instructions are not being interpreted and the MSAX3
3132 * facility is not configured for the guest, there is nothing to set up.
3133 */
3134 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
3135 return;
3136
3137 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
3138 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
3139 vcpu->arch.sie_block->eca &= ~ECA_APIE;
3140 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
3141
3142 if (vcpu->kvm->arch.crypto.apie)
3143 vcpu->arch.sie_block->eca |= ECA_APIE;
3144
3145 /* Set up protected key support */
3146 if (vcpu->kvm->arch.crypto.aes_kw) {
3147 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
3148 /* ecc is also wrapped with AES key */
3149 if (kvm_has_pckmo_ecc(vcpu->kvm))
3150 vcpu->arch.sie_block->ecd |= ECD_ECC;
3151 }
3152
3153 if (vcpu->kvm->arch.crypto.dea_kw)
3154 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
3155}
3156
3157void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3158{
3159 free_page(vcpu->arch.sie_block->cbrlo);
3160 vcpu->arch.sie_block->cbrlo = 0;
3161}
3162
3163int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3164{
3165 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL_ACCOUNT);
3166 if (!vcpu->arch.sie_block->cbrlo)
3167 return -ENOMEM;
3168 return 0;
3169}
3170
3171static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3172{
3173 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3174
3175 vcpu->arch.sie_block->ibc = model->ibc;
3176 if (test_kvm_facility(vcpu->kvm, 7))
3177 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
3178}
3179
3180static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3181{
3182 int rc = 0;
3183 u16 uvrc, uvrrc;
3184
3185 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3186 CPUSTAT_SM |
3187 CPUSTAT_STOPPED);
3188
3189 if (test_kvm_facility(vcpu->kvm, 78))
3190 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
3191 else if (test_kvm_facility(vcpu->kvm, 8))
3192 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
3193
3194 kvm_s390_vcpu_setup_model(vcpu);
3195
3196 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3197 if (MACHINE_HAS_ESOP)
3198 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
3199 if (test_kvm_facility(vcpu->kvm, 9))
3200 vcpu->arch.sie_block->ecb |= ECB_SRSI;
3201 if (test_kvm_facility(vcpu->kvm, 73))
3202 vcpu->arch.sie_block->ecb |= ECB_TE;
3203
3204 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
3205 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
3206 if (test_kvm_facility(vcpu->kvm, 130))
3207 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3208 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
3209 if (sclp.has_cei)
3210 vcpu->arch.sie_block->eca |= ECA_CEI;
3211 if (sclp.has_ib)
3212 vcpu->arch.sie_block->eca |= ECA_IB;
3213 if (sclp.has_siif)
3214 vcpu->arch.sie_block->eca |= ECA_SII;
3215 if (sclp.has_sigpif)
3216 vcpu->arch.sie_block->eca |= ECA_SIGPI;
3217 if (test_kvm_facility(vcpu->kvm, 129)) {
3218 vcpu->arch.sie_block->eca |= ECA_VX;
3219 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3220 }
3221 if (test_kvm_facility(vcpu->kvm, 139))
3222 vcpu->arch.sie_block->ecd |= ECD_MEF;
3223 if (test_kvm_facility(vcpu->kvm, 156))
3224 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3225 if (vcpu->arch.sie_block->gd) {
3226 vcpu->arch.sie_block->eca |= ECA_AIV;
3227 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3228 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3229 }
3230 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3231 | SDNXC;
3232 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
3233
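	/*
	 * Storage keys are handled lazily: with the keyless subset (KSS)
	 * facility the guest starts keyless and SIE exits on first key
	 * usage; without KSS the key instructions are intercepted instead
	 * (cf. kvm_s390_skey_check_enable()).
	 */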
	if (sclp.has_kss)
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
	else
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	vcpu->arch.sie_block->hpid = HPID_KVM;

	kvm_s390_vcpu_crypto_setup(vcpu);

	mutex_lock(&vcpu->kvm->lock);
	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
		rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
		if (rc)
			kvm_s390_vcpu_unsetup_cmma(vcpu);
	}
	mutex_unlock(&vcpu->kvm->lock);

	return rc;
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		return -EINVAL;
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct sie_page *sie_page;
	int rc;

	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!sie_page)
		return -ENOMEM;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
	if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
		vcpu->arch.sie_block->gd |= GISA_FORMAT1;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT |
				    KVM_SYNC_DIAG318;
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 82))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
	if (test_kvm_facility(vcpu->kvm, 133))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm)) {
		rc = __kvm_ucontrol_vcpu_init(vcpu);
		if (rc)
			goto out_free_sie_block;
	}

	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);

	rc = kvm_s390_vcpu_setup(vcpu);
	if (rc)
		goto out_ucontrol_uninit;
	return 0;

out_ucontrol_uninit:
	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
	return rc;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->prog20) &
	       (PROG_BLOCK_SIE | PROG_REQUEST);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
	kvm_s390_vsie_kick(vcpu);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}

bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	/* do not poll with more than halt_poll_max_steal percent of steal time */
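	/*
	 * avg_steal_timer holds the average steal time per timer tick in
	 * TOD-clock units; TICK_USEC << 12 is the tick length in the same
	 * units (1 us == 4096 TOD units), so the quotient times 100 is the
	 * steal time in percent.
	 */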
	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
	    halt_poll_max_steal) {
		vcpu->stat.halt_no_poll_steal++;
		return true;
	}
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));

	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	/* Initial reset is a superset of the normal reset */
	kvm_arch_vcpu_ioctl_normal_reset(vcpu);

	/*
	 * This equals the initial cpu reset in the POP, but we don't switch
	 * to ESA. We not only reset the internal data, but also ...
	 */
	vcpu->arch.sie_block->gpsw.mask = 0;
	vcpu->arch.sie_block->gpsw.addr = 0;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0;
	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;

	/* ... the data in sync regs */
	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
	vcpu->run->s.regs.ckc = 0;
	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
	vcpu->run->psw_addr = 0;
	vcpu->run->psw_mask = 0;
	vcpu->run->s.regs.todpr = 0;
	vcpu->run->s.regs.cputm = 0;
	vcpu->run->s.regs.ckc = 0;
	vcpu->run->s.regs.pp = 0;
	vcpu->run->s.regs.gbea = 1;
	vcpu->run->s.regs.fpc = 0;
	/*
	 * Do not reset these registers in the protected case, as some of
	 * them are overlayed and they are not accessible in this case
	 * anyway.
	 */
	if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->gbea = 1;
		vcpu->arch.sie_block->pp = 0;
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->todpr = 0;
	}
}

static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Clear reset is a superset of the initial reset */
	kvm_arch_vcpu_ioctl_initial_reset(vcpu);

	memset(&regs->gprs, 0, sizeof(regs->gprs));
	memset(&regs->vrs, 0, sizeof(regs->vrs));
	memset(&regs->acrs, 0, sizeof(regs->acrs));
	memset(&regs->gscb, 0, sizeof(regs->gscb));

	regs->etoken = 0;
	regs->etoken_extension = 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int ret = 0;

	vcpu_load(vcpu);

	if (test_fp_ctl(fpu->fpc)) {
		ret = -EINVAL;
		goto out;
	}
	vcpu->run->s.regs.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));

out:
	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);

	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;

	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu_load(vcpu);

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
		rc = -EINVAL;
		goto out;
	}
	if (!sclp.has_gpere) {
		rc = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
	}

out:
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret;

	vcpu_load(vcpu);

	/* CHECK_STOP and LOAD are not supported yet */
	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				      KVM_MP_STATE_OPERATING;

	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	vcpu_load(vcpu);

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		rc = kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		rc = kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
			rc = -ENXIO;
			break;
		}
		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
		break;
	case KVM_MP_STATE_CHECK_STOP:
		fallthrough;	/* CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	vcpu_put(vcpu);
	return rc;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!kvm_request_pending(vcpu))
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;

		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc) {
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			return rc;
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
		/*
		 * Disable CMM virtualization; we will emulate the ESSA
		 * instruction manually, in order to provide additional
		 * functionalities needed for live migration.
		 */
		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
		/*
		 * Re-enable CMM virtualization if CMMA is available and
		 * CMM has been used.
		 */
		if ((vcpu->kvm->arch.use_cmma) &&
		    (vcpu->kvm->mm->context.uses_cmm))
			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		goto retry;
	}

	/* nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	/* we left the vsie handler, nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);

	return 0;
}

void kvm_s390_set_tod_clock(struct kvm *kvm,
			    const struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_vcpu *vcpu;
	union tod_clock clk;
	int i;

	mutex_lock(&kvm->lock);
	preempt_disable();

	store_tod_clock_ext(&clk);

	kvm->arch.epoch = gtod->tod - clk.tod;
	kvm->arch.epdx = 0;
	if (test_kvm_facility(kvm, 139)) {
		kvm->arch.epdx = gtod->epoch_idx - clk.ei;
		if (kvm->arch.epoch > gtod->tod)
			kvm->arch.epdx -= 1;
	}

	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		vcpu->arch.sie_block->epdx = kvm->arch.epdx;
	}

	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

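	/*
	 * Pfault INIT is injected into the vcpu itself, while pfault DONE
	 * is injected as a floating interrupt; per the pfault handshake
	 * the completion may be presented on any CPU.
	 */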
	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);

	return true;
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to clean up
	 */
	return true;
}

static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return false;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return false;
	if (psw_extint_disabled(vcpu))
		return false;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return false;
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
		return false;
	if (!vcpu->arch.gmap->pfault_enabled)
		return false;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return false;

	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

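	/*
	 * Re-arm the GISA kick tracking before (re)entering SIE so that a
	 * newly arriving GISA interrupt kicks this vcpu again.
	 */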
	clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	struct mcck_volatile_info *mcck_info;
	struct sie_page *sie_page;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (exit_reason == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		sie_page = container_of(vcpu->arch.sie_block,
					struct sie_page, sie_block);
		mcck_info = &sie_page->mcck_info;
		kvm_s390_reinject_machine_check(vcpu, mcck_info);
		return 0;
	}

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		vcpu->stat.pfault_sync++;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}

#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;
	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when
	 * running the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there must be
		 * no uaccess between guest_enter and guest_exit.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
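		/*
		 * Protected guests run on the register copy in the SIE
		 * page (the ultravisor controls which values become
		 * visible), so mirror the gprs in before entering SIE and
		 * back out after leaving it.
		 */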
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(sie_page->pv_grregs,
			       vcpu->run->s.regs.gprs,
			       sizeof(sie_page->pv_grregs));
		}
		if (test_cpu_flag(CIF_FPU))
			load_fpu_regs();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(vcpu->run->s.regs.gprs,
			       sie_page->pv_grregs,
			       sizeof(sie_page->pv_grregs));
			/*
			 * We're not allowed to inject interrupts on intercepts
			 * that leave the guest state in an "in-between" state
			 * where the next SIE entry will do a continuation.
			 * Fence interrupts in our "internal" PSW.
			 */
			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
			}
		}
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
	}
	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->v &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
	/*
	 * If userspace sets the gscb (e.g. after migration) to non-zero,
	 * we should enable GS here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
	    test_kvm_facility(vcpu->kvm, 82)) {
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
	}
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (current->thread.gs_cb) {
			vcpu->arch.host_gscb = current->thread.gs_cb;
			save_gs_cb(vcpu->arch.host_gscb);
		}
		if (vcpu->arch.gs_enabled) {
			current->thread.gs_cb = (struct gs_cb *)
				&vcpu->run->s.regs.gscb;
			restore_gs_cb(current->thread.gs_cb);
		}
		preempt_enable();
	}
	/* SIE will load etoken directly from SDNX and therefore kvm_run */
}

static void sync_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
	}
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	/* save host (userspace) fprs/vrs */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
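	/* Let the lazy FPU machinery operate on the guest register copies. */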
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	/* Sync fmt2 only data */
	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
		sync_regs_fmt2(vcpu);
	} else {
		/*
		 * In several places we have to modify our internal view to
		 * not do things that are disallowed by the ultravisor. For
		 * example we must not inject interrupts after specific exits
		 * (e.g. 112 prefix page not secure). We do this by turning
		 * off the machine check, external and I/O interrupt bits
		 * of our PSW copy. To avoid getting validity intercepts, we
		 * do only accept the condition code from userspace.
		 */
		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
						   PSW_MASK_CC;
	}

	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs_fmt2(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (vcpu->arch.gs_enabled)
			save_gs_cb(current->thread.gs_cb);
		current->thread.gs_cb = vcpu->arch.host_gscb;
		restore_gs_cb(vcpu->arch.host_gscb);
		if (!vcpu->arch.host_gscb)
			__ctl_clear_bit(2, 4);
		vcpu->arch.host_gscb = NULL;
		preempt_enable();
	}
	/* SIE will save etoken directly into SDNX and therefore kvm_run */
}

static void store_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	/* Restore will be done lazily at return */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
		store_regs_fmt2(vcpu);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	int rc;

	if (kvm_run->immediate_exit)
		return -EINTR;

	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
		return -EINVAL;

	vcpu_load(vcpu);

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	/*
	 * no need to check the return value of vcpu_start: it can only fail
	 * for protvirt, and protvirt implies user-controlled cpu state
	 */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		rc = -EINVAL;
		goto out;
	}

	sync_regs(vcpu);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu);

	kvm_sigset_deactivate(vcpu);

	vcpu->stat.exit_userspace++;
out:
	vcpu_put(vcpu);
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

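	/*
	 * The save area has the same layout as the lowcore from the
	 * FP-register save area onwards; gpa was biased above so the
	 * __LC_*_SAVE_AREA offsets can be used directly below.
	 */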
	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
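	/* Only bits 0-55 of the clock comparator are stored, hence the shift. */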
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * switch in the run ioctl. Let's update our copies before we save
	 * it into the save area
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, r = 0, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return 0;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Let's tell the UV that we want to change into the operating state */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
		if (r) {
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
			return r;
		}
	}

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
	/*
	 * The real PSW might have changed due to a RESTART interpreted by the
	 * ultravisor. We block all interrupts and let the next sie exit
	 * refresh our view.
	 */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return 0;
}

int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, r = 0, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return 0;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Let's tell the UV that we want to change into the stopped state */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
		if (r) {
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
			return r;
		}
	}

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
				   struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	int r = 0;

	if (mop->flags || !mop->size)
		return -EINVAL;
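	/* Guard against wrap-around of size + offset. */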
	if (mop->size + mop->sida_offset < mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
		return -E2BIG;

	switch (mop->op) {
	case KVM_S390_MEMOP_SIDA_READ:
		if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
						 mop->sida_offset), mop->size))
			r = -EFAULT;

		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
					    mop->sida_offset), uaddr, mop->size))
			r = -EFAULT;
		break;
	}
	return r;
}

static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r = 0;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

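	/* Only the SIDA memops are usable on protected guests. */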
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	}

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

static long kvm_s390_guest_memsida_op(struct kvm_vcpu *vcpu,
				      struct kvm_s390_mem_op *mop)
{
	int r, srcu_idx;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		r = kvm_s390_guest_mem_op(vcpu, mop);
		break;
	case KVM_S390_MEMOP_SIDA_READ:
	case KVM_S390_MEMOP_SIDA_WRITE:
		/* we are locked against sida going away by the vcpu->mutex */
		r = kvm_s390_guest_sida_op(vcpu, mop);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	return r;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq = {};

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;
	u16 rc, rrc;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_store_status_unloaded(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_CLEAR_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_INITIAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_INITIAL,
					  &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_NORMAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EINVAL;
		if (kvm_s390_pv_cpu_is_protected(vcpu))
			break;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_memsida_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
	    && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks. Memory slots have to start and end on a
	 * segment boundary (1MB). The memory in userland may be fragmented
	 * into various different vmas. It is okay to mmap() and munmap()
	 * stuff in this slot after doing this call at any time.
	 */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	/* When we are protected, we should not change the memory slots */
	if (kvm_s390_pv_get_handle(kvm))
		return -EINVAL;
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int rc = 0;

	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
		fallthrough;
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
				      mem->guest_phys_addr, mem->memory_size);
		break;
	case KVM_MR_FLAGS_ONLY:
		break;
	default:
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
	}
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");