/*
 * interrupt.c - handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"

static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

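/*
 * Check whether a pending interrupt can be delivered right now, given the
 * guest PSW external-interrupt mask and the subclass mask bits in CR0.
 */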
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	default:
		BUG();
	}
	return 0;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_ECALL_PEND |
		CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		&vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

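/*
 * The interrupt cannot be delivered in the current guest state: flag the
 * SIE control block so we regain control when the guest changes the
 * relevant mask (PSW external mask or the subclass bits in CR0).
 */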
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	default:
		BUG();
	}
}

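/*
 * Deliver one interrupt: store the interruption code and the old PSW in the
 * guest lowcore and load the corresponding new PSW (or, for SIGP orders,
 * update the vcpu state directly).
 */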
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				   struct kvm_s390_interrupt_info *inti)
{
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc, exception = 0;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
		vcpu->stat.deliver_emergency_signal++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_EXTERNAL_CALL:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
		vcpu->stat.deliver_external_call++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
		vcpu->stat.deliver_service_signal++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
			inti->ext.ext_params2);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
		__set_intercept_indicator(vcpu, inti);
		break;

	case KVM_S390_SIGP_SET_PREFIX:
		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
			   inti->prefix.address);
		vcpu->stat.deliver_prefix_signal++;
		kvm_s390_set_prefix(vcpu, inti->prefix.address);
		break;

	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
		vcpu->stat.deliver_restart_signal++;
		rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
			restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
		break;

	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
			   table[vcpu->arch.sie_block->ipa >> 14]);
		vcpu->stat.deliver_program_int++;
		rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_PGM_ILC,
			table[vcpu->arch.sie_block->ipa >> 14]);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_PGM_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	default:
		BUG();
	}
	if (exception) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
			"delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
}

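/*
 * Deliver the clock comparator external interrupt (code 0x1004) if the
 * guest has clock comparator interrupts enabled.
 */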
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc, exception = 0;

	if (psw_extint_disabled(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
		&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
		__LC_EXT_NEW_PSW, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	if (exception) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
			"delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
	return 1;
}

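/* Any deliverable local or floating interrupt pending, or ckc already due? */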
static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock_bh(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if ((!rc) && (vcpu->arch.sie_block->ckc <
		get_clock() + vcpu->arch.sie_block->epoch)) {
		if ((!psw_extint_disabled(vcpu)) &&
		    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
			rc = 1;
	}

	return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

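/*
 * The guest entered an enabled wait state: sleep until an interrupt becomes
 * pending, the clock comparator timer fires, or a signal is delivered.
 */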
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;
	DECLARE_WAITQUEUE(wait, current);

	vcpu->stat.exit_wait_state++;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;

	__set_cpu_idle(vcpu);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 0;
	spin_unlock_bh(&vcpu->arch.local_int.lock);

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		__unset_cpu_idle(vcpu);
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (psw_extint_disabled(vcpu) ||
	    (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_clock() + vcpu->arch.sie_block->epoch;
	if (vcpu->arch.sie_block->ckc < now) {
		__unset_cpu_idle(vcpu);
		return 0;
	}

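	/*
	 * Convert the remaining TOD clock delta to nanoseconds for the
	 * hrtimer: one TOD clock unit is 1/4096 microsecond, i.e. 125/512 ns,
	 * hence the "* 125 >> 9".
	 */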
	sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;

	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	spin_lock(&vcpu->arch.local_int.float_int->lock);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
	while (list_empty(&vcpu->arch.local_int.list) &&
		list_empty(&vcpu->arch.local_int.float_int->list) &&
		(!vcpu->arch.local_int.timer_due) &&
		!signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		spin_unlock(&vcpu->arch.local_int.float_int->lock);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
		spin_lock(&vcpu->arch.local_int.float_int->lock);
		spin_lock_bh(&vcpu->arch.local_int.lock);
	}
	__unset_cpu_idle(vcpu);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	spin_unlock(&vcpu->arch.local_int.float_int->lock);
	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_tasklet(unsigned long parm)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

	spin_lock(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 1;
	if (waitqueue_active(&vcpu->arch.local_int.wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock(&vcpu->arch.local_int.lock);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

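/*
 * Walk the local and floating interrupt lists, deliver everything that is
 * deliverable in the current guest state and set intercept indicators for
 * the rest.
 */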
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if ((vcpu->arch.sie_block->ckc <
		get_clock() + vcpu->arch.sie_block->epoch))
		__try_deliver_ckc_interrupt(vcpu);

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	spin_lock_bh(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(&li->wq));
	spin_unlock_bh(&li->lock);
	return 0;
}

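/*
 * Inject a floating interrupt (pending for the whole VM) and wake an idle
 * vcpu, or pick the next one round-robin, to deliver it.
 */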
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti;
	int sigcpu;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	default:
		kfree(inti);
		return -EINVAL;
	}

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	list_add_tail(&inti->list, &fi->list);
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (fi->local_int[sigcpu] == NULL);
	}
	li = fi->local_int[sigcpu];
	spin_lock_bh(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return 0;
}

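/*
 * Inject an interrupt into a single vcpu's local interrupt list and wake the
 * vcpu if it is waiting.
 */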
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		inti->type = s390int->type;
		inti->pgm.code = s390int->parm;
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		inti->prefix.address = s390int->parm;
		inti->type = s390int->type;
		VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_STOP:
	case KVM_S390_RESTART:
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
		inti->type = s390int->type;
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	default:
		kfree(inti);
		return -EINVAL;
	}

	mutex_lock(&vcpu->kvm->lock);
	li = &vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
		list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock_bh(&li->lock);
	mutex_unlock(&vcpu->kvm->lock);
	return 0;
}