// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Test for s390x CPU resets
 *
 * Copyright (C) 2020, IBM
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"

#define LOCAL_IRQS 32

#define ARBITRARY_NON_ZERO_VCPU_ID 3

struct kvm_s390_irq buf[ARBITRARY_NON_ZERO_VCPU_ID + LOCAL_IRQS];

static uint8_t regs_null[512];

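/*
 * Guest code: dirty control, general purpose, floating point and access
 * registers as well as the fpc, then trap back to the host via diag 0x501
 * so the reset tests can check which of these values survive.
 */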
static void guest_code_initial(void)
{
	/* set several CRs to "safe" value */
	unsigned long cr2_59 = 0x10;	/* enable guarded storage */
	unsigned long cr8_63 = 0x1;	/* monitor mask = 1 */
	unsigned long cr10 = 1;		/* PER START */
	unsigned long cr11 = -1;	/* PER END */

	/* Dirty registers */
	asm volatile (
		"	lghi	2,0x11\n"	/* Round toward 0 */
		"	sfpc	2\n"		/* set fpc to !=0 */
		"	lctlg	2,2,%0\n"
		"	lctlg	8,8,%1\n"
		"	lctlg	10,10,%2\n"
		"	lctlg	11,11,%3\n"
		/* now clobber some general purpose regs */
		"	llihh	0,0xffff\n"
		"	llihl	1,0x5555\n"
		"	llilh	2,0xaaaa\n"
		"	llill	3,0x0000\n"
		/* now clobber a floating point reg */
		"	lghi	4,0x1\n"
		"	cdgbr	0,4\n"
		/* now clobber an access reg */
		"	sar	9,4\n"
		/* We embed diag 501 here to control register content */
		"	diag	0,0,0x501\n"
		:
		: "m" (cr2_59), "m" (cr8_63), "m" (cr10), "m" (cr11)
		/* no clobber list as this should not return */
	);
}

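/* Read one register via the ONE_REG interface and compare it to @value. */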
static void test_one_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t value)
{
	uint64_t eval_reg;

	vcpu_get_reg(vcpu, id, &eval_reg);
	TEST_ASSERT(eval_reg == value, "value == 0x%lx, got 0x%lx", value, eval_reg);
}

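/* No local interrupt may be left pending after any of the resets. */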
static void assert_noirq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_irq_state irq_state;
	int irqs;

	irq_state.len = sizeof(buf);
	irq_state.buf = (unsigned long)buf;
	irqs = __vcpu_ioctl(vcpu, KVM_S390_GET_IRQ_STATE, &irq_state);
	/*
	 * irqs contains the number of retrieved interrupts. Any interrupt
	 * (notably, the emergency call interrupt we have injected) should
	 * be cleared by the resets, so this should be 0.
	 */
	TEST_ASSERT(irqs >= 0, "Could not fetch IRQs: errno %d", errno);
	TEST_ASSERT(!irqs, "IRQ pending");
}

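/*
 * A clear reset must additionally zero the general purpose, access,
 * floating point and vector registers.
 */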
static void assert_clear(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *sync_regs = &vcpu->run->s.regs;
	struct kvm_sregs sregs;
	struct kvm_regs regs;
	struct kvm_fpu fpu;

	vcpu_regs_get(vcpu, &regs);
	TEST_ASSERT(!memcmp(&regs.gprs, regs_null, sizeof(regs.gprs)), "grs == 0");

	vcpu_sregs_get(vcpu, &sregs);
	TEST_ASSERT(!memcmp(&sregs.acrs, regs_null, sizeof(sregs.acrs)), "acrs == 0");

	vcpu_fpu_get(vcpu, &fpu);
	TEST_ASSERT(!memcmp(&fpu.fprs, regs_null, sizeof(fpu.fprs)), "fprs == 0");

	/* sync regs */
	TEST_ASSERT(!memcmp(sync_regs->gprs, regs_null, sizeof(sync_regs->gprs)),
		    "gprs0-15 == 0 (sync_regs)");

	TEST_ASSERT(!memcmp(sync_regs->acrs, regs_null, sizeof(sync_regs->acrs)),
		    "acrs0-15 == 0 (sync_regs)");

	TEST_ASSERT(!memcmp(sync_regs->vrs, regs_null, sizeof(sync_regs->vrs)),
		    "vrs0-15 == 0 (sync_regs)");
}

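/*
 * Register contents dirtied by guest_code_initial that a normal or
 * initial reset must leave untouched (only a clear reset wipes them).
 */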
static void assert_initial_noclear(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *sync_regs = &vcpu->run->s.regs;

	TEST_ASSERT(sync_regs->gprs[0] == 0xffff000000000000UL,
		    "gpr0 == 0xffff000000000000 (sync_regs)");
	TEST_ASSERT(sync_regs->gprs[1] == 0x0000555500000000UL,
		    "gpr1 == 0x0000555500000000 (sync_regs)");
	TEST_ASSERT(sync_regs->gprs[2] == 0x00000000aaaa0000UL,
		    "gpr2 == 0x00000000aaaa0000 (sync_regs)");
	TEST_ASSERT(sync_regs->gprs[3] == 0x0000000000000000UL,
		    "gpr3 == 0x0000000000000000 (sync_regs)");
	TEST_ASSERT(sync_regs->fprs[0] == 0x3ff0000000000000UL,
		    "fpr0 == 1.0 (sync_regs)");
	TEST_ASSERT(sync_regs->acrs[9] == 1, "ar9 == 1 (sync_regs)");
}

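/*
 * Register state after an initial CPU reset: CR0 and CR14 hold their
 * architected defaults, all other CRs, the timers, TOD programmable
 * register, fpc and PSW are zero, and the breaking-event address is 1.
 */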
static void assert_initial(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *sync_regs = &vcpu->run->s.regs;
	struct kvm_sregs sregs;
	struct kvm_fpu fpu;

	/* KVM_GET_SREGS */
	vcpu_sregs_get(vcpu, &sregs);
	TEST_ASSERT(sregs.crs[0] == 0xE0UL, "cr0 == 0xE0 (KVM_GET_SREGS)");
	TEST_ASSERT(sregs.crs[14] == 0xC2000000UL,
		    "cr14 == 0xC2000000 (KVM_GET_SREGS)");
	TEST_ASSERT(!memcmp(&sregs.crs[1], regs_null, sizeof(sregs.crs[1]) * 12),
		    "cr1-13 == 0 (KVM_GET_SREGS)");
	TEST_ASSERT(sregs.crs[15] == 0, "cr15 == 0 (KVM_GET_SREGS)");

	/* sync regs */
	TEST_ASSERT(sync_regs->crs[0] == 0xE0UL, "cr0 == 0xE0 (sync_regs)");
	TEST_ASSERT(sync_regs->crs[14] == 0xC2000000UL,
		    "cr14 == 0xC2000000 (sync_regs)");
	TEST_ASSERT(!memcmp(&sync_regs->crs[1], regs_null,
			    sizeof(sync_regs->crs[1]) * 12),
		    "cr1-13 == 0 (sync_regs)");
	TEST_ASSERT(sync_regs->crs[15] == 0, "cr15 == 0 (sync_regs)");
	TEST_ASSERT(sync_regs->fpc == 0, "fpc == 0 (sync_regs)");
	TEST_ASSERT(sync_regs->todpr == 0, "todpr == 0 (sync_regs)");
	TEST_ASSERT(sync_regs->cputm == 0, "cputm == 0 (sync_regs)");
	TEST_ASSERT(sync_regs->ckc == 0, "ckc == 0 (sync_regs)");
	TEST_ASSERT(sync_regs->pp == 0, "pp == 0 (sync_regs)");
	TEST_ASSERT(sync_regs->gbea == 1, "gbea == 1 (sync_regs)");

	/* kvm_run */
	TEST_ASSERT(vcpu->run->psw_addr == 0, "psw_addr == 0 (kvm_run)");
	TEST_ASSERT(vcpu->run->psw_mask == 0, "psw_mask == 0 (kvm_run)");

	vcpu_fpu_get(vcpu, &fpu);
	TEST_ASSERT(!fpu.fpc, "fpc == 0");

	test_one_reg(vcpu, KVM_REG_S390_GBEA, 1);
	test_one_reg(vcpu, KVM_REG_S390_PP, 0);
	test_one_reg(vcpu, KVM_REG_S390_TODPR, 0);
	test_one_reg(vcpu, KVM_REG_S390_CPU_TIMER, 0);
	test_one_reg(vcpu, KVM_REG_S390_CLOCK_COMP, 0);
}

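/* Control registers set by the guest that a normal reset must not clear. */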
static void assert_normal_noclear(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *sync_regs = &vcpu->run->s.regs;

	TEST_ASSERT(sync_regs->crs[2] == 0x10, "cr2 == 0x10 (sync_regs)");
	TEST_ASSERT(sync_regs->crs[8] == 1, "cr8 == 1 (sync_regs)");
	TEST_ASSERT(sync_regs->crs[10] == 1, "cr10 == 1 (sync_regs)");
	TEST_ASSERT(sync_regs->crs[11] == -1, "cr11 == -1 (sync_regs)");
}

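/*
 * State that every reset flavor must clear: the pfault token is
 * invalidated and no interrupts remain pending.
 */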
static void assert_normal(struct kvm_vcpu *vcpu)
{
	test_one_reg(vcpu, KVM_REG_S390_PFTOKEN, KVM_S390_PFAULT_TOKEN_INVALID);
	TEST_ASSERT(vcpu->run->s.regs.pft == KVM_S390_PFAULT_TOKEN_INVALID,
		    "pft == 0xff..... (sync_regs)");
	assert_noirq(vcpu);
}

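/*
 * Queue an emergency-call interrupt for the vCPU so each reset has
 * pending interrupt state to clear.
 */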
static void inject_irq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_irq_state irq_state;
	struct kvm_s390_irq *irq = &buf[0];
	int irqs;

	/* Inject IRQ */
	irq_state.len = sizeof(struct kvm_s390_irq);
	irq_state.buf = (unsigned long)buf;
	irq->type = KVM_S390_INT_EMERGENCY;
	irq->u.emerg.code = vcpu->id;
	irqs = __vcpu_ioctl(vcpu, KVM_S390_SET_IRQ_STATE, &irq_state);
	TEST_ASSERT(irqs >= 0, "Error injecting EMERGENCY IRQ errno %d", errno);
}

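/* Create a VM with a single vCPU (deliberately using a non-zero id). */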
static struct kvm_vm *create_vm(struct kvm_vcpu **vcpu)
{
	struct kvm_vm *vm;

	vm = vm_create(1);

	*vcpu = vm_vcpu_add(vm, ARBITRARY_NON_ZERO_VCPU_ID, guest_code_initial);

	return vm;
}

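/*
 * Each test runs the guest until it traps back to the host, injects an
 * emergency interrupt, issues the reset under test and then checks what
 * must and what must not have been cleared.
 */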
static void test_normal(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	ksft_print_msg("Testing normal reset\n");
	vm = create_vm(&vcpu);

	vcpu_run(vcpu);

	inject_irq(vcpu);

	vcpu_ioctl(vcpu, KVM_S390_NORMAL_RESET, NULL);

	/* must be cleared */
	assert_normal(vcpu);
	/* must not be cleared */
	assert_normal_noclear(vcpu);
	assert_initial_noclear(vcpu);

	kvm_vm_free(vm);
}

static void test_initial(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	ksft_print_msg("Testing initial reset\n");
	vm = create_vm(&vcpu);

	vcpu_run(vcpu);

	inject_irq(vcpu);

	vcpu_ioctl(vcpu, KVM_S390_INITIAL_RESET, NULL);

	/* must be cleared */
	assert_normal(vcpu);
	assert_initial(vcpu);
	/* must not be cleared */
	assert_initial_noclear(vcpu);

	kvm_vm_free(vm);
}

static void test_clear(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	ksft_print_msg("Testing clear reset\n");
	vm = create_vm(&vcpu);

	vcpu_run(vcpu);

	inject_irq(vcpu);

	vcpu_ioctl(vcpu, KVM_S390_CLEAR_RESET, NULL);

	/* must be cleared */
	assert_normal(vcpu);
	assert_initial(vcpu);
	assert_clear(vcpu);

	kvm_vm_free(vm);
}

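/*
 * Subtests; the "normal" and "clear" resets require the
 * KVM_CAP_S390_VCPU_RESETS capability.
 */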
struct testdef {
	const char *name;
	void (*test)(void);
	bool needs_cap;
} testlist[] = {
	{ "initial", test_initial, false },
	{ "normal", test_normal, true },
	{ "clear", test_clear, true },
};

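/* Run all subtests as TAP tests; skip those needing a missing capability. */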
int main(int argc, char *argv[])
{
	bool has_s390_vcpu_resets = kvm_check_cap(KVM_CAP_S390_VCPU_RESETS);
	int idx;

	ksft_print_header();
	ksft_set_plan(ARRAY_SIZE(testlist));

	for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
		if (!testlist[idx].needs_cap || has_s390_vcpu_resets) {
			testlist[idx].test();
			ksft_test_result_pass("%s\n", testlist[idx].name);
		} else {
			ksft_test_result_skip("%s - no VCPU_RESETS capability\n",
					      testlist[idx].name);
		}
	}

	ksft_finished();	/* Print results and exit() accordingly */
}