// SPDX-License-Identifier: GPL-2.0
/*
 * Machine check handler
 *
 * Copyright IBM Corp. 2000, 2009
 * Author(s): Ingo Adlung <adlung@de.ibm.com>,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>,
 */

#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/entry-common.h>
#include <linux/hardirq.h>
#include <linux/log2.h>
#include <linux/kprobes.h>
#include <linux/kmemleak.h>
#include <linux/time.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/kvm_host.h>
#include <linux/export.h>
#include <asm/lowcore.h>
#include <asm/smp.h>
#include <asm/stp.h>
#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/crw.h>
#include <asm/switch_to.h>
#include <asm/ctl_reg.h>
#include <asm/asm-offsets.h>
#include <asm/pai.h>
#include <asm/vx-insn.h>

struct mcck_struct {
	unsigned int kill_task : 1;
	unsigned int channel_report : 1;
	unsigned int warning : 1;
	unsigned int stp_queue : 1;
	unsigned long mcck_code;
};

static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);

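/*
 * A machine check extended save area (MCESA) is only needed if the
 * vector or the guarded storage facility is present: their registers
 * do not fit into the classic lowcore save areas.
 */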
static inline int nmi_needs_mcesa(void)
{
	return MACHINE_HAS_VX || MACHINE_HAS_GS;
}

/*
 * The initial machine check extended save area for the boot CPU.
 * It will be replaced on the boot CPU reinit with an allocated
 * structure. The structure is required for machine checks happening
 * early in the boot process.
 */
static struct mcesa boot_mcesa __aligned(MCESA_MAX_SIZE);

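/*
 * The save area designation is a physical address. If the guarded
 * storage facility is available, the low order bits additionally
 * carry the save area size encoded as log2 of MCESA_MAX_SIZE.
 */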
void __init nmi_alloc_mcesa_early(u64 *mcesad)
{
	if (!nmi_needs_mcesa())
		return;
	*mcesad = __pa(&boot_mcesa);
	if (MACHINE_HAS_GS)
		*mcesad |= ilog2(MCESA_MAX_SIZE);
}

int nmi_alloc_mcesa(u64 *mcesad)
{
	unsigned long size;
	void *origin;

	*mcesad = 0;
	if (!nmi_needs_mcesa())
		return 0;
	size = MACHINE_HAS_GS ? MCESA_MAX_SIZE : MCESA_MIN_SIZE;
	origin = kmalloc(size, GFP_KERNEL);
	if (!origin)
		return -ENOMEM;
	/* The pointer is stored with the size bits ORed in, hide it from kmemleak */
	kmemleak_not_leak(origin);
	*mcesad = __pa(origin);
	if (MACHINE_HAS_GS)
		*mcesad |= ilog2(MCESA_MAX_SIZE);
	return 0;
}

void nmi_free_mcesa(u64 *mcesad)
{
	if (!nmi_needs_mcesa())
		return;
	kfree(__va(*mcesad & MCESA_ORIGIN_MASK));
}

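/*
 * Minimal string copy: copies src to dest, NUL terminates the result
 * and returns a pointer to the terminating NUL so that calls can be
 * chained. Used by the notrace damage handler below instead of the
 * regular string helpers.
 */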
static __always_inline char *nmi_puts(char *dest, const char *src)
{
	while (*src)
		*dest++ = *src++;
	*dest = 0;
	return dest;
}

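/*
 * Format val as 16 upper case hex digits, most significant nibble
 * first, NUL terminate the result and return a pointer to the NUL.
 */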
static __always_inline char *u64_to_hex(char *dest, u64 val)
{
	int i, num;

	for (i = 1; i <= 16; i++) {
		num = (val >> (64 - 4 * i)) & 0xf;
		if (num >= 10)
			*dest++ = 'A' + num - 10;
		else
			*dest++ = '0' + num;
	}
	*dest = 0;
	return dest;
}

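/*
 * Stop the machine: halt all other CPUs, perform a diagnose 308 reset,
 * report the machine check interruption code on the SCLP emergency
 * console and enter a disabled wait.
 */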
static notrace void s390_handle_damage(void)
{
	union ctlreg0 cr0, cr0_new;
	char message[100];
	psw_t psw_save;
	char *ptr;

	smp_emergency_stop();
	diag_amode31_ops.diag308_reset();
	ptr = nmi_puts(message, "System stopped due to unrecoverable machine check, code: 0x");
	u64_to_hex(ptr, S390_lowcore.mcck_interruption_code);

	/*
	 * Disable low address protection and make machine check new PSW a
	 * disabled wait PSW. Any additional machine check cannot be handled.
	 */
	__ctl_store(cr0.val, 0, 0);
	cr0_new = cr0;
	cr0_new.lap = 0;
	__ctl_load(cr0_new.val, 0, 0);
	psw_save = S390_lowcore.mcck_new_psw;
	psw_bits(S390_lowcore.mcck_new_psw).io = 0;
	psw_bits(S390_lowcore.mcck_new_psw).ext = 0;
	psw_bits(S390_lowcore.mcck_new_psw).wait = 1;
	sclp_emergency_printk(message);

	/*
	 * Restore machine check new PSW and control register 0 to original
	 * values. This makes system dump analysis easier.
	 */
	S390_lowcore.mcck_new_psw = psw_save;
	__ctl_load(cr0.val, 0, 0);
	disabled_wait();
	while (1);
}
NOKPROBE_SYMBOL(s390_handle_damage);

/*
 * Main machine check handler function. Will be called with interrupts disabled
 * and machine checks enabled.
 */
void __s390_handle_mcck(void)
{
	struct mcck_struct mcck;

	/*
	 * Disable machine checks and get the current state of accumulated
	 * machine checks. Afterwards delete the old state and enable machine
	 * checks again.
	 */
	local_mcck_disable();
	mcck = *this_cpu_ptr(&cpu_mcck);
	memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
	local_mcck_enable();

	if (mcck.channel_report)
		crw_handle_channel_report();
	/*
	 * A warning may remain for a prolonged period on the bare iron
	 * (actually until the machine is powered off, or the problem is gone),
	 * so we just stop listening for the WARNING MCH and avoid continuously
	 * being interrupted. One caveat, however, is that we must do this per
	 * processor and cannot use the smp version of ctl_clear_bit().
	 * On VM we only get one interrupt per virtually presented machine check.
	 * Though one suffices, we may get one interrupt per (virtual) cpu.
	 */
	if (mcck.warning) {	/* WARNING pending ? */
		static int mchchk_wng_posted = 0;

		/* Use single cpu clear, as we cannot handle smp here. */
		__ctl_clear_bit(14, 24);	/* Disable WARNING MCH */
		if (xchg(&mchchk_wng_posted, 1) == 0)
			kill_cad_pid(SIGPWR, 1);
	}
	if (mcck.stp_queue)
		stp_queue_work();
	if (mcck.kill_task) {
		local_irq_enable();
		printk(KERN_EMERG "mcck: Terminating task because of machine "
		       "malfunction (code 0x%016lx).\n", mcck.mcck_code);
		printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
		       current->comm, current->pid);
		make_task_dead(SIGSEGV);
	}
}

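/*
 * Entry point for deferred machine check handling. Runs as noinstr
 * code, therefore hardirq tracing and the PAI state are updated by
 * hand around the call to the instrumentable __s390_handle_mcck().
 */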
void noinstr s390_handle_mcck(struct pt_regs *regs)
{
	trace_hardirqs_off();
	pai_kernel_enter(regs);
	__s390_handle_mcck();
	pai_kernel_exit(regs);
	trace_hardirqs_on();
}

/*
 * returns 0 if register contents could be validated
 * returns 1 otherwise
 */
static int notrace s390_validate_registers(union mci mci)
{
	struct mcesa *mcesa;
	void *fpt_save_area;
	union ctlreg2 cr2;
	int kill_task;
	u64 zero;

	kill_task = 0;
	zero = 0;

	if (!mci.gr || !mci.fp)
		kill_task = 1;
	fpt_save_area = &S390_lowcore.floating_pt_save_area;
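	/*
	 * If the floating point control register cannot be validated, load
	 * a zero FPC and mark the task for termination; otherwise revalidate
	 * the FPC from the lowcore save area.
	 */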
	if (!mci.fc) {
		kill_task = 1;
		asm volatile(
			"	lfpc	%0\n"
			:
			: "Q" (zero));
	} else {
		asm volatile(
			"	lfpc	%0\n"
			:
			: "Q" (S390_lowcore.fpt_creg_save_area));
	}

	mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
	if (!MACHINE_HAS_VX) {
		/* Validate floating point registers */
		asm volatile(
			"	ld	0,0(%0)\n"
			"	ld	1,8(%0)\n"
			"	ld	2,16(%0)\n"
			"	ld	3,24(%0)\n"
			"	ld	4,32(%0)\n"
			"	ld	5,40(%0)\n"
			"	ld	6,48(%0)\n"
			"	ld	7,56(%0)\n"
			"	ld	8,64(%0)\n"
			"	ld	9,72(%0)\n"
			"	ld	10,80(%0)\n"
			"	ld	11,88(%0)\n"
			"	ld	12,96(%0)\n"
			"	ld	13,104(%0)\n"
			"	ld	14,112(%0)\n"
			"	ld	15,120(%0)\n"
			:
			: "a" (fpt_save_area)
			: "memory");
	} else {
		/* Validate vector registers */
		union ctlreg0 cr0;

		/*
		 * The vector validity must only be checked if not running a
		 * KVM guest. For KVM guests the machine check is forwarded by
		 * KVM and it is the responsibility of the guest to take
		 * appropriate actions. The host vector or FPU values have been
		 * saved by KVM and will be restored by KVM.
		 */
		if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST))
			kill_task = 1;
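		/*
		 * Temporarily enable the AFP register control and vector
		 * enablement bits in control register 0 so that the VLM
		 * instructions below are allowed, then restore the original
		 * control register 0 contents from the save area.
		 */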
		cr0.val = S390_lowcore.cregs_save_area[0];
		cr0.afp = cr0.vx = 1;
		__ctl_load(cr0.val, 0, 0);
		asm volatile(
			"	la	1,%0\n"
			"	VLM	0,15,0,1\n"
			"	VLM	16,31,256,1\n"
			:
			: "Q" (*(struct vx_array *)mcesa->vector_save_area)
			: "1");
		__ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
	}
	/* Validate access registers */
	asm volatile(
		"	lam	0,15,0(%0)\n"
		:
		: "a" (&S390_lowcore.access_regs_save_area)
		: "memory");
	if (!mci.ar)
		kill_task = 1;
	/* Validate guarded storage registers */
	cr2.val = S390_lowcore.cregs_save_area[2];
	if (cr2.gse) {
		if (!mci.gs) {
			/*
			 * 2 cases:
			 * - machine check in kernel or userspace
			 * - machine check while running SIE (KVM guest)
			 * For kernel or userspace the userspace values of
			 * guarded storage control cannot be recreated, the
			 * process must be terminated.
			 * For SIE the guest values of guarded storage cannot
			 * be recreated. This is either due to a bug or due to
			 * GS being disabled in the guest. The guest will be
			 * notified by KVM code and the guest's machine check
			 * handling must take care of this. The host values
			 * are saved by KVM and are not affected.
			 */
			if (!test_cpu_flag(CIF_MCCK_GUEST))
				kill_task = 1;
		} else {
			load_gs_cb((struct gs_cb *)mcesa->guarded_storage_save_area);
		}
	}
	/*
	 * The getcpu vdso syscall reads the CPU number from the programmable
	 * field of the TOD clock. Disregard the TOD programmable register
	 * validity bit and load the CPU number into the TOD programmable
	 * field unconditionally.
	 */
	set_tod_programmable_field(raw_smp_processor_id());
	/* Validate clock comparator register */
	set_clock_comparator(S390_lowcore.clock_comparator);

	if (!mci.ms || !mci.pm || !mci.ia)
		kill_task = 1;

	return kill_task;
}
NOKPROBE_SYMBOL(s390_validate_registers);

/*
 * Back up the guest's machine check info to its description block
 */
static void notrace s390_backup_mcck_info(struct pt_regs *regs)
{
	struct mcck_volatile_info *mcck_backup;
	struct sie_page *sie_page;

	/* r14 contains the sie block, which was set in sie64a */
	struct kvm_s390_sie_block *sie_block =
		(struct kvm_s390_sie_block *) regs->gprs[14];

	if (sie_block == NULL)
		/* Something's seriously wrong, stop system. */
		s390_handle_damage();

	sie_page = container_of(sie_block, struct sie_page, sie_block);
	mcck_backup = &sie_page->mcck_info;
	mcck_backup->mcic = S390_lowcore.mcck_interruption_code &
			    ~(MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE);
	mcck_backup->ext_damage_code = S390_lowcore.external_damage_code;
	mcck_backup->failing_storage_address = S390_lowcore.failing_storage_address;
}
NOKPROBE_SYMBOL(s390_backup_mcck_info);

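/*
 * Rate limit for instruction processing damage: if MAX_IPD_COUNT
 * nullifying machine checks are seen within MAX_IPD_TIME, the machine
 * is considered broken and is stopped.
 */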
#define MAX_IPD_COUNT	29
#define MAX_IPD_TIME	(5 * 60 * USEC_PER_SEC) /* 5 minutes */

#define ED_STP_ISLAND	6	/* External damage STP island check */
#define ED_STP_SYNC	7	/* External damage STP sync check */

#define MCCK_CODE_NO_GUEST	(MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE)

/*
 * Machine check handler.
 */
int notrace s390_do_machine_check(struct pt_regs *regs)
{
	static int ipd_count;
	static DEFINE_SPINLOCK(ipd_lock);
	static unsigned long long last_ipd;
	struct mcck_struct *mcck;
	unsigned long long tmp;
	irqentry_state_t irq_state;
	union mci mci;
	unsigned long mcck_dam_code;
	int mcck_pending = 0;

	irq_state = irqentry_nmi_enter(regs);

	if (user_mode(regs))
		update_timer_mcck();
	inc_irq_stat(NMI_NMI);
	mci.val = S390_lowcore.mcck_interruption_code;
	mcck = this_cpu_ptr(&cpu_mcck);

	/*
	 * Reinject machine checks caused by instruction processing damage,
	 * including Delayed Access Exceptions, into the guest instead of
	 * damaging the host if they happen in the guest.
	 */
	if (mci.pd && !test_cpu_flag(CIF_MCCK_GUEST)) {
		if (mci.b) {
			/* Processing backup -> verify if we can survive this */
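			/*
			 * z_mcic: bits that must not be set in the machine
			 * check interruption code for this to be survivable.
			 * o_mcic: validity bits that must all be set,
			 * otherwise the machine is stopped.
			 */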
			u64 z_mcic, o_mcic, t_mcic;
			z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
			o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
				  1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
				  1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
				  1ULL<<16);
			t_mcic = mci.val;

			if (((t_mcic & z_mcic) != 0) ||
			    ((t_mcic & o_mcic) != o_mcic)) {
				s390_handle_damage();
			}

			/*
			 * Nullifying exigent condition, therefore we might
			 * retry this instruction.
			 */
			spin_lock(&ipd_lock);
			tmp = get_tod_clock();
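			/* TOD clock value >> 12 yields microseconds */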
			if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
				ipd_count++;
			else
				ipd_count = 1;
			last_ipd = tmp;
			if (ipd_count == MAX_IPD_COUNT)
				s390_handle_damage();
			spin_unlock(&ipd_lock);
		} else {
			/* Processing damage -> stopping machine */
			s390_handle_damage();
		}
	}
	if (s390_validate_registers(mci)) {
		if (!user_mode(regs))
			s390_handle_damage();
		/*
		 * Couldn't restore all register contents for the
		 * user space process -> mark task for termination.
		 */
		mcck->kill_task = 1;
		mcck->mcck_code = mci.val;
		mcck_pending = 1;
	}

	/*
	 * Back up the machine check's info if it happens when the guest
	 * is running.
	 */
	if (test_cpu_flag(CIF_MCCK_GUEST))
		s390_backup_mcck_info(regs);

	if (mci.cd) {
		/* Timing facility damage */
		s390_handle_damage();
	}
	if (mci.ed && mci.ec) {
		/* External damage */
		if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
			mcck->stp_queue |= stp_sync_check();
		if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
			mcck->stp_queue |= stp_island_check();
		mcck_pending = 1;
	}
	/*
	 * Reinject storage related machine checks into the guest if they
	 * happen when the guest is running.
	 */
	if (!test_cpu_flag(CIF_MCCK_GUEST)) {
		/* Storage error uncorrected */
		if (mci.se)
			s390_handle_damage();
		/* Storage key-error uncorrected */
		if (mci.ke)
			s390_handle_damage();
		/* Storage degradation */
		if (mci.ds && mci.fa)
			s390_handle_damage();
	}
	if (mci.cp) {
		/* Channel report word pending */
		mcck->channel_report = 1;
		mcck_pending = 1;
	}
	if (mci.w) {
		/* Warning pending */
		mcck->warning = 1;
		mcck_pending = 1;
	}

	/*
	 * If there are only Channel Report Pending and External Damage
	 * machine checks, they will not be reinjected into the guest
	 * because they refer to host conditions only.
	 */
	mcck_dam_code = (mci.val & MCIC_SUBCLASS_MASK);
	if (test_cpu_flag(CIF_MCCK_GUEST) &&
	    (mcck_dam_code & MCCK_CODE_NO_GUEST) != mcck_dam_code) {
		/* Set exit reason code for host's later handling */
		*((long *)(regs->gprs[15] + __SF_SIE_REASON)) = -EINTR;
	}
	clear_cpu_flag(CIF_MCCK_GUEST);

	if (user_mode(regs) && mcck_pending) {
		irqentry_nmi_exit(regs, irq_state);
		return 1;
	}

	if (mcck_pending)
		schedule_mcck_handler();

	irqentry_nmi_exit(regs, irq_state);
	return 0;
}
NOKPROBE_SYMBOL(s390_do_machine_check);

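/*
 * Enable the machine check subclasses this handler cares about in
 * control register 14 at early boot.
 */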
static int __init machine_check_init(void)
{
	ctl_set_bit(14, 25);	/* enable external damage MCH */
	ctl_set_bit(14, 27);	/* enable system recovery MCH */
	ctl_set_bit(14, 24);	/* enable warning MCH */
	return 0;
}
early_initcall(machine_check_init);