Linux v5.9
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Machine check exception handling.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#undef DEBUG
#define pr_fmt(fmt) "mce: " fmt

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/extable.h>
#include <linux/ftrace.h>

#include <asm/machdep.h>
#include <asm/mce.h>
#include <asm/nmi.h>

static DEFINE_PER_CPU(int, mce_nest_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);

/* Queue for delayed MCE events. */
static DEFINE_PER_CPU(int, mce_queue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);

/* Queue for delayed MCE UE events. */
static DEFINE_PER_CPU(int, mce_ue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
					mce_ue_event_queue);

static void machine_check_process_queued_event(struct irq_work *work);
static void machine_check_ue_irq_work(struct irq_work *work);
static void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

static struct irq_work mce_event_process_work = {
	.func = machine_check_process_queued_event,
};

static struct irq_work mce_ue_event_irq_work = {
	.func = machine_check_ue_irq_work,
};

DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);

static BLOCKING_NOTIFIER_HEAD(mce_notifier_list);

int mce_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_register_notifier);

int mce_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_notifier);
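
/*
 * Illustrative sketch, not part of the original file: a minimal
 * consumer of the MCE notifier chain above. The chain is invoked from
 * machine_process_ue_event() below with the machine_check_event as the
 * notifier data pointer. The names "example_mce_notify" and
 * "example_mce_nb" are hypothetical.
 */
static int example_mce_notify(struct notifier_block *nb,
			      unsigned long unused, void *data)
{
	struct machine_check_event *evt = data;

	if (evt->error_type == MCE_ERROR_TYPE_UE)
		pr_info("example: UE event on CPU %d\n", evt->cpu);
	return NOTIFY_OK;
}

static struct notifier_block example_mce_nb = {
	.notifier_call = example_mce_notify,
};

/* A module would then call mce_register_notifier(&example_mce_nb). */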

static void mce_set_error_info(struct machine_check_event *mce,
			       struct mce_error_info *mce_err)
{
	mce->error_type = mce_err->error_type;
	switch (mce_err->error_type) {
	case MCE_ERROR_TYPE_UE:
		mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
		break;
	case MCE_ERROR_TYPE_SLB:
		mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
		break;
	case MCE_ERROR_TYPE_ERAT:
		mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
		break;
	case MCE_ERROR_TYPE_TLB:
		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
		break;
	case MCE_ERROR_TYPE_USER:
		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
		break;
	case MCE_ERROR_TYPE_RA:
		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
		break;
	case MCE_ERROR_TYPE_LINK:
		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		break;
	}
}

/*
 * Decode and save high level MCE information into the per-cpu buffer,
 * which is an array of machine_check_event structures.
 */
void save_mce_event(struct pt_regs *regs, long handled,
		    struct mce_error_info *mce_err,
		    uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
	int index = __this_cpu_inc_return(mce_nest_count) - 1;
	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);

	/*
	 * Return if we don't have enough space to log the MCE event.
	 * mce_nest_count may go beyond MAX_MC_EVT, but that's OK:
	 * the check below stops any buffer overrun.
	 */
	if (index >= MAX_MC_EVT)
		return;

	/* Populate generic machine check info. */
	mce->version = MCE_V1;
	mce->srr0 = nip;
	mce->srr1 = regs->msr;
	mce->gpr3 = regs->gpr[3];
	mce->in_use = 1;
	mce->cpu = get_paca()->paca_index;

	/* Mark it recovered if we have handled it and MSR(RI=1). */
	if (handled && (regs->msr & MSR_RI))
		mce->disposition = MCE_DISPOSITION_RECOVERED;
	else
		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;

	mce->initiator = mce_err->initiator;
	mce->severity = mce_err->severity;
	mce->sync_error = mce_err->sync_error;
	mce->error_class = mce_err->error_class;

	/*
	 * Populate the mce error_type and type-specific error_type.
	 */
	mce_set_error_info(mce, mce_err);

	if (!addr)
		return;

	if (mce->error_type == MCE_ERROR_TYPE_TLB) {
		mce->u.tlb_error.effective_address_provided = true;
		mce->u.tlb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
		mce->u.slb_error.effective_address_provided = true;
		mce->u.slb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
		mce->u.erat_error.effective_address_provided = true;
		mce->u.erat_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
		mce->u.user_error.effective_address_provided = true;
		mce->u.user_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
		mce->u.ra_error.effective_address_provided = true;
		mce->u.ra_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
		mce->u.link_error.effective_address_provided = true;
		mce->u.link_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
		mce->u.ue_error.effective_address_provided = true;
		mce->u.ue_error.effective_address = addr;
		if (phys_addr != ULONG_MAX) {
			mce->u.ue_error.physical_address_provided = true;
			mce->u.ue_error.physical_address = phys_addr;
			mce->u.ue_error.ignore_event = mce_err->ignore_event;
			machine_check_ue_event(mce);
		}
	}
	return;
}
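
/*
 * Illustrative sketch, not part of the original file: how a platform
 * decoder might feed save_mce_event(). On real hardware this is done
 * by the CPU-specific handlers (e.g. mce_power.c); the function name
 * and the field values below are hypothetical, and the enum constants
 * are assumed to come from asm/mce.h.
 */
static void example_report_slb_mce(struct pt_regs *regs, long handled)
{
	struct mce_error_info mce_err = {
		.error_type	= MCE_ERROR_TYPE_SLB,
		.u.slb_error_type = MCE_SLB_ERROR_MULTIHIT,
		.severity	= MCE_SEV_WARNING,
		.initiator	= MCE_INITIATOR_CPU,
		.error_class	= MCE_ECLASS_SOFT_INDETERMINATE,
		.sync_error	= true,
	};

	/* No usable physical address for an SLB error: pass ULONG_MAX. */
	save_mce_event(regs, handled, &mce_err, regs->nip, regs->dar,
		       ULONG_MAX);
}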

/*
 * get_mce_event:
 *	mce	Pointer to machine_check_event structure to be filled.
 *	release	Flag to indicate whether to free the event slot or not.
 *		0 = do not release the mce event. Caller will invoke
 *		    release_mce_event() once the event has been consumed.
 *		1 = release the slot.
 *
 *	return	1 = success
 *		0 = failure
 *
 * get_mce_event() will be called by the platform-specific machine check
 * handler routine and by KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not be scheduled until the ret_from_except() routine
 * is called.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
	int index = __this_cpu_read(mce_nest_count) - 1;
	struct machine_check_event *mc_evt;
	int ret = 0;

	/* Sanity check */
	if (index < 0)
		return ret;

	/* Check if we have MCE info to process. */
	if (index < MAX_MC_EVT) {
		mc_evt = this_cpu_ptr(&mce_event[index]);
		/* Copy the event structure and release the original. */
		if (mce)
			*mce = *mc_evt;
		if (release)
			mc_evt->in_use = 0;
		ret = 1;
	}
	/* Decrement the count to free the slot. */
	if (release)
		__this_cpu_dec(mce_nest_count);

	return ret;
}

void release_mce_event(void)
{
	get_mce_event(NULL, true);
}
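
/*
 * Illustrative sketch, not part of the original file: the two-step
 * consumption pattern described in the comment above get_mce_event().
 * MCE_EVENT_DONTRELEASE/MCE_EVENT_RELEASE are assumed to be the bool
 * aliases from asm/mce.h; "example_consume_mce" is hypothetical.
 */
static void example_consume_mce(void)
{
	struct machine_check_event evt;

	/* Peek at the most recent event without freeing its slot. */
	if (!get_mce_event(&evt, MCE_EVENT_DONTRELEASE))
		return;

	/* ... consume evt ... */

	/* Now free the per-cpu slot. */
	release_mce_event();
}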

static void machine_check_ue_irq_work(struct irq_work *work)
{
	schedule_work(&mce_ue_event_work);
}

/*
 * Queue up the MCE event so that it can be handled later.
 */
static void machine_check_ue_event(struct machine_check_event *evt)
{
	int index;

	index = __this_cpu_inc_return(mce_ue_count) - 1;
	/* If the queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_ue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));

	/* Queue work to process this event later. */
	irq_work_queue(&mce_ue_event_irq_work);
}

/*
 * Queue up the MCE event so that it can be handled later.
 */
void machine_check_queue_event(void)
{
	int index;
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	index = __this_cpu_inc_return(mce_queue_count) - 1;
	/* If the queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_queue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));

	/* Queue irq work to process this event later. */
	irq_work_queue(&mce_event_process_work);
}

void mce_common_process_ue(struct pt_regs *regs,
			   struct mce_error_info *mce_err)
{
	const struct exception_table_entry *entry;

	entry = search_kernel_exception_table(regs->nip);
	if (entry) {
		mce_err->ignore_event = true;
		regs->nip = extable_fixup(entry);
	}
}

/*
 * Process pending MCE UE events from the per-cpu UE event queue. This
 * runs in process context from a work queue, scheduled via irq_work by
 * machine_check_ue_event() above.
 */
static void machine_process_ue_event(struct work_struct *work)
{
	int index;
	struct machine_check_event *evt;

	while (__this_cpu_read(mce_ue_count) > 0) {
		index = __this_cpu_read(mce_ue_count) - 1;
		evt = this_cpu_ptr(&mce_ue_event_queue[index]);
		blocking_notifier_call_chain(&mce_notifier_list, 0, evt);
#ifdef CONFIG_MEMORY_FAILURE
		/*
		 * This should probably be queued elsewhere, but oh well.
		 *
		 * Don't report this machine check if the caller has asked
		 * us to ignore the event: it has a fixup handler which
		 * will do the appropriate error handling and reporting.
		 */
		if (evt->error_type == MCE_ERROR_TYPE_UE) {
			if (evt->u.ue_error.ignore_event) {
				__this_cpu_dec(mce_ue_count);
				continue;
			}

			if (evt->u.ue_error.physical_address_provided) {
				unsigned long pfn;

				pfn = evt->u.ue_error.physical_address >>
					PAGE_SHIFT;
				memory_failure(pfn, 0);
			} else {
				pr_warn("Failed to identify the bad address from which the uncorrectable error (UE) was generated\n");
			}
		}
#endif
		__this_cpu_dec(mce_ue_count);
	}
}

/*
 * Process pending MCE events from the per-cpu MCE event queue. This
 * runs from irq_work context, shortly after the machine check handler
 * queues an event via machine_check_queue_event().
 */
static void machine_check_process_queued_event(struct irq_work *work)
{
	int index;
	struct machine_check_event *evt;

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/*
	 * For now just print it to the console.
	 * TODO: log this error event to the FSP or NVRAM.
	 */
	while (__this_cpu_read(mce_queue_count) > 0) {
		index = __this_cpu_read(mce_queue_count) - 1;
		evt = this_cpu_ptr(&mce_event_queue[index]);

		if (evt->error_type == MCE_ERROR_TYPE_UE &&
		    evt->u.ue_error.ignore_event) {
			__this_cpu_dec(mce_queue_count);
			continue;
		}
		machine_check_print_event_info(evt, false, false);
		__this_cpu_dec(mce_queue_count);
	}
}

void machine_check_print_event_info(struct machine_check_event *evt,
				    bool user_mode, bool in_guest)
{
	const char *level, *sevstr, *subtype, *err_type, *initiator;
	uint64_t ea = 0, pa = 0;
	int n = 0;
	char dar_str[50];
	char pa_str[50];
	static const char *mc_ue_types[] = {
		"Indeterminate",
		"Instruction fetch",
		"Page table walk ifetch",
		"Load/Store",
		"Page table walk Load/Store",
	};
	static const char *mc_slb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_erat_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_tlb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_user_types[] = {
		"Indeterminate",
		"tlbie(l) invalid",
		"scv invalid",
	};
	static const char *mc_ra_types[] = {
		"Indeterminate",
		"Instruction fetch (bad)",
		"Instruction fetch (foreign)",
		"Page table walk ifetch (bad)",
		"Page table walk ifetch (foreign)",
		"Load (bad)",
		"Store (bad)",
		"Page table walk Load/Store (bad)",
		"Page table walk Load/Store (foreign)",
		"Load/Store (foreign)",
	};
	static const char *mc_link_types[] = {
		"Indeterminate",
		"Instruction fetch (timeout)",
		"Page table walk ifetch (timeout)",
		"Load (timeout)",
		"Store (timeout)",
		"Page table walk Load/Store (timeout)",
	};
	static const char *mc_error_class[] = {
		"Unknown",
		"Hardware error",
		"Probable Hardware error (some chance of software cause)",
		"Software error",
		"Probable Software error (some chance of hardware cause)",
	};

	/* Print things out */
	if (evt->version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d!\n",
		       evt->version);
		return;
	}
	switch (evt->severity) {
	case MCE_SEV_NO_ERROR:
		level = KERN_INFO;
		sevstr = "Harmless";
		break;
	case MCE_SEV_WARNING:
		level = KERN_WARNING;
		sevstr = "Warning";
		break;
	case MCE_SEV_SEVERE:
		level = KERN_ERR;
		sevstr = "Severe";
		break;
	case MCE_SEV_FATAL:
	default:
		level = KERN_ERR;
		sevstr = "Fatal";
		break;
	}

	switch (evt->initiator) {
	case MCE_INITIATOR_CPU:
		initiator = "CPU";
		break;
	case MCE_INITIATOR_PCI:
		initiator = "PCI";
		break;
	case MCE_INITIATOR_ISA:
		initiator = "ISA";
		break;
	case MCE_INITIATOR_MEMORY:
		initiator = "Memory";
		break;
	case MCE_INITIATOR_POWERMGM:
		initiator = "Power Management";
		break;
	case MCE_INITIATOR_UNKNOWN:
	default:
		initiator = "Unknown";
		break;
	}

	switch (evt->error_type) {
	case MCE_ERROR_TYPE_UE:
		err_type = "UE";
		subtype = evt->u.ue_error.ue_error_type <
			ARRAY_SIZE(mc_ue_types) ?
			mc_ue_types[evt->u.ue_error.ue_error_type]
			: "Unknown";
		if (evt->u.ue_error.effective_address_provided)
			ea = evt->u.ue_error.effective_address;
		if (evt->u.ue_error.physical_address_provided)
			pa = evt->u.ue_error.physical_address;
		break;
	case MCE_ERROR_TYPE_SLB:
		err_type = "SLB";
		subtype = evt->u.slb_error.slb_error_type <
			ARRAY_SIZE(mc_slb_types) ?
			mc_slb_types[evt->u.slb_error.slb_error_type]
			: "Unknown";
		if (evt->u.slb_error.effective_address_provided)
			ea = evt->u.slb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_ERAT:
		err_type = "ERAT";
		subtype = evt->u.erat_error.erat_error_type <
			ARRAY_SIZE(mc_erat_types) ?
			mc_erat_types[evt->u.erat_error.erat_error_type]
			: "Unknown";
		if (evt->u.erat_error.effective_address_provided)
			ea = evt->u.erat_error.effective_address;
		break;
	case MCE_ERROR_TYPE_TLB:
		err_type = "TLB";
		subtype = evt->u.tlb_error.tlb_error_type <
			ARRAY_SIZE(mc_tlb_types) ?
			mc_tlb_types[evt->u.tlb_error.tlb_error_type]
			: "Unknown";
		if (evt->u.tlb_error.effective_address_provided)
			ea = evt->u.tlb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_USER:
		err_type = "User";
		subtype = evt->u.user_error.user_error_type <
			ARRAY_SIZE(mc_user_types) ?
			mc_user_types[evt->u.user_error.user_error_type]
			: "Unknown";
		if (evt->u.user_error.effective_address_provided)
			ea = evt->u.user_error.effective_address;
		break;
	case MCE_ERROR_TYPE_RA:
		err_type = "Real address";
		subtype = evt->u.ra_error.ra_error_type <
			ARRAY_SIZE(mc_ra_types) ?
			mc_ra_types[evt->u.ra_error.ra_error_type]
			: "Unknown";
		if (evt->u.ra_error.effective_address_provided)
			ea = evt->u.ra_error.effective_address;
		break;
	case MCE_ERROR_TYPE_LINK:
		err_type = "Link";
		subtype = evt->u.link_error.link_error_type <
			ARRAY_SIZE(mc_link_types) ?
			mc_link_types[evt->u.link_error.link_error_type]
			: "Unknown";
		if (evt->u.link_error.effective_address_provided)
			ea = evt->u.link_error.effective_address;
		break;
	case MCE_ERROR_TYPE_DCACHE:
		err_type = "D-Cache";
		subtype = "Unknown";
		break;
	case MCE_ERROR_TYPE_ICACHE:
		err_type = "I-Cache";
		subtype = "Unknown";
		break;
	default:
	case MCE_ERROR_TYPE_UNKNOWN:
		err_type = "Unknown";
		subtype = "";
		break;
	}

	dar_str[0] = pa_str[0] = '\0';
	if (ea && evt->srr0 != ea) {
		/* Load/Store address */
		n = sprintf(dar_str, "DAR: %016llx ", ea);
		if (pa)
			sprintf(dar_str + n, "paddr: %016llx ", pa);
	} else if (pa) {
		sprintf(pa_str, " paddr: %016llx", pa);
	}

	printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
		level, evt->cpu, sevstr, in_guest ? "Guest" : "Host",
		err_type, subtype, dar_str,
		evt->disposition == MCE_DISPOSITION_RECOVERED ?
		"Recovered" : "Not recovered");

	if (in_guest || user_mode) {
		printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
			level, evt->cpu, current->pid, current->comm,
			in_guest ? "Guest " : "", evt->srr0, pa_str);
	} else {
		printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
			level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
	}

	printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);

	subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
		mc_error_class[evt->error_class] : "Unknown";
	printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Display faulty SLB contents for SLB errors. */
	if (evt->error_type == MCE_ERROR_TYPE_SLB)
		slb_dump_contents(local_paca->mce_faulty_slbs);
#endif
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);
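
/*
 * Illustrative note, not part of the original file: following the
 * format strings above, a recovered host SLB multihit would be
 * reported as lines of the form (values are placeholders):
 *
 *	MCE: CPU0: machine check (Warning) Host SLB Multihit DAR: <ea> [Recovered]
 *	MCE: CPU0: NIP: [<nip>] <symbol+offset>
 *	MCE: CPU0: Initiator CPU
 *
 * followed by a final error-class line such as "Probable Software
 * error (some chance of hardware cause)".
 */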

/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain SRR0 and SRR1.
 */
long notrace machine_check_early(struct pt_regs *regs)
{
	long handled = 0;
	bool nested = in_nmi();
	u8 ftrace_enabled = this_cpu_get_ftrace_enabled();

	this_cpu_set_ftrace_enabled(0);

	if (!nested)
		nmi_enter();

	hv_nmi_check_nonrecoverable(regs);

	/*
	 * See if the platform is capable of handling the machine check.
	 */
	if (ppc_md.machine_check_early)
		handled = ppc_md.machine_check_early(regs);

	if (!nested)
		nmi_exit();

	this_cpu_set_ftrace_enabled(ftrace_enabled);

	return handled;
}

/* Possible meanings for the HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
	DTRIG_UNKNOWN,
	DTRIG_VECTOR_CI,	/* need to emulate vector CI load instr */
	DTRIG_SUSPEND_ESCAPE,	/* need to escape from TM suspend mode */
} hmer_debug_trig_function;

static int init_debug_trig_function(void)
{
	int pvr;
	struct device_node *cpun;
	struct property *prop = NULL;
	const char *str;

	/* First look in the device tree */
	preempt_disable();
	cpun = of_get_cpu_node(smp_processor_id(), NULL);
	if (cpun) {
		of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
					    prop, str) {
			if (strcmp(str, "bit17-vector-ci-load") == 0)
				hmer_debug_trig_function = DTRIG_VECTOR_CI;
			else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
				hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		}
		of_node_put(cpun);
	}
	preempt_enable();

	/* If we found the property, don't look at the PVR */
	if (prop)
		goto out;

	pvr = mfspr(SPRN_PVR);
	/* Check for POWER9 Nimbus (scale-out) */
	if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
		/* DD2.2 and later */
		if ((pvr & 0xfff) >= 0x202)
			hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		/* DD2.0 and DD2.1 - used for vector CI load emulation */
		else if ((pvr & 0xfff) >= 0x200)
			hmer_debug_trig_function = DTRIG_VECTOR_CI;
	}

 out:
	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		pr_debug("HMI debug trigger used for vector CI load\n");
		break;
	case DTRIG_SUSPEND_ESCAPE:
		pr_debug("HMI debug trigger used for TM suspend escape\n");
		break;
	default:
		break;
	}
	return 0;
}
__initcall(init_debug_trig_function);
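
/*
 * Illustrative note, not part of the original file: per the checks
 * above, the low 12 bits of the POWER9 PVR encode the DD (revision)
 * level, so e.g. (pvr & 0xfff) == 0x202 means DD2.2, while the 0xe000
 * mask distinguishes the Nimbus (scale-out) part from other POWER9
 * variants.
 */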

/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not an HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
	unsigned long hmer = mfspr(SPRN_HMER);
	long ret = 0;

	/* The HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
	if (!((hmer & HMER_DEBUG_TRIG)
	      && hmer_debug_trig_function != DTRIG_UNKNOWN))
		return -1;

	hmer &= ~HMER_DEBUG_TRIG;
	/* HMER is a write-AND register: writing ~HMER_DEBUG_TRIG clears only that bit */
	mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);

	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		/*
		 * To avoid problems with soft-disable we only do the
		 * emulation if we are coming from host user space.
		 */
		if (regs && user_mode(regs))
			ret = local_paca->hmi_p9_special_emu = 1;

		break;

	default:
		break;
	}

	/*
	 * See if any other HMI causes remain to be handled.
	 */
	if (hmer & mfspr(SPRN_HMEER))
		return -1;

	return ret;
}

/*
 * Return values:
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_exception_realmode(struct pt_regs *regs)
{
	int ret;

	local_paca->hmi_irqs++;

	ret = hmi_handle_debugtrig(regs);
	if (ret >= 0)
		return ret;

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 1;
}