Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Copyright 2007-2010 Freescale Semiconductor, Inc.
5 *
6 * Modified by Cort Dougan (cort@cs.nmt.edu)
7 * and Paul Mackerras (paulus@samba.org)
8 */
9
10/*
11 * This file handles the architecture-dependent parts of hardware exceptions
12 */
13
14#include <linux/errno.h>
15#include <linux/sched.h>
16#include <linux/sched/debug.h>
17#include <linux/kernel.h>
18#include <linux/mm.h>
19#include <linux/pkeys.h>
20#include <linux/stddef.h>
21#include <linux/unistd.h>
22#include <linux/ptrace.h>
23#include <linux/user.h>
24#include <linux/interrupt.h>
25#include <linux/init.h>
26#include <linux/extable.h>
27#include <linux/module.h> /* print_modules */
28#include <linux/prctl.h>
29#include <linux/delay.h>
30#include <linux/kprobes.h>
31#include <linux/kexec.h>
32#include <linux/backlight.h>
33#include <linux/bug.h>
34#include <linux/kdebug.h>
35#include <linux/ratelimit.h>
36#include <linux/context_tracking.h>
37#include <linux/smp.h>
38#include <linux/console.h>
39#include <linux/kmsg_dump.h>
40
41#include <asm/emulated_ops.h>
42#include <linux/uaccess.h>
43#include <asm/debugfs.h>
44#include <asm/interrupt.h>
45#include <asm/io.h>
46#include <asm/machdep.h>
47#include <asm/rtas.h>
48#include <asm/pmc.h>
49#include <asm/reg.h>
50#ifdef CONFIG_PMAC_BACKLIGHT
51#include <asm/backlight.h>
52#endif
53#ifdef CONFIG_PPC64
54#include <asm/firmware.h>
55#include <asm/processor.h>
56#endif
57#include <asm/kexec.h>
58#include <asm/ppc-opcode.h>
59#include <asm/rio.h>
60#include <asm/fadump.h>
61#include <asm/switch_to.h>
62#include <asm/tm.h>
63#include <asm/debug.h>
64#include <asm/asm-prototypes.h>
65#include <asm/hmi.h>
66#include <sysdev/fsl_pci.h>
67#include <asm/kprobes.h>
68#include <asm/stacktrace.h>
69#include <asm/nmi.h>
70#include <asm/disassemble.h>
71
72#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
73int (*__debugger)(struct pt_regs *regs) __read_mostly;
74int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
75int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
76int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
77int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
78int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
79int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;
80
81EXPORT_SYMBOL(__debugger);
82EXPORT_SYMBOL(__debugger_ipi);
83EXPORT_SYMBOL(__debugger_bpt);
84EXPORT_SYMBOL(__debugger_sstep);
85EXPORT_SYMBOL(__debugger_iabr_match);
86EXPORT_SYMBOL(__debugger_break_match);
87EXPORT_SYMBOL(__debugger_fault_handler);
88#endif
89
90/* Transactional Memory trap debug */
91#ifdef TM_DEBUG_SW
92#define TM_DEBUG(x...) printk(KERN_INFO x)
93#else
94#define TM_DEBUG(x...) do { } while(0)
95#endif
96
97static const char *signame(int signr)
98{
99 switch (signr) {
100 case SIGBUS: return "bus error";
101 case SIGFPE: return "floating point exception";
102 case SIGILL: return "illegal instruction";
103 case SIGSEGV: return "segfault";
104 case SIGTRAP: return "unhandled trap";
105 }
106
107 return "unknown signal";
108}
109
110/*
111 * Trap & Exception support
112 */
113
114#ifdef CONFIG_PMAC_BACKLIGHT
115static void pmac_backlight_unblank(void)
116{
117 mutex_lock(&pmac_backlight_mutex);
118 if (pmac_backlight) {
119 struct backlight_properties *props;
120
121 props = &pmac_backlight->props;
122 props->brightness = props->max_brightness;
123 props->power = FB_BLANK_UNBLANK;
124 backlight_update_status(pmac_backlight);
125 }
126 mutex_unlock(&pmac_backlight_mutex);
127}
128#else
129static inline void pmac_backlight_unblank(void) { }
130#endif
131
132/*
133 * If oops/die is expected to crash the machine, return true here.
134 *
135 * This should not be expected to be 100% accurate, there may be
136 * notifiers registered or other unexpected conditions that may bring
137 * down the kernel. Or if the current process in the kernel is holding
138 * locks or has other critical state, the kernel may become effectively
139 * unusable anyway.
140 */
141bool die_will_crash(void)
142{
143 if (should_fadump_crash())
144 return true;
145 if (kexec_should_crash(current))
146 return true;
147 if (in_interrupt() || panic_on_oops ||
148 !current->pid || is_global_init(current))
149 return true;
150
151 return false;
152}
153
154static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
155static int die_owner = -1;
156static unsigned int die_nest_count;
157static int die_counter;
158
159extern void panic_flush_kmsg_start(void)
160{
161 /*
162 * These are mostly taken from kernel/panic.c, but tries to do
163 * relatively minimal work. Don't use delay functions (TB may
164 * be broken), don't crash dump (need to set a firmware log),
165 * don't run notifiers. We do want to get some information to
166 * Linux console.
167 */
168 console_verbose();
169 bust_spinlocks(1);
170}
171
172extern void panic_flush_kmsg_end(void)
173{
174 printk_safe_flush_on_panic();
175 kmsg_dump(KMSG_DUMP_PANIC);
176 bust_spinlocks(0);
177 debug_locks_off();
178 console_flush_on_panic(CONSOLE_FLUSH_PENDING);
179}
180
181static unsigned long oops_begin(struct pt_regs *regs)
182{
183 int cpu;
184 unsigned long flags;
185
186 oops_enter();
187
188 /* racy, but better than risking deadlock. */
189 raw_local_irq_save(flags);
190 cpu = smp_processor_id();
191 if (!arch_spin_trylock(&die_lock)) {
192 if (cpu == die_owner)
193 /* nested oops. should stop eventually */;
194 else
195 arch_spin_lock(&die_lock);
196 }
197 die_nest_count++;
198 die_owner = cpu;
199 console_verbose();
200 bust_spinlocks(1);
201 if (machine_is(powermac))
202 pmac_backlight_unblank();
203 return flags;
204}
205NOKPROBE_SYMBOL(oops_begin);
206
207static void oops_end(unsigned long flags, struct pt_regs *regs,
208 int signr)
209{
210 bust_spinlocks(0);
211 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
212 die_nest_count--;
213 oops_exit();
214 printk("\n");
215 if (!die_nest_count) {
216 /* Nest count reaches zero, release the lock. */
217 die_owner = -1;
218 arch_spin_unlock(&die_lock);
219 }
220 raw_local_irq_restore(flags);
221
222 /*
223 * system_reset_excption handles debugger, crash dump, panic, for 0x100
224 */
225 if (TRAP(regs) == INTERRUPT_SYSTEM_RESET)
226 return;
227
228 crash_fadump(regs, "die oops");
229
230 if (kexec_should_crash(current))
231 crash_kexec(regs);
232
233 if (!signr)
234 return;
235
236 /*
237 * While our oops output is serialised by a spinlock, output
238 * from panic() called below can race and corrupt it. If we
239 * know we are going to panic, delay for 1 second so we have a
240 * chance to get clean backtraces from all CPUs that are oopsing.
241 */
242 if (in_interrupt() || panic_on_oops || !current->pid ||
243 is_global_init(current)) {
244 mdelay(MSEC_PER_SEC);
245 }
246
247 if (panic_on_oops)
248 panic("Fatal exception");
249 do_exit(signr);
250}
251NOKPROBE_SYMBOL(oops_end);
252
253static char *get_mmu_str(void)
254{
255 if (early_radix_enabled())
256 return " MMU=Radix";
257 if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
258 return " MMU=Hash";
259 return "";
260}
261
262static int __die(const char *str, struct pt_regs *regs, long err)
263{
264 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
265
266 printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
267 IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
268 PAGE_SIZE / 1024, get_mmu_str(),
269 IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
270 IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
271 IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
272 debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
273 IS_ENABLED(CONFIG_NUMA) ? " NUMA" : "",
274 ppc_md.name ? ppc_md.name : "");
275
276 if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
277 return 1;
278
279 print_modules();
280 show_regs(regs);
281
282 return 0;
283}
284NOKPROBE_SYMBOL(__die);
285
286void die(const char *str, struct pt_regs *regs, long err)
287{
288 unsigned long flags;
289
290 /*
291 * system_reset_excption handles debugger, crash dump, panic, for 0x100
292 */
293 if (TRAP(regs) != INTERRUPT_SYSTEM_RESET) {
294 if (debugger(regs))
295 return;
296 }
297
298 flags = oops_begin(regs);
299 if (__die(str, regs, err))
300 err = 0;
301 oops_end(flags, regs, err);
302}
303NOKPROBE_SYMBOL(die);
304
305void user_single_step_report(struct pt_regs *regs)
306{
307 force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)regs->nip);
308}
309
310static void show_signal_msg(int signr, struct pt_regs *regs, int code,
311 unsigned long addr)
312{
313 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
314 DEFAULT_RATELIMIT_BURST);
315
316 if (!show_unhandled_signals)
317 return;
318
319 if (!unhandled_signal(current, signr))
320 return;
321
322 if (!__ratelimit(&rs))
323 return;
324
325 pr_info("%s[%d]: %s (%d) at %lx nip %lx lr %lx code %x",
326 current->comm, current->pid, signame(signr), signr,
327 addr, regs->nip, regs->link, code);
328
329 print_vma_addr(KERN_CONT " in ", regs->nip);
330
331 pr_cont("\n");
332
333 show_user_instructions(regs);
334}
335
336static bool exception_common(int signr, struct pt_regs *regs, int code,
337 unsigned long addr)
338{
339 if (!user_mode(regs)) {
340 die("Exception in kernel mode", regs, signr);
341 return false;
342 }
343
344 /*
345 * Must not enable interrupts even for user-mode exception, because
346 * this can be called from machine check, which may be a NMI or IRQ
347 * which don't like interrupts being enabled. Could check for
348 * in_hardirq || in_nmi perhaps, but there doesn't seem to be a good
349 * reason why _exception() should enable irqs for an exception handler,
350 * the handlers themselves do that directly.
351 */
352
353 show_signal_msg(signr, regs, code, addr);
354
355 current->thread.trap_nr = code;
356
357 return true;
358}
359
360void _exception_pkey(struct pt_regs *regs, unsigned long addr, int key)
361{
362 if (!exception_common(SIGSEGV, regs, SEGV_PKUERR, addr))
363 return;
364
365 force_sig_pkuerr((void __user *) addr, key);
366}
367
368void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
369{
370 if (!exception_common(signr, regs, code, addr))
371 return;
372
373 force_sig_fault(signr, code, (void __user *)addr);
374}
375
376/*
377 * The interrupt architecture has a quirk in that the HV interrupts excluding
378 * the NMIs (0x100 and 0x200) do not clear MSR[RI] at entry. The first thing
379 * that an interrupt handler must do is save off a GPR into a scratch register,
380 * and all interrupts on POWERNV (HV=1) use the HSPRG1 register as scratch.
381 * Therefore an NMI can clobber an HV interrupt's live HSPRG1 without noticing
382 * that it is non-reentrant, which leads to random data corruption.
383 *
384 * The solution is for NMI interrupts in HV mode to check if they originated
385 * from these critical HV interrupt regions. If so, then mark them not
386 * recoverable.
387 *
388 * An alternative would be for HV NMIs to use SPRG for scratch to avoid the
389 * HSPRG1 clobber, however this would cause guest SPRG to be clobbered. Linux
390 * guests should always have MSR[RI]=0 when its scratch SPRG is in use, so
391 * that would work. However any other guest OS that may have the SPRG live
392 * and MSR[RI]=1 could encounter silent corruption.
393 *
394 * Builds that do not support KVM could take this second option to increase
395 * the recoverability of NMIs.
396 */
397void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
398{
399#ifdef CONFIG_PPC_POWERNV
400 unsigned long kbase = (unsigned long)_stext;
401 unsigned long nip = regs->nip;
402
403 if (!(regs->msr & MSR_RI))
404 return;
405 if (!(regs->msr & MSR_HV))
406 return;
407 if (regs->msr & MSR_PR)
408 return;
409
410 /*
411 * Now test if the interrupt has hit a range that may be using
412 * HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The
413 * problem ranges all run un-relocated. Test real and virt modes
414 * at the same time by dropping the high bit of the nip (virt mode
415 * entry points still have the +0x4000 offset).
416 */
417 nip &= ~0xc000000000000000ULL;
418 if ((nip >= 0x500 && nip < 0x600) || (nip >= 0x4500 && nip < 0x4600))
419 goto nonrecoverable;
420 if ((nip >= 0x980 && nip < 0xa00) || (nip >= 0x4980 && nip < 0x4a00))
421 goto nonrecoverable;
422 if ((nip >= 0xe00 && nip < 0xec0) || (nip >= 0x4e00 && nip < 0x4ec0))
423 goto nonrecoverable;
424 if ((nip >= 0xf80 && nip < 0xfa0) || (nip >= 0x4f80 && nip < 0x4fa0))
425 goto nonrecoverable;
426
427 /* Trampoline code runs un-relocated so subtract kbase. */
428 if (nip >= (unsigned long)(start_real_trampolines - kbase) &&
429 nip < (unsigned long)(end_real_trampolines - kbase))
430 goto nonrecoverable;
431 if (nip >= (unsigned long)(start_virt_trampolines - kbase) &&
432 nip < (unsigned long)(end_virt_trampolines - kbase))
433 goto nonrecoverable;
434 return;
435
436nonrecoverable:
437 regs_set_return_msr(regs, regs->msr & ~MSR_RI);
438#endif
439}
440DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
441{
442 unsigned long hsrr0, hsrr1;
443 bool saved_hsrrs = false;
444
445 /*
446 * System reset can interrupt code where HSRRs are live and MSR[RI]=1.
447 * The system reset interrupt itself may clobber HSRRs (e.g., to call
448 * OPAL), so save them here and restore them before returning.
449 *
450 * Machine checks don't need to save HSRRs, as the real mode handler
451 * is careful to avoid them, and the regular handler is not delivered
452 * as an NMI.
453 */
454 if (cpu_has_feature(CPU_FTR_HVMODE)) {
455 hsrr0 = mfspr(SPRN_HSRR0);
456 hsrr1 = mfspr(SPRN_HSRR1);
457 saved_hsrrs = true;
458 }
459
460 hv_nmi_check_nonrecoverable(regs);
461
462 __this_cpu_inc(irq_stat.sreset_irqs);
463
464 /* See if any machine dependent calls */
465 if (ppc_md.system_reset_exception) {
466 if (ppc_md.system_reset_exception(regs))
467 goto out;
468 }
469
470 if (debugger(regs))
471 goto out;
472
473 kmsg_dump(KMSG_DUMP_OOPS);
474 /*
475 * A system reset is a request to dump, so we always send
476 * it through the crashdump code (if fadump or kdump are
477 * registered).
478 */
479 crash_fadump(regs, "System Reset");
480
481 crash_kexec(regs);
482
483 /*
484 * We aren't the primary crash CPU. We need to send it
485 * to a holding pattern to avoid it ending up in the panic
486 * code.
487 */
488 crash_kexec_secondary(regs);
489
490 /*
491 * No debugger or crash dump registered, print logs then
492 * panic.
493 */
494 die("System Reset", regs, SIGABRT);
495
496 mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
497 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
498 nmi_panic(regs, "System Reset");
499
500out:
501#ifdef CONFIG_PPC_BOOK3S_64
502 BUG_ON(get_paca()->in_nmi == 0);
503 if (get_paca()->in_nmi > 1)
504 die("Unrecoverable nested System Reset", regs, SIGABRT);
505#endif
506 /* Must die if the interrupt is not recoverable */
507 if (!(regs->msr & MSR_RI)) {
508 /* For the reason explained in die_mce, nmi_exit before die */
509 nmi_exit();
510 die("Unrecoverable System Reset", regs, SIGABRT);
511 }
512
513 if (saved_hsrrs) {
514 mtspr(SPRN_HSRR0, hsrr0);
515 mtspr(SPRN_HSRR1, hsrr1);
516 }
517
518 /* What should we do here? We could issue a shutdown or hard reset. */
519
520 return 0;
521}
522
523/*
524 * I/O accesses can cause machine checks on powermacs.
525 * Check if the NIP corresponds to the address of a sync
526 * instruction for which there is an entry in the exception
527 * table.
528 * -- paulus.
529 */
530static inline int check_io_access(struct pt_regs *regs)
531{
532#ifdef CONFIG_PPC32
533 unsigned long msr = regs->msr;
534 const struct exception_table_entry *entry;
535 unsigned int *nip = (unsigned int *)regs->nip;
536
537 if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
538 && (entry = search_exception_tables(regs->nip)) != NULL) {
539 /*
540 * Check that it's a sync instruction, or somewhere
541 * in the twi; isync; nop sequence that inb/inw/inl uses.
542 * As the address is in the exception table
543 * we should be able to read the instr there.
544 * For the debug message, we look at the preceding
545 * load or store.
546 */
547 if (*nip == PPC_RAW_NOP())
548 nip -= 2;
549 else if (*nip == PPC_RAW_ISYNC())
550 --nip;
551 if (*nip == PPC_RAW_SYNC() || get_op(*nip) == OP_TRAP) {
552 unsigned int rb;
553
554 --nip;
555 rb = (*nip >> 11) & 0x1f;
556 printk(KERN_DEBUG "%s bad port %lx at %p\n",
557 (*nip & 0x100)? "OUT to": "IN from",
558 regs->gpr[rb] - _IO_BASE, nip);
559 regs_set_return_msr(regs, regs->msr | MSR_RI);
560 regs_set_return_ip(regs, extable_fixup(entry));
561 return 1;
562 }
563 }
564#endif /* CONFIG_PPC32 */
565 return 0;
566}
567
568#ifdef CONFIG_PPC_ADV_DEBUG_REGS
569/* On 4xx, the reason for the machine check or program exception
570 is in the ESR. */
571#define get_reason(regs) ((regs)->dsisr)
572#define REASON_FP ESR_FP
573#define REASON_ILLEGAL (ESR_PIL | ESR_PUO)
574#define REASON_PRIVILEGED ESR_PPR
575#define REASON_TRAP ESR_PTR
576#define REASON_PREFIXED 0
577#define REASON_BOUNDARY 0
578
579/* single-step stuff */
580#define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC)
581#define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC)
582#define clear_br_trace(regs) do {} while(0)
583#else
584/* On non-4xx, the reason for the machine check or program
585 exception is in the MSR. */
586#define get_reason(regs) ((regs)->msr)
587#define REASON_TM SRR1_PROGTM
588#define REASON_FP SRR1_PROGFPE
589#define REASON_ILLEGAL SRR1_PROGILL
590#define REASON_PRIVILEGED SRR1_PROGPRIV
591#define REASON_TRAP SRR1_PROGTRAP
592#define REASON_PREFIXED SRR1_PREFIXED
593#define REASON_BOUNDARY SRR1_BOUNDARY
594
595#define single_stepping(regs) ((regs)->msr & MSR_SE)
596#define clear_single_step(regs) (regs_set_return_msr((regs), (regs)->msr & ~MSR_SE))
597#define clear_br_trace(regs) (regs_set_return_msr((regs), (regs)->msr & ~MSR_BE))
598#endif
599
600#define inst_length(reason) (((reason) & REASON_PREFIXED) ? 8 : 4)
601
602#if defined(CONFIG_E500)
603int machine_check_e500mc(struct pt_regs *regs)
604{
605 unsigned long mcsr = mfspr(SPRN_MCSR);
606 unsigned long pvr = mfspr(SPRN_PVR);
607 unsigned long reason = mcsr;
608 int recoverable = 1;
609
610 if (reason & MCSR_LD) {
611 recoverable = fsl_rio_mcheck_exception(regs);
612 if (recoverable == 1)
613 goto silent_out;
614 }
615
616 printk("Machine check in kernel mode.\n");
617 printk("Caused by (from MCSR=%lx): ", reason);
618
619 if (reason & MCSR_MCP)
620 pr_cont("Machine Check Signal\n");
621
622 if (reason & MCSR_ICPERR) {
623 pr_cont("Instruction Cache Parity Error\n");
624
625 /*
626 * This is recoverable by invalidating the i-cache.
627 */
628 mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
629 while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
630 ;
631
632 /*
633 * This will generally be accompanied by an instruction
634 * fetch error report -- only treat MCSR_IF as fatal
635 * if it wasn't due to an L1 parity error.
636 */
637 reason &= ~MCSR_IF;
638 }
639
640 if (reason & MCSR_DCPERR_MC) {
641 pr_cont("Data Cache Parity Error\n");
642
643 /*
644 * In write shadow mode we auto-recover from the error, but it
645 * may still get logged and cause a machine check. We should
646 * only treat the non-write shadow case as non-recoverable.
647 */
648 /* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
649 * is not implemented but L1 data cache always runs in write
650 * shadow mode. Hence on data cache parity errors HW will
651 * automatically invalidate the L1 Data Cache.
652 */
653 if (PVR_VER(pvr) != PVR_VER_E6500) {
654 if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
655 recoverable = 0;
656 }
657 }
658
659 if (reason & MCSR_L2MMU_MHIT) {
660 pr_cont("Hit on multiple TLB entries\n");
661 recoverable = 0;
662 }
663
664 if (reason & MCSR_NMI)
665 pr_cont("Non-maskable interrupt\n");
666
667 if (reason & MCSR_IF) {
668 pr_cont("Instruction Fetch Error Report\n");
669 recoverable = 0;
670 }
671
672 if (reason & MCSR_LD) {
673 pr_cont("Load Error Report\n");
674 recoverable = 0;
675 }
676
677 if (reason & MCSR_ST) {
678 pr_cont("Store Error Report\n");
679 recoverable = 0;
680 }
681
682 if (reason & MCSR_LDG) {
683 pr_cont("Guarded Load Error Report\n");
684 recoverable = 0;
685 }
686
687 if (reason & MCSR_TLBSYNC)
688 pr_cont("Simultaneous tlbsync operations\n");
689
690 if (reason & MCSR_BSL2_ERR) {
691 pr_cont("Level 2 Cache Error\n");
692 recoverable = 0;
693 }
694
695 if (reason & MCSR_MAV) {
696 u64 addr;
697
698 addr = mfspr(SPRN_MCAR);
699 addr |= (u64)mfspr(SPRN_MCARU) << 32;
700
701 pr_cont("Machine Check %s Address: %#llx\n",
702 reason & MCSR_MEA ? "Effective" : "Physical", addr);
703 }
704
705silent_out:
706 mtspr(SPRN_MCSR, mcsr);
707 return mfspr(SPRN_MCSR) == 0 && recoverable;
708}
709
710int machine_check_e500(struct pt_regs *regs)
711{
712 unsigned long reason = mfspr(SPRN_MCSR);
713
714 if (reason & MCSR_BUS_RBERR) {
715 if (fsl_rio_mcheck_exception(regs))
716 return 1;
717 if (fsl_pci_mcheck_exception(regs))
718 return 1;
719 }
720
721 printk("Machine check in kernel mode.\n");
722 printk("Caused by (from MCSR=%lx): ", reason);
723
724 if (reason & MCSR_MCP)
725 pr_cont("Machine Check Signal\n");
726 if (reason & MCSR_ICPERR)
727 pr_cont("Instruction Cache Parity Error\n");
728 if (reason & MCSR_DCP_PERR)
729 pr_cont("Data Cache Push Parity Error\n");
730 if (reason & MCSR_DCPERR)
731 pr_cont("Data Cache Parity Error\n");
732 if (reason & MCSR_BUS_IAERR)
733 pr_cont("Bus - Instruction Address Error\n");
734 if (reason & MCSR_BUS_RAERR)
735 pr_cont("Bus - Read Address Error\n");
736 if (reason & MCSR_BUS_WAERR)
737 pr_cont("Bus - Write Address Error\n");
738 if (reason & MCSR_BUS_IBERR)
739 pr_cont("Bus - Instruction Data Error\n");
740 if (reason & MCSR_BUS_RBERR)
741 pr_cont("Bus - Read Data Bus Error\n");
742 if (reason & MCSR_BUS_WBERR)
743 pr_cont("Bus - Write Data Bus Error\n");
744 if (reason & MCSR_BUS_IPERR)
745 pr_cont("Bus - Instruction Parity Error\n");
746 if (reason & MCSR_BUS_RPERR)
747 pr_cont("Bus - Read Parity Error\n");
748
749 return 0;
750}
751
752int machine_check_generic(struct pt_regs *regs)
753{
754 return 0;
755}
756#elif defined(CONFIG_PPC32)
757int machine_check_generic(struct pt_regs *regs)
758{
759 unsigned long reason = regs->msr;
760
761 printk("Machine check in kernel mode.\n");
762 printk("Caused by (from SRR1=%lx): ", reason);
763 switch (reason & 0x601F0000) {
764 case 0x80000:
765 pr_cont("Machine check signal\n");
766 break;
767 case 0x40000:
768 case 0x140000: /* 7450 MSS error and TEA */
769 pr_cont("Transfer error ack signal\n");
770 break;
771 case 0x20000:
772 pr_cont("Data parity error signal\n");
773 break;
774 case 0x10000:
775 pr_cont("Address parity error signal\n");
776 break;
777 case 0x20000000:
778 pr_cont("L1 Data Cache error\n");
779 break;
780 case 0x40000000:
781 pr_cont("L1 Instruction Cache error\n");
782 break;
783 case 0x00100000:
784 pr_cont("L2 data cache parity error\n");
785 break;
786 default:
787 pr_cont("Unknown values in msr\n");
788 }
789 return 0;
790}
791#endif /* everything else */
792
793void die_mce(const char *str, struct pt_regs *regs, long err)
794{
795 /*
796 * The machine check wants to kill the interrupted context, but
797 * do_exit() checks for in_interrupt() and panics in that case, so
798 * exit the irq/nmi before calling die.
799 */
800 if (in_nmi())
801 nmi_exit();
802 else
803 irq_exit();
804 die(str, regs, err);
805}
806
807/*
808 * BOOK3S_64 does not usually call this handler as a non-maskable interrupt
809 * (it uses its own early real-mode handler to handle the MCE proper
810 * and then raises irq_work to call this handler when interrupts are
811 * enabled). The only time when this is not true is if the early handler
812 * is unrecoverable, then it does call this directly to try to get a
813 * message out.
814 */
815static void __machine_check_exception(struct pt_regs *regs)
816{
817 int recover = 0;
818
819 __this_cpu_inc(irq_stat.mce_exceptions);
820
821 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
822
823 /* See if any machine dependent calls. In theory, we would want
824 * to call the CPU first, and call the ppc_md. one if the CPU
825 * one returns a positive number. However there is existing code
826 * that assumes the board gets a first chance, so let's keep it
827 * that way for now and fix things later. --BenH.
828 */
829 if (ppc_md.machine_check_exception)
830 recover = ppc_md.machine_check_exception(regs);
831 else if (cur_cpu_spec->machine_check)
832 recover = cur_cpu_spec->machine_check(regs);
833
834 if (recover > 0)
835 goto bail;
836
837 if (debugger_fault_handler(regs))
838 goto bail;
839
840 if (check_io_access(regs))
841 goto bail;
842
843 die_mce("Machine check", regs, SIGBUS);
844
845bail:
846 /* Must die if the interrupt is not recoverable */
847 if (!(regs->msr & MSR_RI))
848 die_mce("Unrecoverable Machine check", regs, SIGBUS);
849}
850
851#ifdef CONFIG_PPC_BOOK3S_64
852DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async)
853{
854 __machine_check_exception(regs);
855}
856#endif
857DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
858{
859 __machine_check_exception(regs);
860
861 return 0;
862}
863
864DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */
865{
866 die("System Management Interrupt", regs, SIGABRT);
867}
868
869#ifdef CONFIG_VSX
870static void p9_hmi_special_emu(struct pt_regs *regs)
871{
872 unsigned int ra, rb, t, i, sel, instr, rc;
873 const void __user *addr;
874 u8 vbuf[16] __aligned(16), *vdst;
875 unsigned long ea, msr, msr_mask;
876 bool swap;
877
878 if (__get_user(instr, (unsigned int __user *)regs->nip))
879 return;
880
881 /*
882 * lxvb16x opcode: 0x7c0006d8
883 * lxvd2x opcode: 0x7c000698
884 * lxvh8x opcode: 0x7c000658
885 * lxvw4x opcode: 0x7c000618
886 */
887 if ((instr & 0xfc00073e) != 0x7c000618) {
888 pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx"
889 " instr=%08x\n",
890 smp_processor_id(), current->comm, current->pid,
891 regs->nip, instr);
892 return;
893 }
894
895 /* Grab vector registers into the task struct */
896 msr = regs->msr; /* Grab msr before we flush the bits */
897 flush_vsx_to_thread(current);
898 enable_kernel_altivec();
899
900 /*
901 * Is userspace running with a different endian (this is rare but
902 * not impossible)
903 */
904 swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
905
906 /* Decode the instruction */
907 ra = (instr >> 16) & 0x1f;
908 rb = (instr >> 11) & 0x1f;
909 t = (instr >> 21) & 0x1f;
910 if (instr & 1)
911 vdst = (u8 *)¤t->thread.vr_state.vr[t];
912 else
913 vdst = (u8 *)¤t->thread.fp_state.fpr[t][0];
914
915 /* Grab the vector address */
916 ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
917 if (is_32bit_task())
918 ea &= 0xfffffffful;
919 addr = (__force const void __user *)ea;
920
921 /* Check it */
922 if (!access_ok(addr, 16)) {
923 pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx"
924 " instr=%08x addr=%016lx\n",
925 smp_processor_id(), current->comm, current->pid,
926 regs->nip, instr, (unsigned long)addr);
927 return;
928 }
929
930 /* Read the vector */
931 rc = 0;
932 if ((unsigned long)addr & 0xfUL)
933 /* unaligned case */
934 rc = __copy_from_user_inatomic(vbuf, addr, 16);
935 else
936 __get_user_atomic_128_aligned(vbuf, addr, rc);
937 if (rc) {
938 pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx"
939 " instr=%08x addr=%016lx\n",
940 smp_processor_id(), current->comm, current->pid,
941 regs->nip, instr, (unsigned long)addr);
942 return;
943 }
944
945 pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx"
946 " instr=%08x addr=%016lx\n",
947 smp_processor_id(), current->comm, current->pid, regs->nip,
948 instr, (unsigned long) addr);
949
950 /* Grab instruction "selector" */
951 sel = (instr >> 6) & 3;
952
953 /*
954 * Check to make sure the facility is actually enabled. This
955 * could happen if we get a false positive hit.
956 *
957 * lxvd2x/lxvw4x always check MSR VSX sel = 0,2
958 * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used sel = 1,3
959 */
960 msr_mask = MSR_VSX;
961 if ((sel & 1) && (instr & 1)) /* lxvh8x & lxvb16x + VSR >= 32 */
962 msr_mask = MSR_VEC;
963 if (!(msr & msr_mask)) {
964 pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx"
965 " instr=%08x msr:%016lx\n",
966 smp_processor_id(), current->comm, current->pid,
967 regs->nip, instr, msr);
968 return;
969 }
970
971 /* Do logging here before we modify sel based on endian */
972 switch (sel) {
973 case 0: /* lxvw4x */
974 PPC_WARN_EMULATED(lxvw4x, regs);
975 break;
976 case 1: /* lxvh8x */
977 PPC_WARN_EMULATED(lxvh8x, regs);
978 break;
979 case 2: /* lxvd2x */
980 PPC_WARN_EMULATED(lxvd2x, regs);
981 break;
982 case 3: /* lxvb16x */
983 PPC_WARN_EMULATED(lxvb16x, regs);
984 break;
985 }
986
987#ifdef __LITTLE_ENDIAN__
988 /*
989 * An LE kernel stores the vector in the task struct as an LE
990 * byte array (effectively swapping both the components and
991 * the content of the components). Those instructions expect
992 * the components to remain in ascending address order, so we
993 * swap them back.
994 *
995 * If we are running a BE user space, the expectation is that
996 * of a simple memcpy, so forcing the emulation to look like
997 * a lxvb16x should do the trick.
998 */
999 if (swap)
1000 sel = 3;
1001
1002 switch (sel) {
1003 case 0: /* lxvw4x */
1004 for (i = 0; i < 4; i++)
1005 ((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i];
1006 break;
1007 case 1: /* lxvh8x */
1008 for (i = 0; i < 8; i++)
1009 ((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i];
1010 break;
1011 case 2: /* lxvd2x */
1012 for (i = 0; i < 2; i++)
1013 ((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i];
1014 break;
1015 case 3: /* lxvb16x */
1016 for (i = 0; i < 16; i++)
1017 vdst[i] = vbuf[15-i];
1018 break;
1019 }
1020#else /* __LITTLE_ENDIAN__ */
1021 /* On a big endian kernel, a BE userspace only needs a memcpy */
1022 if (!swap)
1023 sel = 3;
1024
1025 /* Otherwise, we need to swap the content of the components */
1026 switch (sel) {
1027 case 0: /* lxvw4x */
1028 for (i = 0; i < 4; i++)
1029 ((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]);
1030 break;
1031 case 1: /* lxvh8x */
1032 for (i = 0; i < 8; i++)
1033 ((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]);
1034 break;
1035 case 2: /* lxvd2x */
1036 for (i = 0; i < 2; i++)
1037 ((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]);
1038 break;
1039 case 3: /* lxvb16x */
1040 memcpy(vdst, vbuf, 16);
1041 break;
1042 }
1043#endif /* !__LITTLE_ENDIAN__ */
1044
1045 /* Go to next instruction */
1046 regs_add_return_ip(regs, 4);
1047}
1048#endif /* CONFIG_VSX */
1049
1050DEFINE_INTERRUPT_HANDLER_ASYNC(handle_hmi_exception)
1051{
1052 struct pt_regs *old_regs;
1053
1054 old_regs = set_irq_regs(regs);
1055
1056#ifdef CONFIG_VSX
1057 /* Real mode flagged P9 special emu is needed */
1058 if (local_paca->hmi_p9_special_emu) {
1059 local_paca->hmi_p9_special_emu = 0;
1060
1061 /*
1062 * We don't want to take page faults while doing the
1063 * emulation, we just replay the instruction if necessary.
1064 */
1065 pagefault_disable();
1066 p9_hmi_special_emu(regs);
1067 pagefault_enable();
1068 }
1069#endif /* CONFIG_VSX */
1070
1071 if (ppc_md.handle_hmi_exception)
1072 ppc_md.handle_hmi_exception(regs);
1073
1074 set_irq_regs(old_regs);
1075}
1076
1077DEFINE_INTERRUPT_HANDLER(unknown_exception)
1078{
1079 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
1080 regs->nip, regs->msr, regs->trap);
1081
1082 _exception(SIGTRAP, regs, TRAP_UNK, 0);
1083}
1084
1085DEFINE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception)
1086{
1087 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
1088 regs->nip, regs->msr, regs->trap);
1089
1090 _exception(SIGTRAP, regs, TRAP_UNK, 0);
1091}
1092
1093DEFINE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception)
1094{
1095 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
1096 regs->nip, regs->msr, regs->trap);
1097
1098 _exception(SIGTRAP, regs, TRAP_UNK, 0);
1099
1100 return 0;
1101}
1102
1103DEFINE_INTERRUPT_HANDLER(instruction_breakpoint_exception)
1104{
1105 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
1106 5, SIGTRAP) == NOTIFY_STOP)
1107 return;
1108 if (debugger_iabr_match(regs))
1109 return;
1110 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
1111}
1112
1113DEFINE_INTERRUPT_HANDLER(RunModeException)
1114{
1115 _exception(SIGTRAP, regs, TRAP_UNK, 0);
1116}
1117
1118static void __single_step_exception(struct pt_regs *regs)
1119{
1120 clear_single_step(regs);
1121 clear_br_trace(regs);
1122
1123 if (kprobe_post_handler(regs))
1124 return;
1125
1126 if (notify_die(DIE_SSTEP, "single_step", regs, 5,
1127 5, SIGTRAP) == NOTIFY_STOP)
1128 return;
1129 if (debugger_sstep(regs))
1130 return;
1131
1132 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
1133}
1134
1135DEFINE_INTERRUPT_HANDLER(single_step_exception)
1136{
1137 __single_step_exception(regs);
1138}
1139
1140/*
1141 * After we have successfully emulated an instruction, we have to
1142 * check if the instruction was being single-stepped, and if so,
1143 * pretend we got a single-step exception. This was pointed out
1144 * by Kumar Gala. -- paulus
1145 */
1146static void emulate_single_step(struct pt_regs *regs)
1147{
1148 if (single_stepping(regs))
1149 __single_step_exception(regs);
1150}
1151
1152static inline int __parse_fpscr(unsigned long fpscr)
1153{
1154 int ret = FPE_FLTUNK;
1155
1156 /* Invalid operation */
1157 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
1158 ret = FPE_FLTINV;
1159
1160 /* Overflow */
1161 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
1162 ret = FPE_FLTOVF;
1163
1164 /* Underflow */
1165 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
1166 ret = FPE_FLTUND;
1167
1168 /* Divide by zero */
1169 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
1170 ret = FPE_FLTDIV;
1171
1172 /* Inexact result */
1173 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
1174 ret = FPE_FLTRES;
1175
1176 return ret;
1177}
1178
1179static void parse_fpe(struct pt_regs *regs)
1180{
1181 int code = 0;
1182
1183 flush_fp_to_thread(current);
1184
1185#ifdef CONFIG_PPC_FPU_REGS
1186 code = __parse_fpscr(current->thread.fp_state.fpscr);
1187#endif
1188
1189 _exception(SIGFPE, regs, code, regs->nip);
1190}
1191
1192/*
1193 * Illegal instruction emulation support. Originally written to
1194 * provide the PVR to user applications using the mfspr rd, PVR.
1195 * Return non-zero if we can't emulate, or -EFAULT if the associated
1196 * memory access caused an access fault. Return zero on success.
1197 *
1198 * There are a couple of ways to do this, either "decode" the instruction
1199 * or directly match lots of bits. In this case, matching lots of
1200 * bits is faster and easier.
1201 *
1202 */
1203static int emulate_string_inst(struct pt_regs *regs, u32 instword)
1204{
1205 u8 rT = (instword >> 21) & 0x1f;
1206 u8 rA = (instword >> 16) & 0x1f;
1207 u8 NB_RB = (instword >> 11) & 0x1f;
1208 u32 num_bytes;
1209 unsigned long EA;
1210 int pos = 0;
1211
1212 /* Early out if we are an invalid form of lswx */
1213 if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
1214 if ((rT == rA) || (rT == NB_RB))
1215 return -EINVAL;
1216
1217 EA = (rA == 0) ? 0 : regs->gpr[rA];
1218
1219 switch (instword & PPC_INST_STRING_MASK) {
1220 case PPC_INST_LSWX:
1221 case PPC_INST_STSWX:
1222 EA += NB_RB;
1223 num_bytes = regs->xer & 0x7f;
1224 break;
1225 case PPC_INST_LSWI:
1226 case PPC_INST_STSWI:
1227 num_bytes = (NB_RB == 0) ? 32 : NB_RB;
1228 break;
1229 default:
1230 return -EINVAL;
1231 }
1232
1233 while (num_bytes != 0)
1234 {
1235 u8 val;
1236 u32 shift = 8 * (3 - (pos & 0x3));
1237
1238 /* if process is 32-bit, clear upper 32 bits of EA */
1239 if ((regs->msr & MSR_64BIT) == 0)
1240 EA &= 0xFFFFFFFF;
1241
1242 switch ((instword & PPC_INST_STRING_MASK)) {
1243 case PPC_INST_LSWX:
1244 case PPC_INST_LSWI:
1245 if (get_user(val, (u8 __user *)EA))
1246 return -EFAULT;
1247 /* first time updating this reg,
1248 * zero it out */
1249 if (pos == 0)
1250 regs->gpr[rT] = 0;
1251 regs->gpr[rT] |= val << shift;
1252 break;
1253 case PPC_INST_STSWI:
1254 case PPC_INST_STSWX:
1255 val = regs->gpr[rT] >> shift;
1256 if (put_user(val, (u8 __user *)EA))
1257 return -EFAULT;
1258 break;
1259 }
1260 /* move EA to next address */
1261 EA += 1;
1262 num_bytes--;
1263
1264 /* manage our position within the register */
1265 if (++pos == 4) {
1266 pos = 0;
1267 if (++rT == 32)
1268 rT = 0;
1269 }
1270 }
1271
1272 return 0;
1273}
1274
1275static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
1276{
1277 u32 ra,rs;
1278 unsigned long tmp;
1279
1280 ra = (instword >> 16) & 0x1f;
1281 rs = (instword >> 21) & 0x1f;
1282
1283 tmp = regs->gpr[rs];
1284 tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
1285 tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
1286 tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1287 regs->gpr[ra] = tmp;
1288
1289 return 0;
1290}
1291
1292static int emulate_isel(struct pt_regs *regs, u32 instword)
1293{
1294 u8 rT = (instword >> 21) & 0x1f;
1295 u8 rA = (instword >> 16) & 0x1f;
1296 u8 rB = (instword >> 11) & 0x1f;
1297 u8 BC = (instword >> 6) & 0x1f;
1298 u8 bit;
1299 unsigned long tmp;
1300
1301 tmp = (rA == 0) ? 0 : regs->gpr[rA];
1302 bit = (regs->ccr >> (31 - BC)) & 0x1;
1303
1304 regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
1305
1306 return 0;
1307}
1308
1309#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1310static inline bool tm_abort_check(struct pt_regs *regs, int cause)
1311{
1312 /* If we're emulating a load/store in an active transaction, we cannot
1313 * emulate it as the kernel operates in transaction suspended context.
1314 * We need to abort the transaction. This creates a persistent TM
1315 * abort so tell the user what caused it with a new code.
1316 */
1317 if (MSR_TM_TRANSACTIONAL(regs->msr)) {
1318 tm_enable();
1319 tm_abort(cause);
1320 return true;
1321 }
1322 return false;
1323}
1324#else
1325static inline bool tm_abort_check(struct pt_regs *regs, int reason)
1326{
1327 return false;
1328}
1329#endif
1330
1331static int emulate_instruction(struct pt_regs *regs)
1332{
1333 u32 instword;
1334 u32 rd;
1335
1336 if (!user_mode(regs))
1337 return -EINVAL;
1338
1339 if (get_user(instword, (u32 __user *)(regs->nip)))
1340 return -EFAULT;
1341
1342 /* Emulate the mfspr rD, PVR. */
1343 if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
1344 PPC_WARN_EMULATED(mfpvr, regs);
1345 rd = (instword >> 21) & 0x1f;
1346 regs->gpr[rd] = mfspr(SPRN_PVR);
1347 return 0;
1348 }
1349
1350 /* Emulating the dcba insn is just a no-op. */
1351 if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
1352 PPC_WARN_EMULATED(dcba, regs);
1353 return 0;
1354 }
1355
1356 /* Emulate the mcrxr insn. */
1357 if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
1358 int shift = (instword >> 21) & 0x1c;
1359 unsigned long msk = 0xf0000000UL >> shift;
1360
1361 PPC_WARN_EMULATED(mcrxr, regs);
1362 regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
1363 regs->xer &= ~0xf0000000UL;
1364 return 0;
1365 }
1366
1367 /* Emulate load/store string insn. */
1368 if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
1369 if (tm_abort_check(regs,
1370 TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
1371 return -EINVAL;
1372 PPC_WARN_EMULATED(string, regs);
1373 return emulate_string_inst(regs, instword);
1374 }
1375
1376 /* Emulate the popcntb (Population Count Bytes) instruction. */
1377 if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
1378 PPC_WARN_EMULATED(popcntb, regs);
1379 return emulate_popcntb_inst(regs, instword);
1380 }
1381
1382 /* Emulate isel (Integer Select) instruction */
1383 if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
1384 PPC_WARN_EMULATED(isel, regs);
1385 return emulate_isel(regs, instword);
1386 }
1387
1388 /* Emulate sync instruction variants */
1389 if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
1390 PPC_WARN_EMULATED(sync, regs);
1391 asm volatile("sync");
1392 return 0;
1393 }
1394
1395#ifdef CONFIG_PPC64
1396 /* Emulate the mfspr rD, DSCR. */
1397 if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
1398 PPC_INST_MFSPR_DSCR_USER) ||
1399 ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
1400 PPC_INST_MFSPR_DSCR)) &&
1401 cpu_has_feature(CPU_FTR_DSCR)) {
1402 PPC_WARN_EMULATED(mfdscr, regs);
1403 rd = (instword >> 21) & 0x1f;
1404 regs->gpr[rd] = mfspr(SPRN_DSCR);
1405 return 0;
1406 }
1407 /* Emulate the mtspr DSCR, rD. */
1408 if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
1409 PPC_INST_MTSPR_DSCR_USER) ||
1410 ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
1411 PPC_INST_MTSPR_DSCR)) &&
1412 cpu_has_feature(CPU_FTR_DSCR)) {
1413 PPC_WARN_EMULATED(mtdscr, regs);
1414 rd = (instword >> 21) & 0x1f;
1415 current->thread.dscr = regs->gpr[rd];
1416 current->thread.dscr_inherit = 1;
1417 mtspr(SPRN_DSCR, current->thread.dscr);
1418 return 0;
1419 }
1420#endif
1421
1422 return -EINVAL;
1423}
1424
1425int is_valid_bugaddr(unsigned long addr)
1426{
1427 return is_kernel_addr(addr);
1428}
1429
1430#ifdef CONFIG_MATH_EMULATION
1431static int emulate_math(struct pt_regs *regs)
1432{
1433 int ret;
1434
1435 ret = do_mathemu(regs);
1436 if (ret >= 0)
1437 PPC_WARN_EMULATED(math, regs);
1438
1439 switch (ret) {
1440 case 0:
1441 emulate_single_step(regs);
1442 return 0;
1443 case 1: {
1444 int code = 0;
1445 code = __parse_fpscr(current->thread.fp_state.fpscr);
1446 _exception(SIGFPE, regs, code, regs->nip);
1447 return 0;
1448 }
1449 case -EFAULT:
1450 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1451 return 0;
1452 }
1453
1454 return -1;
1455}
1456#else
1457static inline int emulate_math(struct pt_regs *regs) { return -1; }
1458#endif
1459
1460static void do_program_check(struct pt_regs *regs)
1461{
1462 unsigned int reason = get_reason(regs);
1463
1464 /* We can now get here via a FP Unavailable exception if the core
1465 * has no FPU, in that case the reason flags will be 0 */
1466
1467 if (reason & REASON_FP) {
1468 /* IEEE FP exception */
1469 parse_fpe(regs);
1470 return;
1471 }
1472 if (reason & REASON_TRAP) {
1473 unsigned long bugaddr;
1474 /* Debugger is first in line to stop recursive faults in
1475 * rcu_lock, notify_die, or atomic_notifier_call_chain */
1476 if (debugger_bpt(regs))
1477 return;
1478
1479 if (kprobe_handler(regs))
1480 return;
1481
1482 /* trap exception */
1483 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
1484 == NOTIFY_STOP)
1485 return;
1486
1487 bugaddr = regs->nip;
1488 /*
1489 * Fixup bugaddr for BUG_ON() in real mode
1490 */
1491 if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
1492 bugaddr += PAGE_OFFSET;
1493
1494 if (!(regs->msr & MSR_PR) && /* not user-mode */
1495 report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
1496 regs_add_return_ip(regs, 4);
1497 return;
1498 }
1499 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
1500 return;
1501 }
1502#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1503 if (reason & REASON_TM) {
1504 /* This is a TM "Bad Thing Exception" program check.
1505 * This occurs when:
1506 * - An rfid/hrfid/mtmsrd attempts to cause an illegal
1507 * transition in TM states.
1508 * - A trechkpt is attempted when transactional.
1509 * - A treclaim is attempted when non transactional.
1510 * - A tend is illegally attempted.
1511 * - writing a TM SPR when transactional.
1512 *
1513 * If usermode caused this, it's done something illegal and
1514 * gets a SIGILL slap on the wrist. We call it an illegal
1515 * operand to distinguish from the instruction just being bad
1516 * (e.g. executing a 'tend' on a CPU without TM!); it's an
1517 * illegal /placement/ of a valid instruction.
1518 */
1519 if (user_mode(regs)) {
1520 _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
1521 return;
1522 } else {
1523 printk(KERN_EMERG "Unexpected TM Bad Thing exception "
1524 "at %lx (msr 0x%lx) tm_scratch=%llx\n",
1525 regs->nip, regs->msr, get_paca()->tm_scratch);
1526 die("Unrecoverable exception", regs, SIGABRT);
1527 }
1528 }
1529#endif
1530
1531 /*
1532 * If we took the program check in the kernel skip down to sending a
1533 * SIGILL. The subsequent cases all relate to emulating instructions
1534 * which we should only do for userspace. We also do not want to enable
1535 * interrupts for kernel faults because that might lead to further
1536 * faults, and loose the context of the original exception.
1537 */
1538 if (!user_mode(regs))
1539 goto sigill;
1540
1541 interrupt_cond_local_irq_enable(regs);
1542
1543 /* (reason & REASON_ILLEGAL) would be the obvious thing here,
1544 * but there seems to be a hardware bug on the 405GP (RevD)
1545 * that means ESR is sometimes set incorrectly - either to
1546 * ESR_DST (!?) or 0. In the process of chasing this with the
1547 * hardware people - not sure if it can happen on any illegal
1548 * instruction or only on FP instructions, whether there is a
1549 * pattern to occurrences etc. -dgibson 31/Mar/2003
1550 */
1551 if (!emulate_math(regs))
1552 return;
1553
1554 /* Try to emulate it if we should. */
1555 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
1556 switch (emulate_instruction(regs)) {
1557 case 0:
1558 regs_add_return_ip(regs, 4);
1559 emulate_single_step(regs);
1560 return;
1561 case -EFAULT:
1562 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1563 return;
1564 }
1565 }
1566
1567sigill:
1568 if (reason & REASON_PRIVILEGED)
1569 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1570 else
1571 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1572
1573}
1574
1575DEFINE_INTERRUPT_HANDLER(program_check_exception)
1576{
1577 do_program_check(regs);
1578}
1579
1580/*
1581 * This occurs when running in hypervisor mode on POWER6 or later
1582 * and an illegal instruction is encountered.
1583 */
1584DEFINE_INTERRUPT_HANDLER(emulation_assist_interrupt)
1585{
1586 regs_set_return_msr(regs, regs->msr | REASON_ILLEGAL);
1587 do_program_check(regs);
1588}
1589
1590DEFINE_INTERRUPT_HANDLER(alignment_exception)
1591{
1592 int sig, code, fixed = 0;
1593 unsigned long reason;
1594
1595 interrupt_cond_local_irq_enable(regs);
1596
1597 reason = get_reason(regs);
1598 if (reason & REASON_BOUNDARY) {
1599 sig = SIGBUS;
1600 code = BUS_ADRALN;
1601 goto bad;
1602 }
1603
1604 if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
1605 return;
1606
1607 /* we don't implement logging of alignment exceptions */
1608 if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
1609 fixed = fix_alignment(regs);
1610
1611 if (fixed == 1) {
1612 /* skip over emulated instruction */
1613 regs_add_return_ip(regs, inst_length(reason));
1614 emulate_single_step(regs);
1615 return;
1616 }
1617
1618 /* Operand address was bad */
1619 if (fixed == -EFAULT) {
1620 sig = SIGSEGV;
1621 code = SEGV_ACCERR;
1622 } else {
1623 sig = SIGBUS;
1624 code = BUS_ADRALN;
1625 }
1626bad:
1627 if (user_mode(regs))
1628 _exception(sig, regs, code, regs->dar);
1629 else
1630 bad_page_fault(regs, sig);
1631}
1632
1633DEFINE_INTERRUPT_HANDLER(stack_overflow_exception)
1634{
1635 die("Kernel stack overflow", regs, SIGSEGV);
1636}
1637
1638DEFINE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception)
1639{
1640 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
1641 "%lx at %lx\n", regs->trap, regs->nip);
1642 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
1643}
1644
1645DEFINE_INTERRUPT_HANDLER(altivec_unavailable_exception)
1646{
1647 if (user_mode(regs)) {
1648 /* A user program has executed an altivec instruction,
1649 but this kernel doesn't support altivec. */
1650 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1651 return;
1652 }
1653
1654 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
1655 "%lx at %lx\n", regs->trap, regs->nip);
1656 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
1657}
1658
1659DEFINE_INTERRUPT_HANDLER(vsx_unavailable_exception)
1660{
1661 if (user_mode(regs)) {
1662 /* A user program has executed an vsx instruction,
1663 but this kernel doesn't support vsx. */
1664 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1665 return;
1666 }
1667
1668 printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
1669 "%lx at %lx\n", regs->trap, regs->nip);
1670 die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
1671}
1672
1673#ifdef CONFIG_PPC64
1674static void tm_unavailable(struct pt_regs *regs)
1675{
1676#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1677 if (user_mode(regs)) {
1678 current->thread.load_tm++;
1679 regs_set_return_msr(regs, regs->msr | MSR_TM);
1680 tm_enable();
1681 tm_restore_sprs(¤t->thread);
1682 return;
1683 }
1684#endif
1685 pr_emerg("Unrecoverable TM Unavailable Exception "
1686 "%lx at %lx\n", regs->trap, regs->nip);
1687 die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
1688}
1689
1690DEFINE_INTERRUPT_HANDLER(facility_unavailable_exception)
1691{
1692 static char *facility_strings[] = {
1693 [FSCR_FP_LG] = "FPU",
1694 [FSCR_VECVSX_LG] = "VMX/VSX",
1695 [FSCR_DSCR_LG] = "DSCR",
1696 [FSCR_PM_LG] = "PMU SPRs",
1697 [FSCR_BHRB_LG] = "BHRB",
1698 [FSCR_TM_LG] = "TM",
1699 [FSCR_EBB_LG] = "EBB",
1700 [FSCR_TAR_LG] = "TAR",
1701 [FSCR_MSGP_LG] = "MSGP",
1702 [FSCR_SCV_LG] = "SCV",
1703 [FSCR_PREFIX_LG] = "PREFIX",
1704 };
1705 char *facility = "unknown";
1706 u64 value;
1707 u32 instword, rd;
1708 u8 status;
1709 bool hv;
1710
1711 hv = (TRAP(regs) == INTERRUPT_H_FAC_UNAVAIL);
1712 if (hv)
1713 value = mfspr(SPRN_HFSCR);
1714 else
1715 value = mfspr(SPRN_FSCR);
1716
1717 status = value >> 56;
1718 if ((hv || status >= 2) &&
1719 (status < ARRAY_SIZE(facility_strings)) &&
1720 facility_strings[status])
1721 facility = facility_strings[status];
1722
1723 /* We should not have taken this interrupt in kernel */
1724 if (!user_mode(regs)) {
1725 pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n",
1726 facility, status, regs->nip);
1727 die("Unexpected facility unavailable exception", regs, SIGABRT);
1728 }
1729
1730 interrupt_cond_local_irq_enable(regs);
1731
1732 if (status == FSCR_DSCR_LG) {
1733 /*
1734 * User is accessing the DSCR register using the problem
1735 * state only SPR number (0x03) either through a mfspr or
1736 * a mtspr instruction. If it is a write attempt through
1737 * a mtspr, then we set the inherit bit. This also allows
1738 * the user to write or read the register directly in the
1739 * future by setting via the FSCR DSCR bit. But in case it
1740 * is a read DSCR attempt through a mfspr instruction, we
1741 * just emulate the instruction instead. This code path will
1742 * always emulate all the mfspr instructions till the user
1743 * has attempted at least one mtspr instruction. This way it
1744 * preserves the same behaviour when the user is accessing
1745 * the DSCR through privilege level only SPR number (0x11)
1746 * which is emulated through illegal instruction exception.
1747 * We always leave HFSCR DSCR set.
1748 */
1749 if (get_user(instword, (u32 __user *)(regs->nip))) {
1750 pr_err("Failed to fetch the user instruction\n");
1751 return;
1752 }
1753
1754 /* Write into DSCR (mtspr 0x03, RS) */
1755 if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
1756 == PPC_INST_MTSPR_DSCR_USER) {
1757 rd = (instword >> 21) & 0x1f;
1758 current->thread.dscr = regs->gpr[rd];
1759 current->thread.dscr_inherit = 1;
1760 current->thread.fscr |= FSCR_DSCR;
1761 mtspr(SPRN_FSCR, current->thread.fscr);
1762 }
1763
1764 /* Read from DSCR (mfspr RT, 0x03) */
1765 if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
1766 == PPC_INST_MFSPR_DSCR_USER) {
1767 if (emulate_instruction(regs)) {
1768 pr_err("DSCR based mfspr emulation failed\n");
1769 return;
1770 }
1771 regs_add_return_ip(regs, 4);
1772 emulate_single_step(regs);
1773 }
1774 return;
1775 }
1776
1777 if (status == FSCR_TM_LG) {
1778 /*
1779 * If we're here then the hardware is TM aware because it
1780 * generated an exception with FSRM_TM set.
1781 *
1782 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
1783 * told us not to do TM, or the kernel is not built with TM
1784 * support.
1785 *
1786 * If both of those things are true, then userspace can spam the
1787 * console by triggering the printk() below just by continually
1788 * doing tbegin (or any TM instruction). So in that case just
1789 * send the process a SIGILL immediately.
1790 */
1791 if (!cpu_has_feature(CPU_FTR_TM))
1792 goto out;
1793
1794 tm_unavailable(regs);
1795 return;
1796 }
1797
1798 pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
1799 hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
1800
1801out:
1802 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1803}
1804#endif
1805
1806#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1807
1808DEFINE_INTERRUPT_HANDLER(fp_unavailable_tm)
1809{
1810 /* Note: This does not handle any kind of FP laziness. */
1811
1812 TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
1813 regs->nip, regs->msr);
1814
1815 /* We can only have got here if the task started using FP after
1816 * beginning the transaction. So, the transactional regs are just a
1817 * copy of the checkpointed ones. But, we still need to recheckpoint
1818 * as we're enabling FP for the process; it will return, abort the
1819 * transaction, and probably retry but now with FP enabled. So the
1820 * checkpointed FP registers need to be loaded.
1821 */
1822 tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1823
1824 /*
1825 * Reclaim initially saved out bogus (lazy) FPRs to ckfp_state, and
1826 * then it was overwrite by the thr->fp_state by tm_reclaim_thread().
1827 *
1828 * At this point, ck{fp,vr}_state contains the exact values we want to
1829 * recheckpoint.
1830 */
1831
1832 /* Enable FP for the task: */
1833 current->thread.load_fp = 1;
1834
1835 /*
1836 * Recheckpoint all the checkpointed ckpt, ck{fp, vr}_state registers.
1837 */
1838 tm_recheckpoint(¤t->thread);
1839}
1840
1841DEFINE_INTERRUPT_HANDLER(altivec_unavailable_tm)
1842{
1843 /* See the comments in fp_unavailable_tm(). This function operates
1844 * the same way.
1845 */
1846
1847 TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
1848 "MSR=%lx\n",
1849 regs->nip, regs->msr);
1850 tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1851 current->thread.load_vec = 1;
1852 tm_recheckpoint(¤t->thread);
1853 current->thread.used_vr = 1;
1854}
1855
1856DEFINE_INTERRUPT_HANDLER(vsx_unavailable_tm)
1857{
1858 /* See the comments in fp_unavailable_tm(). This works similarly,
1859 * though we're loading both FP and VEC registers in here.
1860 *
1861 * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC
1862 * regs. Either way, set MSR_VSX.
1863 */
1864
1865 TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
1866 "MSR=%lx\n",
1867 regs->nip, regs->msr);
1868
1869 current->thread.used_vsr = 1;
1870
1871 /* This reclaims FP and/or VR regs if they're already enabled */
1872 tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1873
1874 current->thread.load_vec = 1;
1875 current->thread.load_fp = 1;
1876
1877 tm_recheckpoint(¤t->thread);
1878}
1879#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1880
1881#ifdef CONFIG_PPC64
1882DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
1883DEFINE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi)
1884{
1885 __this_cpu_inc(irq_stat.pmu_irqs);
1886
1887 perf_irq(regs);
1888
1889 return 0;
1890}
1891#endif
1892
1893DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
1894DEFINE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async)
1895{
1896 __this_cpu_inc(irq_stat.pmu_irqs);
1897
1898 perf_irq(regs);
1899}
1900
1901DEFINE_INTERRUPT_HANDLER_RAW(performance_monitor_exception)
1902{
1903 /*
1904 * On 64-bit, if perf interrupts hit in a local_irq_disable
1905 * (soft-masked) region, we consider them as NMIs. This is required to
1906 * prevent hash faults on user addresses when reading callchains (and
1907 * looks better from an irq tracing perspective).
1908 */
1909 if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
1910 performance_monitor_exception_nmi(regs);
1911 else
1912 performance_monitor_exception_async(regs);
1913
1914 return 0;
1915}
1916
1917#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1918static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
1919{
1920 int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler. Torez
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, 5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, 6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, 1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, 2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, 3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, 4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
			       current->thread.debug.dbcr1))
		regs_set_return_msr(regs, regs->msr | MSR_DE);
	else
		/* Make sure the IDM flag is off */
		current->thread.debug.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}

DEFINE_INTERRUPT_HANDLER(DebugException)
{
	unsigned long debug_status = regs->dsisr;

	current->thread.debug.dbsr = debug_status;

	/*
	 * Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT.
	 */
	if (debug_status & DBSR_BT) {
		regs_set_return_msr(regs, regs->msr & ~MSR_DE);

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_BT;
			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs_set_return_msr(regs, regs->msr | MSR_DE);
			return;
		}

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) {	/* Instruction complete */
		regs_set_return_msr(regs, regs->msr & ~MSR_DE);

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
					       current->thread.debug.dbcr1))
				regs_set_return_msr(regs, regs->msr | MSR_DE);
			else
				/* Make sure the IDM bit is off */
				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_ALTIVEC
DEFINE_INTERRUPT_HANDLER(altivec_assist_exception)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

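	/*
	 * Flush the task's live Altivec state into the thread_struct so the
	 * emulation below operates on up-to-date register values.
	 */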
	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs_add_return_ip(regs, 4); /* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_FSL_BOOKE
DEFINE_INTERRUPT_HANDLER(CacheLockingException)
{
	unsigned long error_code = regs->dsisr;

	/*
	 * We treat cache locking instructions from the user as privileged
	 * ops; in the future we could try to do something smarter.
	 */
	if (error_code & (ESR_DLK | ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = FPE_FLTUNK;
	int err;

	interrupt_cond_local_irq_enable(regs);

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

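	/*
	 * Map the SPEFSCR status bits to a POSIX si_code, honouring the
	 * exception enables the task requested via prctl(PR_SET_FPEXC).
	 */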
	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
		code = FPE_FLTOVF;
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
		code = FPE_FLTUND;
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
		code = FPE_FLTINV;
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs_add_return_ip(regs, 4); /* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}
}

DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	interrupt_cond_local_irq_enable(regs);

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

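	/*
	 * Rewind NIP to the instruction that raised the exception so
	 * speround_handler() can fetch and emulate it.
	 */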
	regs_add_return_ip(regs, -4);
	err = speround_handler(regs);
	if (err == 0) {
		regs_add_return_ip(regs, 4); /* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, FPE_FLTUNK, regs->nip);
	}
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0. This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void __noreturn unrecoverable_exception(struct pt_regs *regs)
{
	pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
		 regs->trap, regs->nip, regs->msr);
	die("Unrecoverable exception", regs, SIGABRT);
	/* die() should not return */
	for (;;)
		;
}

#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception; simply disables further
 * watchdog interrupts.
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
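	/* Clearing TCR[WIE] masks any further watchdog interrupts. */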
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
}

DEFINE_INTERRUPT_HANDLER_NMI(WatchdogException)
{
	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
	return 0;
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
DEFINE_INTERRUPT_HANDLER(kernel_bad_stack)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}

#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }
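/* e.g. WARN_EMULATED_SETUP(isel) expands to: .isel = { .name = "isel" } */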

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(sync),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
	WARN_EMULATED_SETUP(lq_stq),
	WARN_EMULATED_SETUP(lxvw4x),
	WARN_EMULATED_SETUP(lxvh8x),
	WARN_EMULATED_SETUP(lxvd2x),
	WARN_EMULATED_SETUP(lxvb16x),
#endif
};

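/*
 * When nonzero, emulated-instruction use is reported through
 * ppc_warn_emulated_print(); toggled via the debugfs "do_warn" file below.
 */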
u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir;
	unsigned int i;
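	/*
	 * struct ppc_emulated consists solely of ppc_emulated_entry fields,
	 * so it can safely be walked as an array of entries.
	 */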
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);

	debugfs_create_u32("do_warn", 0644, dir, &ppc_warn_emulated);

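	/* Expose one counter file per emulated instruction type. */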
	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++)
		debugfs_create_u32(entries[i].name, 0644, dir,
				   (u32 *)&entries[i].val.counter);

	return 0;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */
1730 die("Unexpected facility unavailable exception", regs, SIGABRT);
1731 }
1732
1733 /* We restore the interrupt state now */
1734 if (!arch_irq_disabled_regs(regs))
1735 local_irq_enable();
1736
1737 if (status == FSCR_DSCR_LG) {
1738 /*
1739 * User is accessing the DSCR register using the problem
1740 * state only SPR number (0x03) either through a mfspr or
1741 * a mtspr instruction. If it is a write attempt through
1742 * a mtspr, then we set the inherit bit. This also allows
1743 * the user to write or read the register directly in the
1744 * future by setting via the FSCR DSCR bit. But in case it
1745 * is a read DSCR attempt through a mfspr instruction, we
1746 * just emulate the instruction instead. This code path will
1747 * always emulate all the mfspr instructions till the user
1748 * has attempted at least one mtspr instruction. This way it
1749 * preserves the same behaviour when the user is accessing
1750 * the DSCR through privilege level only SPR number (0x11)
1751 * which is emulated through illegal instruction exception.
1752 * We always leave HFSCR DSCR set.
1753 */
1754 if (get_user(instword, (u32 __user *)(regs->nip))) {
1755 pr_err("Failed to fetch the user instruction\n");
1756 return;
1757 }
1758
1759 /* Write into DSCR (mtspr 0x03, RS) */
1760 if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
1761 == PPC_INST_MTSPR_DSCR_USER) {
1762 rd = (instword >> 21) & 0x1f;
1763 current->thread.dscr = regs->gpr[rd];
1764 current->thread.dscr_inherit = 1;
1765 current->thread.fscr |= FSCR_DSCR;
1766 mtspr(SPRN_FSCR, current->thread.fscr);
1767 }
1768
1769 /* Read from DSCR (mfspr RT, 0x03) */
1770 if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
1771 == PPC_INST_MFSPR_DSCR_USER) {
1772 if (emulate_instruction(regs)) {
1773 pr_err("DSCR based mfspr emulation failed\n");
1774 return;
1775 }
1776 regs->nip += 4;
1777 emulate_single_step(regs);
1778 }
1779 return;
1780 }
1781
1782 if (status == FSCR_TM_LG) {
1783 /*
1784 * If we're here then the hardware is TM aware because it
1785 * generated an exception with FSRM_TM set.
1786 *
1787 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
1788 * told us not to do TM, or the kernel is not built with TM
1789 * support.
1790 *
1791 * If both of those things are true, then userspace can spam the
1792 * console by triggering the printk() below just by continually
1793 * doing tbegin (or any TM instruction). So in that case just
1794 * send the process a SIGILL immediately.
1795 */
1796 if (!cpu_has_feature(CPU_FTR_TM))
1797 goto out;
1798
1799 tm_unavailable(regs);
1800 return;
1801 }
1802
1803 pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
1804 hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
1805
1806out:
1807 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1808}
1809#endif
1810
1811#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1812
1813void fp_unavailable_tm(struct pt_regs *regs)
1814{
1815 /* Note: This does not handle any kind of FP laziness. */
1816
1817 TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
1818 regs->nip, regs->msr);
1819
1820 /* We can only have got here if the task started using FP after
1821 * beginning the transaction. So, the transactional regs are just a
1822 * copy of the checkpointed ones. But, we still need to recheckpoint
1823 * as we're enabling FP for the process; it will return, abort the
1824 * transaction, and probably retry but now with FP enabled. So the
1825 * checkpointed FP registers need to be loaded.
1826 */
1827 tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1828
1829 /*
1830 * Reclaim initially saved out bogus (lazy) FPRs to ckfp_state, and
1831 * then it was overwrite by the thr->fp_state by tm_reclaim_thread().
1832 *
1833 * At this point, ck{fp,vr}_state contains the exact values we want to
1834 * recheckpoint.
1835 */
1836
1837 /* Enable FP for the task: */
1838 current->thread.load_fp = 1;
1839
1840 /*
1841 * Recheckpoint all the checkpointed ckpt, ck{fp, vr}_state registers.
1842 */
1843 tm_recheckpoint(¤t->thread);
1844}
1845
1846void altivec_unavailable_tm(struct pt_regs *regs)
1847{
1848 /* See the comments in fp_unavailable_tm(). This function operates
1849 * the same way.
1850 */
1851
1852 TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
1853 "MSR=%lx\n",
1854 regs->nip, regs->msr);
1855 tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1856 current->thread.load_vec = 1;
1857 tm_recheckpoint(¤t->thread);
1858 current->thread.used_vr = 1;
1859}
1860
1861void vsx_unavailable_tm(struct pt_regs *regs)
1862{
1863 /* See the comments in fp_unavailable_tm(). This works similarly,
1864 * though we're loading both FP and VEC registers in here.
1865 *
1866 * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC
1867 * regs. Either way, set MSR_VSX.
1868 */
1869
1870 TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
1871 "MSR=%lx\n",
1872 regs->nip, regs->msr);
1873
1874 current->thread.used_vsr = 1;
1875
1876 /* This reclaims FP and/or VR regs if they're already enabled */
1877 tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1878
1879 current->thread.load_vec = 1;
1880 current->thread.load_fp = 1;
1881
1882 tm_recheckpoint(¤t->thread);
1883}
1884#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1885
1886void performance_monitor_exception(struct pt_regs *regs)
1887{
1888 __this_cpu_inc(irq_stat.pmu_irqs);
1889
1890 perf_irq(regs);
1891}
1892
1893#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1894static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
1895{
1896 int changed = 0;
1897 /*
1898 * Determine the cause of the debug event, clear the
1899 * event flags and send a trap to the handler. Torez
1900 */
1901 if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
1902 dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
1903#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1904 current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
1905#endif
1906 do_send_trap(regs, mfspr(SPRN_DAC1), debug_status,
1907 5);
1908 changed |= 0x01;
1909 } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
1910 dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
1911 do_send_trap(regs, mfspr(SPRN_DAC2), debug_status,
1912 6);
1913 changed |= 0x01;
1914 } else if (debug_status & DBSR_IAC1) {
1915 current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
1916 dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
1917 do_send_trap(regs, mfspr(SPRN_IAC1), debug_status,
1918 1);
1919 changed |= 0x01;
1920 } else if (debug_status & DBSR_IAC2) {
1921 current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
1922 do_send_trap(regs, mfspr(SPRN_IAC2), debug_status,
1923 2);
1924 changed |= 0x01;
1925 } else if (debug_status & DBSR_IAC3) {
1926 current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
1927 dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
1928 do_send_trap(regs, mfspr(SPRN_IAC3), debug_status,
1929 3);
1930 changed |= 0x01;
1931 } else if (debug_status & DBSR_IAC4) {
1932 current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
1933 do_send_trap(regs, mfspr(SPRN_IAC4), debug_status,
1934 4);
1935 changed |= 0x01;
1936 }
1937 /*
1938 * At the point this routine was called, the MSR(DE) was turned off.
1939 * Check all other debug flags and see if that bit needs to be turned
1940 * back on or not.
1941 */
1942 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
1943 current->thread.debug.dbcr1))
1944 regs->msr |= MSR_DE;
1945 else
1946 /* Make sure the IDM flag is off */
1947 current->thread.debug.dbcr0 &= ~DBCR0_IDM;
1948
1949 if (changed & 0x01)
1950 mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
1951}
1952
1953void DebugException(struct pt_regs *regs, unsigned long debug_status)
1954{
1955 current->thread.debug.dbsr = debug_status;
1956
1957 /* Hack alert: On BookE, Branch Taken stops on the branch itself, while
1958 * on server, it stops on the target of the branch. In order to simulate
1959 * the server behaviour, we thus restart right away with a single step
1960 * instead of stopping here when hitting a BT
1961 */
1962 if (debug_status & DBSR_BT) {
1963 regs->msr &= ~MSR_DE;
1964
1965 /* Disable BT */
1966 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
1967 /* Clear the BT event */
1968 mtspr(SPRN_DBSR, DBSR_BT);
1969
1970 /* Do the single step trick only when coming from userspace */
1971 if (user_mode(regs)) {
1972 current->thread.debug.dbcr0 &= ~DBCR0_BT;
1973 current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
1974 regs->msr |= MSR_DE;
1975 return;
1976 }
1977
1978 if (kprobe_post_handler(regs))
1979 return;
1980
1981 if (notify_die(DIE_SSTEP, "block_step", regs, 5,
1982 5, SIGTRAP) == NOTIFY_STOP) {
1983 return;
1984 }
1985 if (debugger_sstep(regs))
1986 return;
1987 } else if (debug_status & DBSR_IC) { /* Instruction complete */
1988 regs->msr &= ~MSR_DE;
1989
1990 /* Disable instruction completion */
1991 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
1992 /* Clear the instruction completion event */
1993 mtspr(SPRN_DBSR, DBSR_IC);
1994
1995 if (kprobe_post_handler(regs))
1996 return;
1997
1998 if (notify_die(DIE_SSTEP, "single_step", regs, 5,
1999 5, SIGTRAP) == NOTIFY_STOP) {
2000 return;
2001 }
2002
2003 if (debugger_sstep(regs))
2004 return;
2005
2006 if (user_mode(regs)) {
2007 current->thread.debug.dbcr0 &= ~DBCR0_IC;
2008 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
2009 current->thread.debug.dbcr1))
2010 regs->msr |= MSR_DE;
2011 else
2012 /* Make sure the IDM bit is off */
2013 current->thread.debug.dbcr0 &= ~DBCR0_IDM;
2014 }
2015
2016 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
2017 } else
2018 handle_debug(regs, debug_status);
2019}
2020NOKPROBE_SYMBOL(DebugException);
2021#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2022
2023#if !defined(CONFIG_TAU_INT)
2024void TAUException(struct pt_regs *regs)
2025{
2026 printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
2027 regs->nip, regs->msr, regs->trap, print_tainted());
2028}
2029#endif /* CONFIG_INT_TAU */
2030
2031#ifdef CONFIG_ALTIVEC
2032void altivec_assist_exception(struct pt_regs *regs)
2033{
2034 int err;
2035
2036 if (!user_mode(regs)) {
2037 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
2038 " at %lx\n", regs->nip);
2039 die("Kernel VMX/Altivec assist exception", regs, SIGILL);
2040 }
2041
2042 flush_altivec_to_thread(current);
2043
2044 PPC_WARN_EMULATED(altivec, regs);
2045 err = emulate_altivec(regs);
2046 if (err == 0) {
2047 regs->nip += 4; /* skip emulated instruction */
2048 emulate_single_step(regs);
2049 return;
2050 }
2051
2052 if (err == -EFAULT) {
2053 /* got an error reading the instruction */
2054 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
2055 } else {
2056 /* didn't recognize the instruction */
2057 /* XXX quick hack for now: set the non-Java bit in the VSCR */
2058 printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
2059 "in %s at %lx\n", current->comm, regs->nip);
2060 current->thread.vr_state.vscr.u[3] |= 0x10000;
2061 }
2062}
2063#endif /* CONFIG_ALTIVEC */
2064
2065#ifdef CONFIG_FSL_BOOKE
2066void CacheLockingException(struct pt_regs *regs, unsigned long address,
2067 unsigned long error_code)
2068{
2069 /* We treat cache locking instructions from the user
2070 * as priv ops, in the future we could try to do
2071 * something smarter
2072 */
2073 if (error_code & (ESR_DLK|ESR_ILK))
2074 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
2075 return;
2076}
2077#endif /* CONFIG_FSL_BOOKE */
2078
2079#ifdef CONFIG_SPE
2080void SPEFloatingPointException(struct pt_regs *regs)
2081{
2082 extern int do_spe_mathemu(struct pt_regs *regs);
2083 unsigned long spefscr;
2084 int fpexc_mode;
2085 int code = FPE_FLTUNK;
2086 int err;
2087
2088 /* We restore the interrupt state now */
2089 if (!arch_irq_disabled_regs(regs))
2090 local_irq_enable();
2091
2092 flush_spe_to_thread(current);
2093
2094 spefscr = current->thread.spefscr;
2095 fpexc_mode = current->thread.fpexc_mode;
2096
2097 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
2098 code = FPE_FLTOVF;
2099 }
2100 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
2101 code = FPE_FLTUND;
2102 }
2103 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
2104 code = FPE_FLTDIV;
2105 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
2106 code = FPE_FLTINV;
2107 }
2108 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
2109 code = FPE_FLTRES;
2110
2111 err = do_spe_mathemu(regs);
2112 if (err == 0) {
2113 regs->nip += 4; /* skip emulated instruction */
2114 emulate_single_step(regs);
2115 return;
2116 }
2117
2118 if (err == -EFAULT) {
2119 /* got an error reading the instruction */
2120 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
2121 } else if (err == -EINVAL) {
2122 /* didn't recognize the instruction */
2123 printk(KERN_ERR "unrecognized spe instruction "
2124 "in %s at %lx\n", current->comm, regs->nip);
2125 } else {
2126 _exception(SIGFPE, regs, code, regs->nip);
2127 }
2128
2129 return;
2130}
2131
2132void SPEFloatingPointRoundException(struct pt_regs *regs)
2133{
2134 extern int speround_handler(struct pt_regs *regs);
2135 int err;
2136
2137 /* We restore the interrupt state now */
2138 if (!arch_irq_disabled_regs(regs))
2139 local_irq_enable();
2140
2141 preempt_disable();
2142 if (regs->msr & MSR_SPE)
2143 giveup_spe(current);
2144 preempt_enable();
2145
2146 regs->nip -= 4;
2147 err = speround_handler(regs);
2148 if (err == 0) {
2149 regs->nip += 4; /* skip emulated instruction */
2150 emulate_single_step(regs);
2151 return;
2152 }
2153
2154 if (err == -EFAULT) {
2155 /* got an error reading the instruction */
2156 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
2157 } else if (err == -EINVAL) {
2158 /* didn't recognize the instruction */
2159 printk(KERN_ERR "unrecognized spe instruction "
2160 "in %s at %lx\n", current->comm, regs->nip);
2161 } else {
2162 _exception(SIGFPE, regs, FPE_FLTUNK, regs->nip);
2163 return;
2164 }
2165}
2166#endif
2167
2168/*
2169 * We enter here if we get an unrecoverable exception, that is, one
2170 * that happened at a point where the RI (recoverable interrupt) bit
2171 * in the MSR is 0. This indicates that SRR0/1 are live, and that
2172 * we therefore lost state by taking this exception.
2173 */
2174void unrecoverable_exception(struct pt_regs *regs)
2175{
2176 pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
2177 regs->trap, regs->nip, regs->msr);
2178 die("Unrecoverable exception", regs, SIGABRT);
2179}
2180NOKPROBE_SYMBOL(unrecoverable_exception);
2181
2182#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
2183/*
2184 * Default handler for a Watchdog exception,
2185 * spins until a reboot occurs
2186 */
2187void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
2188{
2189 /* Generic WatchdogHandler, implement your own */
2190 mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
2191 return;
2192}
2193
2194void WatchdogException(struct pt_regs *regs)
2195{
2196 printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
2197 WatchdogHandler(regs);
2198}
2199#endif
2200
2201/*
2202 * We enter here if we discover during exception entry that we are
2203 * running in supervisor mode with a userspace value in the stack pointer.
2204 */
2205void kernel_bad_stack(struct pt_regs *regs)
2206{
2207 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
2208 regs->gpr[1], regs->nip);
2209 die("Bad kernel stack pointer", regs, SIGABRT);
2210}
2211NOKPROBE_SYMBOL(kernel_bad_stack);
2212
2213void __init trap_init(void)
2214{
2215}
2216
2217
2218#ifdef CONFIG_PPC_EMULATED_STATS
2219
2220#define WARN_EMULATED_SETUP(type) .type = { .name = #type }
2221
2222struct ppc_emulated ppc_emulated = {
2223#ifdef CONFIG_ALTIVEC
2224 WARN_EMULATED_SETUP(altivec),
2225#endif
2226 WARN_EMULATED_SETUP(dcba),
2227 WARN_EMULATED_SETUP(dcbz),
2228 WARN_EMULATED_SETUP(fp_pair),
2229 WARN_EMULATED_SETUP(isel),
2230 WARN_EMULATED_SETUP(mcrxr),
2231 WARN_EMULATED_SETUP(mfpvr),
2232 WARN_EMULATED_SETUP(multiple),
2233 WARN_EMULATED_SETUP(popcntb),
2234 WARN_EMULATED_SETUP(spe),
2235 WARN_EMULATED_SETUP(string),
2236 WARN_EMULATED_SETUP(sync),
2237 WARN_EMULATED_SETUP(unaligned),
2238#ifdef CONFIG_MATH_EMULATION
2239 WARN_EMULATED_SETUP(math),
2240#endif
2241#ifdef CONFIG_VSX
2242 WARN_EMULATED_SETUP(vsx),
2243#endif
2244#ifdef CONFIG_PPC64
2245 WARN_EMULATED_SETUP(mfdscr),
2246 WARN_EMULATED_SETUP(mtdscr),
2247 WARN_EMULATED_SETUP(lq_stq),
2248 WARN_EMULATED_SETUP(lxvw4x),
2249 WARN_EMULATED_SETUP(lxvh8x),
2250 WARN_EMULATED_SETUP(lxvd2x),
2251 WARN_EMULATED_SETUP(lxvb16x),
2252#endif
2253};
2254
2255u32 ppc_warn_emulated;
2256
2257void ppc_warn_emulated_print(const char *type)
2258{
2259 pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
2260 type);
2261}
2262
2263static int __init ppc_warn_emulated_init(void)
2264{
2265 struct dentry *dir, *d;
2266 unsigned int i;
2267 struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
2268
2269 if (!powerpc_debugfs_root)
2270 return -ENODEV;
2271
2272 dir = debugfs_create_dir("emulated_instructions",
2273 powerpc_debugfs_root);
2274 if (!dir)
2275 return -ENOMEM;
2276
2277 d = debugfs_create_u32("do_warn", 0644, dir,
2278 &ppc_warn_emulated);
2279 if (!d)
2280 goto fail;
2281
2282 for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
2283 d = debugfs_create_u32(entries[i].name, 0644, dir,
2284 (u32 *)&entries[i].val.counter);
2285 if (!d)
2286 goto fail;
2287 }
2288
2289 return 0;
2290
2291fail:
2292 debugfs_remove_recursive(dir);
2293 return -ENOMEM;
2294}
2295
2296device_initcall(ppc_warn_emulated_init);
2297
2298#endif /* CONFIG_PPC_EMULATED_STATS */