/* arch/sparc64/kernel/traps.c
 *
 * Copyright (C) 1995,1997,2008,2009,2012 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
 */

/*
 * I like traps on v9, :))))
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kdebug.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/gfp.h>
#include <linux/context_tracking.h>

#include <asm/smp.h>
#include <asm/delay.h>
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/lsu.h>
#include <asm/dcu.h>
#include <asm/estate.h>
#include <asm/chafsr.h>
#include <asm/sfafsr.h>
#include <asm/psrcompat.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/head.h>
#include <asm/prom.h>
#include <asm/memctrl.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>

#include "entry.h"
#include "kernel.h"
#include "kstack.h"

/* When an irrecoverable trap occurs at tl > 0, the trap entry
 * code logs the trap state registers at every level in the trap
 * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
 * is as follows:
 */
struct tl1_traplog {
    struct {
        unsigned long tstate;
        unsigned long tpc;
        unsigned long tnpc;
        unsigned long tt;
    } trapstack[4];
    unsigned long tl;
};

static void dump_tl1_traplog(struct tl1_traplog *p)
{
    int i, limit;

    printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
           "dumping trap stack.\n", p->tl);

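    /* On sun4v only two trap levels are visible to the kernel;
     * levels above that belong to the hypervisor.
     */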
    limit = (tlb_type == hypervisor) ? 2 : 4;
    for (i = 0; i < limit; i++) {
        printk(KERN_EMERG
               "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
               "TNPC[%016lx] TT[%lx]\n",
               i + 1,
               p->trapstack[i].tstate, p->trapstack[i].tpc,
               p->trapstack[i].tnpc, p->trapstack[i].tt);
        printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
    }
}

void bad_trap(struct pt_regs *regs, long lvl)
{
    char buffer[32];
    siginfo_t info;

    if (notify_die(DIE_TRAP, "bad trap", regs,
                   0, lvl, SIGTRAP) == NOTIFY_STOP)
        return;

    if (lvl < 0x100) {
        sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
        die_if_kernel(buffer, regs);
    }

    lvl -= 0x100;
    if (regs->tstate & TSTATE_PRIV) {
        sprintf(buffer, "Kernel bad sw trap %lx", lvl);
        die_if_kernel(buffer, regs);
    }
    if (test_thread_flag(TIF_32BIT)) {
        regs->tpc &= 0xffffffff;
        regs->tnpc &= 0xffffffff;
    }
    info.si_signo = SIGILL;
    info.si_errno = 0;
    info.si_code = ILL_ILLTRP;
    info.si_addr = (void __user *)regs->tpc;
    info.si_trapno = lvl;
    force_sig_info(SIGILL, &info, current);
}

void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
    char buffer[32];

    if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
                   0, lvl, SIGTRAP) == NOTIFY_STOP)
        return;

    dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

    sprintf(buffer, "Bad trap %lx at tl>0", lvl);
    die_if_kernel(buffer, regs);
}

#ifdef CONFIG_DEBUG_BUGVERBOSE
void do_BUG(const char *file, int line)
{
    bust_spinlocks(1);
    printk("kernel BUG at %s:%d!\n", file, line);
}
EXPORT_SYMBOL(do_BUG);
#endif

static DEFINE_SPINLOCK(dimm_handler_lock);
static dimm_printer_t dimm_handler;

static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen)
{
    unsigned long flags;
    int ret = -ENODEV;

    spin_lock_irqsave(&dimm_handler_lock, flags);
    if (dimm_handler) {
        ret = dimm_handler(synd_code, paddr, buf, buflen);
    } else if (tlb_type == spitfire) {
        if (prom_getunumber(synd_code, paddr, buf, buflen) == -1)
            ret = -EINVAL;
        else
            ret = 0;
    } else
        ret = -ENODEV;
    spin_unlock_irqrestore(&dimm_handler_lock, flags);

    return ret;
}

int register_dimm_printer(dimm_printer_t func)
{
    unsigned long flags;
    int ret = 0;

    spin_lock_irqsave(&dimm_handler_lock, flags);
    if (!dimm_handler)
        dimm_handler = func;
    else
        ret = -EEXIST;
    spin_unlock_irqrestore(&dimm_handler_lock, flags);

    return ret;
}
EXPORT_SYMBOL_GPL(register_dimm_printer);

void unregister_dimm_printer(dimm_printer_t func)
{
    unsigned long flags;

    spin_lock_irqsave(&dimm_handler_lock, flags);
    if (dimm_handler == func)
        dimm_handler = NULL;
    spin_unlock_irqrestore(&dimm_handler_lock, flags);
}
EXPORT_SYMBOL_GPL(unregister_dimm_printer);
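
/* A minimal (hypothetical) memory-controller driver would hook in
 * roughly like this; only one printer may be registered at a time,
 * and a second register_dimm_printer() call fails with -EEXIST:
 *
 *	static int my_print_dimm(int synd_code, unsigned long paddr,
 *				 char *buf, int buflen)
 *	{
 *		return scnprintf(buf, buflen, "MB/DIMM%d",
 *				 my_decode_slot(paddr));
 *	}
 *
 *	err = register_dimm_printer(my_print_dimm);
 *	...
 *	unregister_dimm_printer(my_print_dimm);
 *
 * my_print_dimm()/my_decode_slot() are illustrative names only, not
 * part of this file.
 */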

void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
    enum ctx_state prev_state = exception_enter();
    siginfo_t info;

    if (notify_die(DIE_TRAP, "instruction access exception", regs,
                   0, 0x8, SIGTRAP) == NOTIFY_STOP)
        goto out;

    if (regs->tstate & TSTATE_PRIV) {
        printk("spitfire_insn_access_exception: SFSR[%016lx] "
               "SFAR[%016lx], going.\n", sfsr, sfar);
        die_if_kernel("Iax", regs);
    }
    if (test_thread_flag(TIF_32BIT)) {
        regs->tpc &= 0xffffffff;
        regs->tnpc &= 0xffffffff;
    }
    info.si_signo = SIGSEGV;
    info.si_errno = 0;
    info.si_code = SEGV_MAPERR;
    info.si_addr = (void __user *)regs->tpc;
    info.si_trapno = 0;
    force_sig_info(SIGSEGV, &info, current);
out:
    exception_exit(prev_state);
}

void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
    if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
                   0, 0x8, SIGTRAP) == NOTIFY_STOP)
        return;

    dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
    spitfire_insn_access_exception(regs, sfsr, sfar);
}

void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
    unsigned short type = (type_ctx >> 16);
    unsigned short ctx = (type_ctx & 0xffff);
    siginfo_t info;

    if (notify_die(DIE_TRAP, "instruction access exception", regs,
                   0, 0x8, SIGTRAP) == NOTIFY_STOP)
        return;

    if (regs->tstate & TSTATE_PRIV) {
        printk("sun4v_insn_access_exception: ADDR[%016lx] "
               "CTX[%04x] TYPE[%04x], going.\n",
               addr, ctx, type);
        die_if_kernel("Iax", regs);
    }

    if (test_thread_flag(TIF_32BIT)) {
        regs->tpc &= 0xffffffff;
        regs->tnpc &= 0xffffffff;
    }
    info.si_signo = SIGSEGV;
    info.si_errno = 0;
    info.si_code = SEGV_MAPERR;
    info.si_addr = (void __user *) addr;
    info.si_trapno = 0;
    force_sig_info(SIGSEGV, &info, current);
}

void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
    if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
                   0, 0x8, SIGTRAP) == NOTIFY_STOP)
        return;

    dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
    sun4v_insn_access_exception(regs, addr, type_ctx);
}

void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
    enum ctx_state prev_state = exception_enter();
    siginfo_t info;

    if (notify_die(DIE_TRAP, "data access exception", regs,
                   0, 0x30, SIGTRAP) == NOTIFY_STOP)
        goto out;

    if (regs->tstate & TSTATE_PRIV) {
        /* Test if this comes from uaccess places. */
        const struct exception_table_entry *entry;

        entry = search_exception_tables(regs->tpc);
        if (entry) {
            /* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
            printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
            printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
                   regs->tpc, entry->fixup);
#endif
            regs->tpc = entry->fixup;
            regs->tnpc = regs->tpc + 4;
            goto out;
        }
        /* Shit... */
        printk("spitfire_data_access_exception: SFSR[%016lx] "
               "SFAR[%016lx], going.\n", sfsr, sfar);
        die_if_kernel("Dax", regs);
    }

    info.si_signo = SIGSEGV;
    info.si_errno = 0;
    info.si_code = SEGV_MAPERR;
    info.si_addr = (void __user *)sfar;
    info.si_trapno = 0;
    force_sig_info(SIGSEGV, &info, current);
out:
    exception_exit(prev_state);
}

void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
    if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
                   0, 0x30, SIGTRAP) == NOTIFY_STOP)
        return;

    dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
    spitfire_data_access_exception(regs, sfsr, sfar);
}

void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
    unsigned short type = (type_ctx >> 16);
    unsigned short ctx = (type_ctx & 0xffff);
    siginfo_t info;

    if (notify_die(DIE_TRAP, "data access exception", regs,
                   0, 0x8, SIGTRAP) == NOTIFY_STOP)
        return;

    if (regs->tstate & TSTATE_PRIV) {
        /* Test if this comes from uaccess places. */
        const struct exception_table_entry *entry;

        entry = search_exception_tables(regs->tpc);
        if (entry) {
            /* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
            printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
            printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
                   regs->tpc, entry->fixup);
#endif
            regs->tpc = entry->fixup;
            regs->tnpc = regs->tpc + 4;
            return;
        }
        printk("sun4v_data_access_exception: ADDR[%016lx] "
               "CTX[%04x] TYPE[%04x], going.\n",
               addr, ctx, type);
        die_if_kernel("Dax", regs);
    }

    if (test_thread_flag(TIF_32BIT)) {
        regs->tpc &= 0xffffffff;
        regs->tnpc &= 0xffffffff;
    }
    info.si_signo = SIGSEGV;
    info.si_errno = 0;
    info.si_code = SEGV_MAPERR;
    info.si_addr = (void __user *) addr;
    info.si_trapno = 0;
    force_sig_info(SIGSEGV, &info, current);
}

void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
    if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
                   0, 0x8, SIGTRAP) == NOTIFY_STOP)
        return;

    dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
    sun4v_data_access_exception(regs, addr, type_ctx);
}

#ifdef CONFIG_PCI
#include "pci_impl.h"
#endif

/* When access exceptions happen, we must do this. */
static void spitfire_clean_and_reenable_l1_caches(void)
{
    unsigned long va;

    if (tlb_type != spitfire)
        BUG();

    /* Clean 'em. */
    for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
        spitfire_put_icache_tag(va, 0x0);
        spitfire_put_dcache_tag(va, 0x0);
    }

    /* Re-enable in LSU. */
    __asm__ __volatile__("flush %%g6\n\t"
                         "membar #Sync\n\t"
                         "stxa %0, [%%g0] %1\n\t"
                         "membar #Sync"
                         : /* no outputs */
                         : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
                                LSU_CONTROL_IM | LSU_CONTROL_DM),
                           "i" (ASI_LSU_CONTROL)
                         : "memory");
}

static void spitfire_enable_estate_errors(void)
{
    __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
                         "membar #Sync"
                         : /* no outputs */
                         : "r" (ESTATE_ERR_ALL),
                           "i" (ASI_ESTATE_ERROR_EN));
}

static char ecc_syndrome_table[] = {
    0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
    0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
    0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
    0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
    0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
    0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
    0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
    0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
    0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
    0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
    0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
    0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
    0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
    0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
    0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
    0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
    0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
    0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
    0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
    0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
    0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
    0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
    0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
    0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
    0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
    0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
    0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
    0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
    0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
    0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
    0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
    0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
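
/* Evidently this maps the raw 8-bit UDB ECC syndrome to the syndrome
 * codes that sprintf_dimm()/prom_getunumber() expect; see
 * spitfire_log_udb_syndrome() below for the lookup.
 */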

static char *syndrome_unknown = "<Unknown>";

static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
{
    unsigned short scode;
    char memmod_str[64], *p;

    if (udbl & bit) {
        scode = ecc_syndrome_table[udbl & 0xff];
        if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
            p = syndrome_unknown;
        else
            p = memmod_str;
        printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
               "Memory Module \"%s\"\n",
               smp_processor_id(), scode, p);
    }

    if (udbh & bit) {
        scode = ecc_syndrome_table[udbh & 0xff];
        if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
            p = syndrome_unknown;
        else
            p = memmod_str;
        printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
               "Memory Module \"%s\"\n",
               smp_processor_id(), scode, p);
    }
}

static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{
    printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
           "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
           smp_processor_id(), afsr, afar, udbl, udbh, tl1);

    spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

    /* We always log it, even if someone is listening for this
     * trap.
     */
    notify_die(DIE_TRAP, "Correctable ECC Error", regs,
               0, TRAP_TYPE_CEE, SIGTRAP);

    /* The Correctable ECC Error trap does not disable I/D caches. So
     * we only have to restore the ESTATE Error Enable register.
     */
    spitfire_enable_estate_errors();
}

static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
{
    siginfo_t info;

    printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
           "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n",
           smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);

    /* XXX add more human friendly logging of the error status
     * XXX as is implemented for cheetah
     */

    spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);

    /* We always log it, even if someone is listening for this
     * trap.
     */
    notify_die(DIE_TRAP, "Uncorrectable Error", regs,
               0, tt, SIGTRAP);

    if (regs->tstate & TSTATE_PRIV) {
        if (tl1)
            dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        die_if_kernel("UE", regs);
    }

    /* XXX need more intelligent processing here, such as is implemented
     * XXX for cheetah errors, in fact if the E-cache still holds the
     * XXX line with bad parity this will loop
     */

    spitfire_clean_and_reenable_l1_caches();
    spitfire_enable_estate_errors();

    if (test_thread_flag(TIF_32BIT)) {
        regs->tpc &= 0xffffffff;
        regs->tnpc &= 0xffffffff;
    }
    info.si_signo = SIGBUS;
    info.si_errno = 0;
    info.si_code = BUS_OBJERR;
    info.si_addr = (void *)0;
    info.si_trapno = 0;
    force_sig_info(SIGBUS, &info, current);
}

void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
    unsigned long afsr, tt, udbh, udbl;
    int tl1;

    afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
    tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
    tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
    udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
    udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
    if (tt == TRAP_TYPE_DAE &&
        pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
        spitfire_clean_and_reenable_l1_caches();
        spitfire_enable_estate_errors();

        pci_poke_faulted = 1;
        regs->tnpc = regs->tpc + 4;
        return;
    }
#endif

    if (afsr & SFAFSR_UE)
        spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

    if (tt == TRAP_TYPE_CEE) {
        /* Handle the case where we took a CEE trap, but ACK'd
         * only the UE state in the UDB error registers.
         */
        if (afsr & SFAFSR_UE) {
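            /* ACK only the CE bits: the UDB-High error register
             * lives at offset 0x0, UDB-Low at offset 0x18, in
             * ASI_UDB_ERROR_W.
             */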
            if (udbh & UDBE_CE) {
                __asm__ __volatile__(
                    "stxa %0, [%1] %2\n\t"
                    "membar #Sync"
                    : /* no outputs */
                    : "r" (udbh & UDBE_CE),
                      "r" (0x0), "i" (ASI_UDB_ERROR_W));
            }
            if (udbl & UDBE_CE) {
                __asm__ __volatile__(
                    "stxa %0, [%1] %2\n\t"
                    "membar #Sync"
                    : /* no outputs */
                    : "r" (udbl & UDBE_CE),
                      "r" (0x18), "i" (ASI_UDB_ERROR_W));
            }
        }

        spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
    }
}

int cheetah_pcache_forced_on;

void cheetah_enable_pcache(void)
{
    unsigned long dcr;

    printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
           smp_processor_id());

    __asm__ __volatile__("ldxa [%%g0] %1, %0"
                         : "=r" (dcr)
                         : "i" (ASI_DCU_CONTROL_REG));
    dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
    __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
                         "membar #Sync"
                         : /* no outputs */
                         : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}

/* Cheetah error trap handling. */
static unsigned long ecache_flush_physbase;
static unsigned long ecache_flush_linesize;
static unsigned long ecache_flush_size;

/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */

struct afsr_error_table {
    unsigned long mask;
    const char *name;
};

static const char CHAFSR_PERR_msg[] =
    "System interface protocol error";
static const char CHAFSR_IERR_msg[] =
    "Internal processor error";
static const char CHAFSR_ISAP_msg[] =
    "System request parity error on incoming address";
static const char CHAFSR_UCU_msg[] =
    "Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
    "SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
    "Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
    "Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
    "Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
    "Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
    "Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
    "HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
    "HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
    "HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
    "HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
    "HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
    "Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
    "Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
    "HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
    "Uncorrectable system bus data ECC error for ivec read";
static struct afsr_error_table __cheetah_error_table[] = {
    { CHAFSR_PERR, CHAFSR_PERR_msg },
    { CHAFSR_IERR, CHAFSR_IERR_msg },
    { CHAFSR_ISAP, CHAFSR_ISAP_msg },
    { CHAFSR_UCU, CHAFSR_UCU_msg },
    { CHAFSR_UCC, CHAFSR_UCC_msg },
    { CHAFSR_UE, CHAFSR_UE_msg },
    { CHAFSR_EDU, CHAFSR_EDU_msg },
    { CHAFSR_EMU, CHAFSR_EMU_msg },
    { CHAFSR_WDU, CHAFSR_WDU_msg },
    { CHAFSR_CPU, CHAFSR_CPU_msg },
    { CHAFSR_CE, CHAFSR_CE_msg },
    { CHAFSR_EDC, CHAFSR_EDC_msg },
    { CHAFSR_EMC, CHAFSR_EMC_msg },
    { CHAFSR_WDC, CHAFSR_WDC_msg },
    { CHAFSR_CPC, CHAFSR_CPC_msg },
    { CHAFSR_TO, CHAFSR_TO_msg },
    { CHAFSR_BERR, CHAFSR_BERR_msg },
    /* These two do not update the AFAR. */
    { CHAFSR_IVC, CHAFSR_IVC_msg },
    { CHAFSR_IVU, CHAFSR_IVU_msg },
    { 0, NULL },
};
static const char CHPAFSR_DTO_msg[] =
    "System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
    "System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
    "Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
    "SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
    "Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
    "System bus uncorrectable data ECC error due to prefetch/store-fill";
static struct afsr_error_table __cheetah_plus_error_table[] = {
    { CHAFSR_PERR, CHAFSR_PERR_msg },
    { CHAFSR_IERR, CHAFSR_IERR_msg },
    { CHAFSR_ISAP, CHAFSR_ISAP_msg },
    { CHAFSR_UCU, CHAFSR_UCU_msg },
    { CHAFSR_UCC, CHAFSR_UCC_msg },
    { CHAFSR_UE, CHAFSR_UE_msg },
    { CHAFSR_EDU, CHAFSR_EDU_msg },
    { CHAFSR_EMU, CHAFSR_EMU_msg },
    { CHAFSR_WDU, CHAFSR_WDU_msg },
    { CHAFSR_CPU, CHAFSR_CPU_msg },
    { CHAFSR_CE, CHAFSR_CE_msg },
    { CHAFSR_EDC, CHAFSR_EDC_msg },
    { CHAFSR_EMC, CHAFSR_EMC_msg },
    { CHAFSR_WDC, CHAFSR_WDC_msg },
    { CHAFSR_CPC, CHAFSR_CPC_msg },
    { CHAFSR_TO, CHAFSR_TO_msg },
    { CHAFSR_BERR, CHAFSR_BERR_msg },
    { CHPAFSR_DTO, CHPAFSR_DTO_msg },
    { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
    { CHPAFSR_THCE, CHPAFSR_THCE_msg },
    { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
    { CHPAFSR_TUE, CHPAFSR_TUE_msg },
    { CHPAFSR_DUE, CHPAFSR_DUE_msg },
    /* These two do not update the AFAR. */
    { CHAFSR_IVC, CHAFSR_IVC_msg },
    { CHAFSR_IVU, CHAFSR_IVU_msg },
    { 0, NULL },
};
static const char JPAFSR_JETO_msg[] =
    "System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
    "Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
    "System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
    "System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
    "Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
    "Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
    "Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
    "Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
    "Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
    "JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
    "JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
    "Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
    "Foreign read to DRAM incurring uncorrectable ECC error";
static struct afsr_error_table __jalapeno_error_table[] = {
    { JPAFSR_JETO, JPAFSR_JETO_msg },
    { JPAFSR_SCE, JPAFSR_SCE_msg },
    { JPAFSR_JEIC, JPAFSR_JEIC_msg },
    { JPAFSR_JEIT, JPAFSR_JEIT_msg },
    { CHAFSR_PERR, CHAFSR_PERR_msg },
    { CHAFSR_IERR, CHAFSR_IERR_msg },
    { CHAFSR_ISAP, CHAFSR_ISAP_msg },
    { CHAFSR_UCU, CHAFSR_UCU_msg },
    { CHAFSR_UCC, CHAFSR_UCC_msg },
    { CHAFSR_UE, CHAFSR_UE_msg },
    { CHAFSR_EDU, CHAFSR_EDU_msg },
    { JPAFSR_OM, JPAFSR_OM_msg },
    { CHAFSR_WDU, CHAFSR_WDU_msg },
    { CHAFSR_CPU, CHAFSR_CPU_msg },
    { CHAFSR_CE, CHAFSR_CE_msg },
    { CHAFSR_EDC, CHAFSR_EDC_msg },
    { JPAFSR_ETP, JPAFSR_ETP_msg },
    { CHAFSR_WDC, CHAFSR_WDC_msg },
    { CHAFSR_CPC, CHAFSR_CPC_msg },
    { CHAFSR_TO, CHAFSR_TO_msg },
    { CHAFSR_BERR, CHAFSR_BERR_msg },
    { JPAFSR_UMS, JPAFSR_UMS_msg },
    { JPAFSR_RUE, JPAFSR_RUE_msg },
    { JPAFSR_RCE, JPAFSR_RCE_msg },
    { JPAFSR_BP, JPAFSR_BP_msg },
    { JPAFSR_WBP, JPAFSR_WBP_msg },
    { JPAFSR_FRC, JPAFSR_FRC_msg },
    { JPAFSR_FRU, JPAFSR_FRU_msg },
    /* This one does not update the AFAR. */
    { CHAFSR_IVU, CHAFSR_IVU_msg },
    { 0, NULL },
};
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;

struct cheetah_err_info *cheetah_error_log;

static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
{
    struct cheetah_err_info *p;
    int cpu = smp_processor_id();

    if (!cheetah_error_log)
        return NULL;

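    /* Two log slots per cpu: the first for traps taken at TL0,
     * the second for traps taken at TL > 0 (AFSR.TL1 set).
     */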
    p = cheetah_error_log + (cpu * 2);
    if ((afsr & CHAFSR_TL1) != 0UL)
        p++;

    return p;
}

extern unsigned int tl0_icpe[], tl1_icpe[];
extern unsigned int tl0_dcpe[], tl1_dcpe[];
extern unsigned int tl0_fecc[], tl1_fecc[];
extern unsigned int tl0_cee[], tl1_cee[];
extern unsigned int tl0_iae[], tl1_iae[];
extern unsigned int tl0_dae[], tl1_dae[];
extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];

void __init cheetah_ecache_flush_init(void)
{
    unsigned long largest_size, smallest_linesize, order, ver;
    int i, sz;

    /* Scan all cpu device tree nodes, note two values:
     * 1) largest E-cache size
     * 2) smallest E-cache line size
     */
    largest_size = 0UL;
    smallest_linesize = ~0UL;

    for (i = 0; i < NR_CPUS; i++) {
        unsigned long val;

        val = cpu_data(i).ecache_size;
        if (!val)
            continue;

        if (val > largest_size)
            largest_size = val;

        val = cpu_data(i).ecache_line_size;
        if (val < smallest_linesize)
            smallest_linesize = val;
    }

    if (largest_size == 0UL || smallest_linesize == ~0UL) {
        prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
                    "parameters.\n");
        prom_halt();
    }

    ecache_flush_size = (2 * largest_size);
    ecache_flush_linesize = smallest_linesize;

    ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);

    if (ecache_flush_physbase == ~0UL) {
        prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
                    "contiguous physical memory.\n",
                    ecache_flush_size);
        prom_halt();
    }

    /* Now allocate error trap reporting scoreboard. */
    sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
    for (order = 0; order < MAX_ORDER; order++) {
        if ((PAGE_SIZE << order) >= sz)
            break;
    }
    cheetah_error_log = (struct cheetah_err_info *)
        __get_free_pages(GFP_KERNEL, order);
    if (!cheetah_error_log) {
        prom_printf("cheetah_ecache_flush_init: Failed to allocate "
                    "error logging scoreboard (%d bytes).\n", sz);
        prom_halt();
    }
    memset(cheetah_error_log, 0, PAGE_SIZE << order);

    /* Mark all AFSRs as invalid so that the trap handler will
     * log new information there.
     */
    for (i = 0; i < 2 * NR_CPUS; i++)
        cheetah_error_log[i].afsr = CHAFSR_INVALID;

    __asm__ ("rdpr %%ver, %0" : "=r" (ver));
    if ((ver >> 32) == __JALAPENO_ID ||
        (ver >> 32) == __SERRANO_ID) {
        cheetah_error_table = &__jalapeno_error_table[0];
        cheetah_afsr_errors = JPAFSR_ERRORS;
    } else if ((ver >> 32) == 0x003e0015) {
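        /* 0x003e0015 is the UltraSPARC-III+ (Cheetah+) VER.impl
         * value.
         */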
        cheetah_error_table = &__cheetah_plus_error_table[0];
        cheetah_afsr_errors = CHPAFSR_ERRORS;
    } else {
        cheetah_error_table = &__cheetah_error_table[0];
        cheetah_afsr_errors = CHAFSR_ERRORS;
    }

    /* Now patch trap tables. */
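    /* Each trap table entry is eight instructions (8 * 4 bytes). */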
    memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
    memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
    memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
    memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
    memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
    memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
    memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
    memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
    if (tlb_type == cheetah_plus) {
        memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
        memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
        memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
        memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
    }
    flushi(PAGE_OFFSET);
}

static void cheetah_flush_ecache(void)
{
    unsigned long flush_base = ecache_flush_physbase;
    unsigned long flush_linesize = ecache_flush_linesize;
    unsigned long flush_size = ecache_flush_size;

    __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
                         "   bne,pt %%xcc, 1b\n\t"
                         "    ldxa [%2 + %0] %3, %%g0\n\t"
                         : "=&r" (flush_size)
                         : "0" (flush_size), "r" (flush_base),
                           "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}

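/* Displacement-flush one E-cache line: read the two addresses in the
 * flush area that alias to the same E-cache set, forcing the target
 * line out to memory.
 */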
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
    unsigned long alias;

    physaddr &= ~(8UL - 1UL);
    physaddr = (ecache_flush_physbase +
                (physaddr & ((ecache_flush_size >> 1UL) - 1UL)));
    alias = physaddr + (ecache_flush_size >> 1UL);
    __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
                         "ldxa [%1] %2, %%g0\n\t"
                         "membar #Sync"
                         : /* no outputs */
                         : "r" (physaddr), "r" (alias),
                           "i" (ASI_PHYS_USE_EC));
}

/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
    unsigned int icache_size, icache_line_size;
    unsigned long addr;

    icache_size = local_cpu_data().icache_size;
    icache_line_size = local_cpu_data().icache_line_size;

    /* Clear the valid bits in all the tags. */
    for (addr = 0; addr < icache_size; addr += icache_line_size) {
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (addr | (2 << 3)),
                               "i" (ASI_IC_TAG));
    }
}

static void cheetah_flush_icache(void)
{
    unsigned long dcu_save;

    /* Save current DCU, disable I-cache. */
    __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
                         "or %0, %2, %%g1\n\t"
                         "stxa %%g1, [%%g0] %1\n\t"
                         "membar #Sync"
                         : "=r" (dcu_save)
                         : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
                         : "g1");

    __cheetah_flush_icache();

    /* Restore DCU register */
    __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
                         "membar #Sync"
                         : /* no outputs */
                         : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}

static void cheetah_flush_dcache(void)
{
    unsigned int dcache_size, dcache_line_size;
    unsigned long addr;

    dcache_size = local_cpu_data().dcache_size;
    dcache_line_size = local_cpu_data().dcache_line_size;

    for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (addr), "i" (ASI_DCACHE_TAG));
    }
}

/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line. Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
    unsigned int dcache_size, dcache_line_size;
    unsigned long addr;

    dcache_size = local_cpu_data().dcache_size;
    dcache_line_size = local_cpu_data().dcache_line_size;

    for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
        unsigned long tag = (addr >> 14);
        unsigned long line;

        __asm__ __volatile__("membar #Sync\n\t"
                             "stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (tag), "r" (addr),
                               "i" (ASI_DCACHE_UTAG));
        for (line = addr; line < addr + dcache_line_size; line += 8)
            __asm__ __volatile__("membar #Sync\n\t"
                                 "stxa %%g0, [%0] %1\n\t"
                                 "membar #Sync"
                                 : /* no outputs */
                                 : "r" (line),
                                   "i" (ASI_DCACHE_DATA));
    }
}

/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
#define MT0 137
#define MT1 138
#define MT2 139
#define NONE 254
#define MTC0 140
#define MTC1 141
#define MTC2 142
#define MTC3 143
#define C0 128
#define C1 129
#define C2 130
#define C3 131
#define C4 132
#define C5 133
#define C6 134
#define C7 135
#define C8 136
#define M2 144
#define M3 145
#define M4 146
#define M 147
static unsigned char cheetah_ecc_syntab[] = {
/*00*/ NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/ C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/ C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/ M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/ C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/ M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/ M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/ 116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/ C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/ M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/ M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/ 103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/ M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/ 102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/ 98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/ M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/ C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/ M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/ M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/ 94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/ M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/ 89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/ 86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/ M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/ M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/ 77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/ 74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/ M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/ 80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/ M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/ M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/ 111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
static unsigned char cheetah_mtag_syntab[] = {
    NONE, MTC0,
    MTC1, NONE,
    MTC2, NONE,
    NONE, MT0,
    MTC3, NONE,
    NONE, MT1,
    NONE, MT2,
    NONE, NONE
};
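
/* In the tables above, entries 0-127 evidently name the failing data
 * bit, C0-C8 a failing ECC check bit, MTx/MTCx an MTAG data/check
 * bit, M/M2/M3/M4 a multi-bit error that cannot be pinned to one
 * bit, and NONE an unused syndrome.
 */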

/* Return the highest priority error condition mentioned. */
static inline unsigned long cheetah_get_hipri(unsigned long afsr)
{
    unsigned long tmp = 0;
    int i;

    for (i = 0; cheetah_error_table[i].mask; i++) {
        if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
            return tmp;
    }
    return tmp;
}

static const char *cheetah_get_string(unsigned long bit)
{
    int i;

    for (i = 0; cheetah_error_table[i].mask; i++) {
        if ((bit & cheetah_error_table[i].mask) != 0UL)
            return cheetah_error_table[i].name;
    }
    return "???";
}

static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
                               unsigned long afsr, unsigned long afar, int recoverable)
{
    unsigned long hipri;
    char unum[256];

    printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
           (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
           afsr, afar,
           (afsr & CHAFSR_TL1) ? 1 : 0);
    printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
           (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
           regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
    printk("%s" "ERROR(%d): ",
           (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
    printk("TPC<%pS>\n", (void *) regs->tpc);
    printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
           (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
           (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
           (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
           (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
           (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
    hipri = cheetah_get_hipri(afsr);
    printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
           (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
           hipri, cheetah_get_string(hipri));

    /* Try to get unumber if relevant. */
#define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \
                      CHAFSR_CPC | CHAFSR_CPU | \
                      CHAFSR_UE | CHAFSR_CE | \
                      CHAFSR_EDC | CHAFSR_EDU | \
                      CHAFSR_UCC | CHAFSR_UCU | \
                      CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU)
    if (afsr & ESYND_ERRORS) {
        int syndrome;
        int ret;

        syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
        syndrome = cheetah_ecc_syntab[syndrome];
        ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
        if (ret != -1)
            printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
                   (recoverable ? KERN_WARNING : KERN_CRIT),
                   smp_processor_id(), unum);
    } else if (afsr & MSYND_ERRORS) {
        int syndrome;
        int ret;

        syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
        syndrome = cheetah_mtag_syntab[syndrome];
        ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
        if (ret != -1)
            printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
                   (recoverable ? KERN_WARNING : KERN_CRIT),
                   smp_processor_id(), unum);
    }

    /* Now dump the cache snapshots. */
    printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n",
           (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
           (int) info->dcache_index,
           info->dcache_tag,
           info->dcache_utag,
           info->dcache_stag);
    printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
           (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
           info->dcache_data[0],
           info->dcache_data[1],
           info->dcache_data[2],
           info->dcache_data[3]);
    printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] "
           "u[%016llx] l[%016llx]\n",
           (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
           (int) info->icache_index,
           info->icache_tag,
           info->icache_utag,
           info->icache_stag,
           info->icache_upper,
           info->icache_lower);
    printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n",
           (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
           info->icache_data[0],
           info->icache_data[1],
           info->icache_data[2],
           info->icache_data[3]);
    printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n",
           (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
           info->icache_data[4],
           info->icache_data[5],
           info->icache_data[6],
           info->icache_data[7]);
    printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n",
           (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
           (int) info->ecache_index, info->ecache_tag);
    printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
           (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
           info->ecache_data[0],
           info->ecache_data[1],
           info->ecache_data[2],
           info->ecache_data[3]);

    afsr = (afsr & ~hipri) & cheetah_afsr_errors;
    while (afsr != 0UL) {
        unsigned long bit = cheetah_get_hipri(afsr);

        printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
               (recoverable ? KERN_WARNING : KERN_CRIT),
               bit, cheetah_get_string(bit));

        afsr &= ~bit;
    }

    if (!recoverable)
        printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}

static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
    unsigned long afsr, afar;
    int ret = 0;

    __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
                         : "=r" (afsr)
                         : "i" (ASI_AFSR));
    if ((afsr & cheetah_afsr_errors) != 0) {
        if (logp != NULL) {
            __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
                                 : "=r" (afar)
                                 : "i" (ASI_AFAR));
            logp->afsr = afsr;
            logp->afar = afar;
        }
        ret = 1;
    }
    __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
                         "membar #Sync\n\t"
                         : : "r" (afsr), "i" (ASI_AFSR));

    return ret;
}

void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
    struct cheetah_err_info local_snapshot, *p;
    int recoverable;

    /* Flush E-cache */
    cheetah_flush_ecache();

    p = cheetah_get_error_log(afsr);
    if (!p) {
        prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
                    afsr, afar);
        prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
                    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
        prom_halt();
    }

    /* Grab snapshot of logged error. */
    memcpy(&local_snapshot, p, sizeof(local_snapshot));

    /* If the current trap snapshot does not match what the
     * trap handler passed along into our args, big trouble.
     * In such a case, mark the local copy as invalid.
     *
     * Else, it matches and we mark the afsr in the non-local
     * copy as invalid so we may log new error traps there.
     */
    if (p->afsr != afsr || p->afar != afar)
        local_snapshot.afsr = CHAFSR_INVALID;
    else
        p->afsr = CHAFSR_INVALID;

    cheetah_flush_icache();
    cheetah_flush_dcache();

    /* Re-enable I-cache/D-cache */
    __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
                         "or %%g1, %1, %%g1\n\t"
                         "stxa %%g1, [%%g0] %0\n\t"
                         "membar #Sync"
                         : /* no outputs */
                         : "i" (ASI_DCU_CONTROL_REG),
                           "i" (DCU_DC | DCU_IC)
                         : "g1");

    /* Re-enable error reporting */
    __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
                         "or %%g1, %1, %%g1\n\t"
                         "stxa %%g1, [%%g0] %0\n\t"
                         "membar #Sync"
                         : /* no outputs */
                         : "i" (ASI_ESTATE_ERROR_EN),
                           "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
                         : "g1");

    /* Decide if we can continue after handling this trap and
     * logging the error.
     */
    recoverable = 1;
    if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
        recoverable = 0;

    /* Re-check AFSR/AFAR. What we are looking for here is whether a new
     * error was logged while we had error reporting traps disabled.
     */
    if (cheetah_recheck_errors(&local_snapshot)) {
        unsigned long new_afsr = local_snapshot.afsr;

        /* If we got a new asynchronous error, die... */
        if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
                        CHAFSR_WDU | CHAFSR_CPU |
                        CHAFSR_IVU | CHAFSR_UE |
                        CHAFSR_BERR | CHAFSR_TO))
            recoverable = 0;
    }

    /* Log errors. */
    cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

    if (!recoverable)
        panic("Irrecoverable Fast-ECC error trap.\n");

    /* Flush E-cache to kick the error trap handlers out. */
    cheetah_flush_ecache();
}

/* Try to fix a correctable error by pushing the line out from
 * the E-cache. Recheck error reporting registers to see if the
 * problem is intermittent.
 */
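/* Returns 0 if the recheck shows no new error (the problem was
 * intermittent), 1 if the error recurred but cleared after one more
 * displacement, and 2 if it persists even then.
 */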
1372static int cheetah_fix_ce(unsigned long physaddr)
1373{
1374 unsigned long orig_estate;
1375 unsigned long alias1, alias2;
1376 int ret;
1377
1378 /* Make sure correctable error traps are disabled. */
1379 __asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
1380 "andn %0, %1, %%g1\n\t"
1381 "stxa %%g1, [%%g0] %2\n\t"
1382 "membar #Sync"
1383 : "=&r" (orig_estate)
1384 : "i" (ESTATE_ERROR_CEEN),
1385 "i" (ASI_ESTATE_ERROR_EN)
1386 : "g1");
1387
1388 /* We calculate alias addresses that will force the
1389 * cache line in question out of the E-cache. Then
1390 * we bring it back in with an atomic instruction so
1391 * that we get it in some modified/exclusive state,
1392 * then we displace it again to try and get proper ECC
1393 * pushed back into the system.
1394 */
1395 physaddr &= ~(8UL - 1UL);
1396 alias1 = (ecache_flush_physbase +
1397 (physaddr & ((ecache_flush_size >> 1) - 1)));
1398 alias2 = alias1 + (ecache_flush_size >> 1);
1399 __asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
1400 "ldxa [%1] %3, %%g0\n\t"
1401 "casxa [%2] %3, %%g0, %%g0\n\t"
1402 "ldxa [%0] %3, %%g0\n\t"
1403 "ldxa [%1] %3, %%g0\n\t"
1404 "membar #Sync"
1405 : /* no outputs */
1406 : "r" (alias1), "r" (alias2),
1407 "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1408
1409 /* Did that trigger another error? */
1410 if (cheetah_recheck_errors(NULL)) {
1411 /* Try one more time. */
1412 __asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
1413 "membar #Sync"
1414 : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1415 if (cheetah_recheck_errors(NULL))
1416 ret = 2;
1417 else
1418 ret = 1;
1419 } else {
1420 /* No new error, intermittent problem. */
1421 ret = 0;
1422 }
1423
1424 /* Restore error enables. */
1425 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1426 "membar #Sync"
1427 : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
1428
1429 return ret;
1430}
1431
1432/* Return non-zero if PADDR is a valid physical memory address. */
1433static int cheetah_check_main_memory(unsigned long paddr)
1434{
1435 unsigned long vaddr = PAGE_OFFSET + paddr;
1436
1437 if (vaddr > (unsigned long) high_memory)
1438 return 0;
1439
1440 return kern_addr_valid(vaddr);
1441}
1442
1443void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1444{
1445 struct cheetah_err_info local_snapshot, *p;
1446 int recoverable, is_memory;
1447
1448 p = cheetah_get_error_log(afsr);
1449 if (!p) {
1450 prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
1451 afsr, afar);
1452 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1453 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1454 prom_halt();
1455 }
1456
1457 /* Grab snapshot of logged error. */
1458 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1459
1460 /* If the current trap snapshot does not match what the
1461 * trap handler passed along into our args, big trouble.
1462 * In such a case, mark the local copy as invalid.
1463 *
1464 * Else, it matches and we mark the afsr in the non-local
1465 * copy as invalid so we may log new error traps there.
1466 */
1467 if (p->afsr != afsr || p->afar != afar)
1468 local_snapshot.afsr = CHAFSR_INVALID;
1469 else
1470 p->afsr = CHAFSR_INVALID;
1471
1472 is_memory = cheetah_check_main_memory(afar);
1473
1474 if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
1475 /* XXX Might want to log the results of this operation
1476 * XXX somewhere... -DaveM
1477 */
1478 cheetah_fix_ce(afar);
1479 }
1480
1481 {
1482 int flush_all, flush_line;
1483
1484 flush_all = flush_line = 0;
1485 if ((afsr & CHAFSR_EDC) != 0UL) {
1486 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
1487 flush_line = 1;
1488 else
1489 flush_all = 1;
1490 } else if ((afsr & CHAFSR_CPC) != 0UL) {
1491 if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
1492 flush_line = 1;
1493 else
1494 flush_all = 1;
1495 }
1496
1497 /* Trap handler only disabled I-cache, flush it. */
1498 cheetah_flush_icache();
1499
1500 /* Re-enable I-cache */
1501 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1502 "or %%g1, %1, %%g1\n\t"
1503 "stxa %%g1, [%%g0] %0\n\t"
1504 "membar #Sync"
1505 : /* no outputs */
1506 : "i" (ASI_DCU_CONTROL_REG),
1507 "i" (DCU_IC)
1508 : "g1");
1509
1510 if (flush_all)
1511 cheetah_flush_ecache();
1512 else if (flush_line)
1513 cheetah_flush_ecache_line(afar);
1514 }
1515
1516 /* Re-enable error reporting */
1517 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1518 "or %%g1, %1, %%g1\n\t"
1519 "stxa %%g1, [%%g0] %0\n\t"
1520 "membar #Sync"
1521 : /* no outputs */
1522 : "i" (ASI_ESTATE_ERROR_EN),
1523 "i" (ESTATE_ERROR_CEEN)
1524 : "g1");
1525
1526 /* Decide if we can continue after handling this trap and
1527 * logging the error.
1528 */
1529 recoverable = 1;
1530 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1531 recoverable = 0;
1532
1533 /* Re-check AFSR/AFAR */
1534 (void) cheetah_recheck_errors(&local_snapshot);
1535
1536 /* Log errors. */
1537 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1538
1539 if (!recoverable)
1540 panic("Irrecoverable Correctable-ECC error trap.\n");
1541}
1542
1543void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1544{
1545 struct cheetah_err_info local_snapshot, *p;
1546 int recoverable, is_memory;
1547
1548#ifdef CONFIG_PCI
1549 /* Check for the special PCI poke sequence. */
1550 if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
1551 cheetah_flush_icache();
1552 cheetah_flush_dcache();
1553
1554 /* Re-enable I-cache/D-cache */
1555 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1556 "or %%g1, %1, %%g1\n\t"
1557 "stxa %%g1, [%%g0] %0\n\t"
1558 "membar #Sync"
1559 : /* no outputs */
1560 : "i" (ASI_DCU_CONTROL_REG),
1561 "i" (DCU_DC | DCU_IC)
1562 : "g1");
1563
1564 /* Re-enable error reporting */
1565 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1566 "or %%g1, %1, %%g1\n\t"
1567 "stxa %%g1, [%%g0] %0\n\t"
1568 "membar #Sync"
1569 : /* no outputs */
1570 : "i" (ASI_ESTATE_ERROR_EN),
1571 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1572 : "g1");
1573
1574 (void) cheetah_recheck_errors(NULL);
1575
1576 pci_poke_faulted = 1;
1577 regs->tpc += 4;
1578 regs->tnpc = regs->tpc + 4;
1579 return;
1580 }
1581#endif
1582
1583 p = cheetah_get_error_log(afsr);
1584 if (!p) {
1585 prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
1586 afsr, afar);
1587 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1588 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1589 prom_halt();
1590 }
1591
1592 /* Grab snapshot of logged error. */
1593 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1594
1595 /* If the current trap snapshot does not match what the
1596 * trap handler passed along into our args, big trouble.
1597 * In such a case, mark the local copy as invalid.
1598 *
1599 * Else, it matches and we mark the afsr in the non-local
1600 * copy as invalid so we may log new error traps there.
1601 */
1602 if (p->afsr != afsr || p->afar != afar)
1603 local_snapshot.afsr = CHAFSR_INVALID;
1604 else
1605 p->afsr = CHAFSR_INVALID;
1606
1607 is_memory = cheetah_check_main_memory(afar);
1608
1609 {
1610 int flush_all, flush_line;
1611
1612 flush_all = flush_line = 0;
1613 if ((afsr & CHAFSR_EDU) != 0UL) {
1614 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
1615 flush_line = 1;
1616 else
1617 flush_all = 1;
1618 } else if ((afsr & CHAFSR_BERR) != 0UL) {
1619 if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
1620 flush_line = 1;
1621 else
1622 flush_all = 1;
1623 }
1624
1625 cheetah_flush_icache();
1626 cheetah_flush_dcache();
1627
1628 /* Re-enable I/D caches */
1629 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1630 "or %%g1, %1, %%g1\n\t"
1631 "stxa %%g1, [%%g0] %0\n\t"
1632 "membar #Sync"
1633 : /* no outputs */
1634 : "i" (ASI_DCU_CONTROL_REG),
1635 "i" (DCU_IC | DCU_DC)
1636 : "g1");
1637
1638 if (flush_all)
1639 cheetah_flush_ecache();
1640 else if (flush_line)
1641 cheetah_flush_ecache_line(afar);
1642 }
1643
1644 /* Re-enable error reporting */
1645 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			const struct exception_table_entry *entry;

			entry = search_exception_tables(regs->tpc);
			if (entry) {
				/* OK, kernel access to userspace. */
				recoverable = 1;

			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = entry->fixup;
					regs->tnpc = regs->tpc + 4;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}

/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 * Bit0:	0=dcache,1=icache
 * Bit1:	0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
	printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
}

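/* One entry in the sun4v resumable/non-resumable error queues: a
 * 64-byte report laid down by the hypervisor.  The 0xNN annotations
 * give each field's byte offset within the entry.
 */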
struct sun4v_error_entry {
	/* Unique error handle */
/*0x00*/u64		err_handle;

	/* %stick value at the time of the error */
/*0x08*/u64		err_stick;

/*0x10*/u8		reserved_1[3];

	/* Error type */
/*0x13*/u8		err_type;
#define SUN4V_ERR_TYPE_UNDEFINED	0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
#define SUN4V_ERR_TYPE_SHUTDOWN_RQST	4
#define SUN4V_ERR_TYPE_DUMP_CORE	5
#define SUN4V_ERR_TYPE_SP_STATE_CHANGE	6
#define SUN4V_ERR_TYPE_NUM		7

	/* Error attributes */
/*0x14*/u32		err_attrs;
#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
#define SUN4V_ERR_ATTRS_PIO		0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
#define SUN4V_ERR_ATTRS_SHUTDOWN_RQST	0x00000020
#define SUN4V_ERR_ATTRS_ASR		0x00000040
#define SUN4V_ERR_ATTRS_ASI		0x00000080
#define SUN4V_ERR_ATTRS_PRIV_REG	0x00000100
#define SUN4V_ERR_ATTRS_SPSTATE_MSK	0x00000600
#define SUN4V_ERR_ATTRS_SPSTATE_SHFT	9
#define SUN4V_ERR_ATTRS_MODE_MSK	0x03000000
#define SUN4V_ERR_ATTRS_MODE_SHFT	24
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000

#define SUN4V_ERR_SPSTATE_FAULTED	0
#define SUN4V_ERR_SPSTATE_AVAILABLE	1
#define SUN4V_ERR_SPSTATE_NOT_PRESENT	2

#define SUN4V_ERR_MODE_USER		1
#define SUN4V_ERR_MODE_PRIV		2

	/* Real address of the memory region or PIO transaction */
/*0x18*/u64		err_raddr;

	/* Size of the operation triggering the error, in bytes */
/*0x20*/u32		err_size;

	/* ID of the CPU */
/*0x24*/u16		err_cpu;

	/* Grace period for shutdown, in seconds */
/*0x26*/u16		err_secs;

	/* Value of the %asi register */
/*0x28*/u8		err_asi;

/*0x29*/u8		reserved_2;

	/* Value of the ASR register number */
/*0x2a*/u16		err_asr;
#define SUN4V_ERR_ASR_VALID		0x8000

/*0x2c*/u32		reserved_3;
/*0x30*/u64		reserved_4;
/*0x38*/u64		reserved_5;
};

static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);

static const char *sun4v_err_type_to_str(u8 type)
{
	static const char *types[SUN4V_ERR_TYPE_NUM] = {
		"undefined",
		"uncorrected resumable",
		"precise nonresumable",
		"deferred nonresumable",
		"shutdown request",
		"dump core",
		"SP state change",
	};

	if (type < SUN4V_ERR_TYPE_NUM)
		return types[type];

	return "unknown";
}

static void sun4v_emit_err_attr_strings(u32 attrs)
{
	static const char *attr_names[] = {
		"processor",
		"memory",
		"PIO",
		"int-registers",
		"fpu-registers",
		"shutdown-request",
		"ASR",
		"ASI",
		"priv-reg",
	};
	static const char *sp_states[] = {
		"sp-faulted",
		"sp-available",
		"sp-not-present",
		"sp-state-reserved",
	};
	static const char *modes[] = {
		"mode-reserved0",
		"user",
		"priv",
		"mode-reserved1",
	};
	u32 sp_state, mode;
	int i;

	for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
		if (attrs & (1U << i)) {
			const char *s = attr_names[i];

			pr_cont("%s ", s);
		}
	}

	sp_state = ((attrs & SUN4V_ERR_ATTRS_SPSTATE_MSK) >>
		    SUN4V_ERR_ATTRS_SPSTATE_SHFT);
	pr_cont("%s ", sp_states[sp_state]);

	mode = ((attrs & SUN4V_ERR_ATTRS_MODE_MSK) >>
		SUN4V_ERR_ATTRS_MODE_SHFT);
	pr_cont("%s ", modes[mode]);

	if (attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL)
		pr_cont("res-queue-full ");
}

/* When the report contains a real-address of "-1" it means that the
 * hardware did not provide the address.  So we compute the effective
 * address of the load or store instruction at regs->tpc and report
 * that.  Usually when this happens it's a PIO and in such a case we
 * are using physical addresses with bypass ASIs anyways, so what we
 * report here is exactly what we want.
 */
static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
{
	unsigned int insn;
	u64 addr;

	if (!(regs->tstate & TSTATE_PRIV))
		return;

	insn = *(unsigned int *) regs->tpc;

	addr = compute_effective_address(regs, insn, 0);

	printk("%s: insn effective address [0x%016llx]\n",
	       pfx, addr);
}

static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
			    int cpu, const char *pfx, atomic_t *ocnt)
{
	u64 *raw_ptr = (u64 *) ent;
	u32 attrs;
	int cnt;

	printk("%s: Reporting on cpu %d\n", pfx, cpu);
	printk("%s: TPC [0x%016lx] <%pS>\n",
	       pfx, regs->tpc, (void *) regs->tpc);

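	/* Dump the raw entry as eight 64-bit words (64 bytes in all). */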
	printk("%s: RAW [%016llx:%016llx:%016llx:%016llx\n",
	       pfx, raw_ptr[0], raw_ptr[1], raw_ptr[2], raw_ptr[3]);
	printk("%s:      %016llx:%016llx:%016llx:%016llx]\n",
	       pfx, raw_ptr[4], raw_ptr[5], raw_ptr[6], raw_ptr[7]);

	printk("%s: handle [0x%016llx] stick [0x%016llx]\n",
	       pfx, ent->err_handle, ent->err_stick);

	printk("%s: type [%s]\n", pfx, sun4v_err_type_to_str(ent->err_type));

	attrs = ent->err_attrs;
	printk("%s: attrs [0x%08x] < ", pfx, attrs);
	sun4v_emit_err_attr_strings(attrs);
	pr_cont(">\n");

	/* Various fields in the error report are only valid if
	 * certain attribute bits are set.
	 */
	if (attrs & (SUN4V_ERR_ATTRS_MEMORY |
		     SUN4V_ERR_ATTRS_PIO |
		     SUN4V_ERR_ATTRS_ASI)) {
		printk("%s: raddr [0x%016llx]\n", pfx, ent->err_raddr);

		if (ent->err_raddr == ~(u64)0)
			sun4v_report_real_raddr(pfx, regs);
	}

	if (attrs & (SUN4V_ERR_ATTRS_MEMORY | SUN4V_ERR_ATTRS_ASI))
		printk("%s: size [0x%x]\n", pfx, ent->err_size);

	if (attrs & (SUN4V_ERR_ATTRS_PROCESSOR |
		     SUN4V_ERR_ATTRS_INT_REGISTERS |
		     SUN4V_ERR_ATTRS_FPU_REGISTERS |
		     SUN4V_ERR_ATTRS_PRIV_REG))
		printk("%s: cpu[%u]\n", pfx, ent->err_cpu);

	if (attrs & SUN4V_ERR_ATTRS_ASI)
		printk("%s: asi [0x%02x]\n", pfx, ent->err_asi);

	if ((attrs & (SUN4V_ERR_ATTRS_INT_REGISTERS |
		      SUN4V_ERR_ATTRS_FPU_REGISTERS |
		      SUN4V_ERR_ATTRS_PRIV_REG)) &&
	    (ent->err_asr & SUN4V_ERR_ASR_VALID) != 0)
		printk("%s: reg [0x%04x]\n",
		       pfx, ent->err_asr & ~SUN4V_ERR_ASR_VALID);

	show_regs(regs);

	if ((cnt = atomic_read(ocnt)) != 0) {
		atomic_set(ocnt, 0);
		wmb();
		printk("%s: Queue overflowed %d times.\n",
		       pfx, cnt);
	}
}

/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
 * Log the event and clear the first word of the entry.
 */
void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
{
	enum ctx_state prev_state = exception_enter();
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	paddr = tb->resum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry. */
	ent->err_handle = 0;
	wmb();

	put_cpu();

	if (local_copy.err_type == SUN4V_ERR_TYPE_SHUTDOWN_RQST) {
		/* We should really take the seconds field of
		 * the error report and use it for the shutdown
		 * invocation, but for now do the same thing we
		 * do for a DS shutdown request.
		 */
		pr_info("Shutdown request, %u seconds...\n",
			local_copy.err_secs);
		orderly_poweroff(true);
		goto out;
	}

	sun4v_log_error(regs, &local_copy, cpu,
			KERN_ERR "RESUMABLE ERROR",
			&sun4v_resum_oflow_cnt);
out:
	exception_exit(prev_state);
}

/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_resum_overflow(struct pt_regs *regs)
{
	atomic_inc(&sun4v_resum_oflow_cnt);
}

/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
 * Log the event, clear the first word of the entry, and die.
 */
void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	paddr = tb->nonresum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry. */
	ent->err_handle = 0;
	wmb();

	put_cpu();

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	sun4v_log_error(regs, &local_copy, cpu,
			KERN_EMERG "NON-RESUMABLE ERROR",
			&sun4v_nonresum_oflow_cnt);

	panic("Non-resumable error.");
}

/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_nonresum_overflow(struct pt_regs *regs)
{
	/* XXX Actually even this may not make much sense.  Perhaps
	 * XXX we should just pull the plug and panic directly from here?
	 */
	atomic_inc(&sun4v_nonresum_oflow_cnt);
}

static void sun4v_tlb_error(struct pt_regs *regs)
{
	die_if_kernel("TLB/TSB error", regs);
}

unsigned long sun4v_err_itlb_vaddr;
unsigned long sun4v_err_itlb_ctx;
unsigned long sun4v_err_itlb_pte;
unsigned long sun4v_err_itlb_error;

void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
	       sun4v_err_itlb_pte, sun4v_err_itlb_error);

	sun4v_tlb_error(regs);
}

unsigned long sun4v_err_dtlb_vaddr;
unsigned long sun4v_err_dtlb_ctx;
unsigned long sun4v_err_dtlb_pte;
unsigned long sun4v_err_dtlb_error;

void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
	       sun4v_err_dtlb_pte, sun4v_err_dtlb_error);

	sun4v_tlb_error(regs);
}

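/* Report an error return from a sun4v MMU/TLB hypervisor call; all
 * we can usefully do here is log the failing op and its error code.
 */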
void hypervisor_tlbop_error(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
	       err, op);
}

void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
	       err, op);
}

static void do_fpe_common(struct pt_regs *regs)
{
	if (regs->tstate & TSTATE_PRIV) {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	} else {
		unsigned long fsr = current_thread_info()->xfsr[0];
		siginfo_t info;

		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *)regs->tpc;
		info.si_trapno = 0;
		info.si_code = __SI_FAULT;
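		/* %fsr.ftt (bits 16:14) == 1 means IEEE_754_exception,
		 * in which case the cexc field (bits 4:0) says which
		 * IEEE condition fired: nv, of, uf, dz, nx from bit 4
		 * down to bit 0.
		 */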
		if ((fsr & 0x1c000) == (1 << 14)) {
			if (fsr & 0x10)
				info.si_code = FPE_FLTINV;
			else if (fsr & 0x08)
				info.si_code = FPE_FLTOVF;
			else if (fsr & 0x04)
				info.si_code = FPE_FLTUND;
			else if (fsr & 0x02)
				info.si_code = FPE_FLTDIV;
			else if (fsr & 0x01)
				info.si_code = FPE_FLTRES;
		}
		force_sig_info(SIGFPE, &info, current);
	}
}

void do_fpieee(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
		       0, 0x24, SIGFPE) == NOTIFY_STOP)
		goto out;

	do_fpe_common(regs);
out:
	exception_exit(prev_state);
}

void do_fpother(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	struct fpustate *f = FPUSTATE;
	int ret = 0;

	if (notify_die(DIE_TRAP, "fpu exception other", regs,
		       0, 0x25, SIGFPE) == NOTIFY_STOP)
		goto out;

	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
	case (2 << 14): /* unfinished_FPop */
	case (3 << 14): /* unimplemented_FPop */
		ret = do_mathemu(regs, f, false);
		break;
	}
	if (ret)
		goto out;
	do_fpe_common(regs);
out:
	exception_exit(prev_state);
}

void do_tof(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	siginfo_t info;

	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
		       0, 0x26, SIGEMT) == NOTIFY_STOP)
		goto out;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("Penguin overflow trap from kernel mode", regs);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGEMT;
	info.si_errno = 0;
	info.si_code = EMT_TAGOVF;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGEMT, &info, current);
out:
	exception_exit(prev_state);
}

void do_div0(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	siginfo_t info;

	if (notify_die(DIE_TRAP, "integer division by zero", regs,
		       0, 0x28, SIGFPE) == NOTIFY_STOP)
		goto out;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("TL0: Kernel divide by zero.", regs);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = FPE_INTDIV;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGFPE, &info, current);
out:
	exception_exit(prev_state);
}

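/* Dump the three instructions before and the five after the faulting
 * kernel PC, bracketing the faulting instruction itself with <>.
 */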
static void instruction_dump(unsigned int *pc)
{
	int i;

	if ((((unsigned long) pc) & 3))
		return;

	printk("Instruction DUMP:");
	for (i = -3; i < 6; i++)
		printk("%c%08x%c", i ? ' ' : '<', pc[i], i ? ' ' : '>');
	printk("\n");
}

static void user_instruction_dump(unsigned int __user *pc)
{
	int i;
	unsigned int buf[9];

	if ((((unsigned long) pc) & 3))
		return;

	if (copy_from_user(buf, pc - 3, sizeof(buf)))
		return;

	printk("Instruction DUMP:");
	for (i = 0; i < 9; i++)
		printk("%c%08x%c", i == 3 ? ' ' : '<', buf[i], i == 3 ? ' ' : '>');
	printk("\n");
}

void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
	unsigned long fp, ksp;
	struct thread_info *tp;
	int count = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph = 0;
#endif

	ksp = (unsigned long) _ksp;
	if (!tsk)
		tsk = current;
	tp = task_thread_info(tsk);
	if (ksp == 0UL) {
		if (tsk == current)
			asm("mov %%fp, %0" : "=r" (ksp));
		else
			ksp = tp->ksp;
	}
	if (tp == current_thread_info())
		flushw_all();

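	/* The sparc64 ABI keeps %sp/%fp biased by -STACK_BIAS (2047),
	 * so add the bias back to get the real frame address.
	 */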
	fp = ksp + STACK_BIAS;

	printk("Call Trace:\n");
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(tp, fp))
			break;
		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(tp, regs)) {
			if (!(regs->tstate & TSTATE_PRIV))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}

		printk(" [%016lx] %pS\n", pc, (void *) pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
			int index = tsk->curr_ret_stack;
			if (tsk->ret_stack && index >= graph) {
				pc = tsk->ret_stack[index - graph].ret;
				printk(" [%016lx] %pS\n", pc, (void *) pc);
				graph++;
			}
		}
#endif
	} while (++count < 16);
}

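/* Step from one register window save area to the caller's: the saved
 * frame pointer sits in ins[6] (%i6) and carries the same stack bias
 * as %sp/%fp.
 */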
static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
{
	unsigned long fp = rw->ins[6];

	if (!fp)
		return NULL;

	return (struct reg_window *) (fp + STACK_BIAS);
}

void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;
	int count = 0;

	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	__asm__ __volatile__("flushw");
	show_regs(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	if (regs->tstate & TSTATE_PRIV) {
		struct thread_info *tp = current_thread_info();
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		while (rw &&
		       count++ < 30 &&
		       kstack_valid(tp, (unsigned long) rw)) {
			printk("Caller[%016lx]: %pS\n", rw->ins[7],
			       (void *) rw->ins[7]);

			rw = kernel_stack_up(rw);
		}
		instruction_dump((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump((unsigned int __user *) regs->tpc);
	}
	if (panic_on_oops)
		panic("Fatal exception");
	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
EXPORT_SYMBOL(die_if_kernel);

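/* VIS instructions live in the IMPDEP1 opcode space: op (bits 31:30)
 * is 2 and op3 (bits 24:19) is 0x36.  Match on just those two fields.
 */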
#define VIS_OPCODE_MASK	((0x3 << 30) | (0x3f << 19))
#define VIS_OPCODE_VAL	((0x2 << 30) | (0x36 << 19))

void do_illegal_instruction(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	siginfo_t info;

	if (notify_die(DIE_TRAP, "illegal instruction", regs,
		       0, 0x10, SIGILL) == NOTIFY_STOP)
		goto out;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("Kernel illegal instruction", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
			if (handle_popc(insn, regs))
				goto out;
		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
			if (handle_ldf_stq(insn, regs))
				goto out;
		} else if (tlb_type == hypervisor) {
			if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
				if (!vis_emul(regs, insn))
					goto out;
			} else {
				struct fpustate *f = FPUSTATE;

				/* On UltraSPARC T2 and later, FPU insns which
				 * are not implemented in HW signal an illegal
				 * instruction trap and do not set the FP Trap
				 * Type in the %fsr to unimplemented_FPop.
				 */
				if (do_mathemu(regs, f, true))
					goto out;
			}
		}
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *)pc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
out:
	exception_exit(prev_state);
}

void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	enum ctx_state prev_state = exception_enter();
	siginfo_t info;

	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		goto out;

	if (regs->tstate & TSTATE_PRIV) {
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
		goto out;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
out:
	exception_exit(prev_state);
}

void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
		return;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

void do_privop(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	siginfo_t info;

	if (notify_die(DIE_TRAP, "privileged operation", regs,
		       0, 0x11, SIGILL) == NOTIFY_STOP)
		goto out;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
out:
	exception_exit(prev_state);
}

void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}

/* Trap level 1 stuff or other traps we should never see... */
void do_cee(struct pt_regs *regs)
{
	exception_enter();
	die_if_kernel("TL0: Cache Error Exception", regs);
}

void do_div0_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}

void do_fpieee_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}

void do_fpother_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}

void do_ill_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}

void do_irq_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}

void do_lddfmna_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}

void do_stdfmna_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}

void do_paw(struct pt_regs *regs)
{
	exception_enter();
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}

void do_paw_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}

void do_vaw(struct pt_regs *regs)
{
	exception_enter();
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}

void do_vaw_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}

void do_tof_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}

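/* Emulate the sparc32 "read %psr" trap for compat tasks: synthesize
 * a 32-bit PSR image from %tstate, hand it back in the task's %o0,
 * and step past the trap instruction.
 */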
void do_getpsr(struct pt_regs *regs)
{
	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}

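/* One of these per possible cpu; the trap-entry assembler indexes it
 * directly, so the field offsets are pinned by the BUILD_BUG_ON()
 * checks in trap_init() below.
 */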
struct trap_per_cpu trap_block[NR_CPUS];
EXPORT_SYMBOL(trap_block);

/* This can get invoked before sched_init() so play it super safe
 * and use hard_smp_processor_id().
 */
void notrace init_cur_cpu_trap(struct thread_info *t)
{
	int cpu = hard_smp_processor_id();
	struct trap_per_cpu *p = &trap_block[cpu];

	p->thread = t;
	p->pgd_paddr = 0;
}

extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);
extern void tsb_config_offsets_are_bolixed_dave(void);

/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check. */
	BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
		     TI_FLAGS != offsetof(struct thread_info, flags) ||
		     TI_CPU != offsetof(struct thread_info, cpu) ||
		     TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
		     TI_KSP != offsetof(struct thread_info, ksp) ||
		     TI_FAULT_ADDR != offsetof(struct thread_info,
					       fault_address) ||
		     TI_KREGS != offsetof(struct thread_info, kregs) ||
		     TI_UTRAPS != offsetof(struct thread_info, utraps) ||
		     TI_REG_WINDOW != offsetof(struct thread_info,
					       reg_window) ||
		     TI_RWIN_SPTRS != offsetof(struct thread_info,
					       rwbuf_stkptrs) ||
		     TI_GSR != offsetof(struct thread_info, gsr) ||
		     TI_XFSR != offsetof(struct thread_info, xfsr) ||
		     TI_PRE_COUNT != offsetof(struct thread_info,
					      preempt_count) ||
		     TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
		     TI_CURRENT_DS != offsetof(struct thread_info,
					       current_ds) ||
		     TI_KUNA_REGS != offsetof(struct thread_info,
					      kern_una_regs) ||
		     TI_KUNA_INSN != offsetof(struct thread_info,
					      kern_una_insn) ||
		     TI_FPREGS != offsetof(struct thread_info, fpregs) ||
		     (TI_FPREGS & (64 - 1)));

	BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu,
						     thread) ||
		     (TRAP_PER_CPU_PGD_PADDR !=
		      offsetof(struct trap_per_cpu, pgd_paddr)) ||
		     (TRAP_PER_CPU_CPU_MONDO_PA !=
		      offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
		     (TRAP_PER_CPU_DEV_MONDO_PA !=
		      offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
		     (TRAP_PER_CPU_RESUM_MONDO_PA !=
		      offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
		     (TRAP_PER_CPU_RESUM_KBUF_PA !=
		      offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
		     (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
		      offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
		     (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
		      offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
		     (TRAP_PER_CPU_FAULT_INFO !=
		      offsetof(struct trap_per_cpu, fault_info)) ||
		     (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
		      offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
		     (TRAP_PER_CPU_CPU_LIST_PA !=
		      offsetof(struct trap_per_cpu, cpu_list_pa)) ||
		     (TRAP_PER_CPU_TSB_HUGE !=
		      offsetof(struct trap_per_cpu, tsb_huge)) ||
		     (TRAP_PER_CPU_TSB_HUGE_TEMP !=
		      offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
		     (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
		      offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
		     (TRAP_PER_CPU_CPU_MONDO_QMASK !=
		      offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
		     (TRAP_PER_CPU_DEV_MONDO_QMASK !=
		      offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
		     (TRAP_PER_CPU_RESUM_QMASK !=
		      offsetof(struct trap_per_cpu, resum_qmask)) ||
		     (TRAP_PER_CPU_NONRESUM_QMASK !=
		      offsetof(struct trap_per_cpu, nonresum_qmask)) ||
		     (TRAP_PER_CPU_PER_CPU_BASE !=
		      offsetof(struct trap_per_cpu, __per_cpu_base)));

	BUILD_BUG_ON((TSB_CONFIG_TSB !=
		      offsetof(struct tsb_config, tsb)) ||
		     (TSB_CONFIG_RSS_LIMIT !=
		      offsetof(struct tsb_config, tsb_rss_limit)) ||
		     (TSB_CONFIG_NENTRIES !=
		      offsetof(struct tsb_config, tsb_nentries)) ||
		     (TSB_CONFIG_REG_VAL !=
		      offsetof(struct tsb_config, tsb_reg_val)) ||
		     (TSB_CONFIG_MAP_VADDR !=
		      offsetof(struct tsb_config, tsb_map_vaddr)) ||
		     (TSB_CONFIG_MAP_PTE !=
		      offsetof(struct tsb_config, tsb_map_pte)));

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}
1// SPDX-License-Identifier: GPL-2.0-only
2/* arch/sparc64/kernel/traps.c
3 *
4 * Copyright (C) 1995,1997,2008,2009,2012 David S. Miller (davem@davemloft.net)
5 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
6 */
7
8/*
9 * I like traps on v9, :))))
10 */
11
12#include <linux/extable.h>
13#include <linux/sched/mm.h>
14#include <linux/sched/debug.h>
15#include <linux/linkage.h>
16#include <linux/kernel.h>
17#include <linux/signal.h>
18#include <linux/smp.h>
19#include <linux/mm.h>
20#include <linux/init.h>
21#include <linux/kallsyms.h>
22#include <linux/kdebug.h>
23#include <linux/ftrace.h>
24#include <linux/reboot.h>
25#include <linux/gfp.h>
26#include <linux/context_tracking.h>
27
28#include <asm/smp.h>
29#include <asm/delay.h>
30#include <asm/ptrace.h>
31#include <asm/oplib.h>
32#include <asm/page.h>
33#include <asm/unistd.h>
34#include <linux/uaccess.h>
35#include <asm/fpumacro.h>
36#include <asm/lsu.h>
37#include <asm/dcu.h>
38#include <asm/estate.h>
39#include <asm/chafsr.h>
40#include <asm/sfafsr.h>
41#include <asm/psrcompat.h>
42#include <asm/processor.h>
43#include <asm/timer.h>
44#include <asm/head.h>
45#include <asm/prom.h>
46#include <asm/memctrl.h>
47#include <asm/cacheflush.h>
48#include <asm/setup.h>
49
50#include "entry.h"
51#include "kernel.h"
52#include "kstack.h"
53
54/* When an irrecoverable trap occurs at tl > 0, the trap entry
55 * code logs the trap state registers at every level in the trap
56 * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
57 * is as follows:
58 */
59struct tl1_traplog {
60 struct {
61 unsigned long tstate;
62 unsigned long tpc;
63 unsigned long tnpc;
64 unsigned long tt;
65 } trapstack[4];
66 unsigned long tl;
67};
68
69static void dump_tl1_traplog(struct tl1_traplog *p)
70{
71 int i, limit;
72
73 printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
74 "dumping track stack.\n", p->tl);
75
76 limit = (tlb_type == hypervisor) ? 2 : 4;
77 for (i = 0; i < limit; i++) {
78 printk(KERN_EMERG
79 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
80 "TNPC[%016lx] TT[%lx]\n",
81 i + 1,
82 p->trapstack[i].tstate, p->trapstack[i].tpc,
83 p->trapstack[i].tnpc, p->trapstack[i].tt);
84 printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
85 }
86}
87
88void bad_trap(struct pt_regs *regs, long lvl)
89{
90 char buffer[36];
91
92 if (notify_die(DIE_TRAP, "bad trap", regs,
93 0, lvl, SIGTRAP) == NOTIFY_STOP)
94 return;
95
96 if (lvl < 0x100) {
97 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
98 die_if_kernel(buffer, regs);
99 }
100
101 lvl -= 0x100;
102 if (regs->tstate & TSTATE_PRIV) {
103 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
104 die_if_kernel(buffer, regs);
105 }
106 if (test_thread_flag(TIF_32BIT)) {
107 regs->tpc &= 0xffffffff;
108 regs->tnpc &= 0xffffffff;
109 }
110 force_sig_fault(SIGILL, ILL_ILLTRP,
111 (void __user *)regs->tpc, lvl);
112}
113
114void bad_trap_tl1(struct pt_regs *regs, long lvl)
115{
116 char buffer[36];
117
118 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
119 0, lvl, SIGTRAP) == NOTIFY_STOP)
120 return;
121
122 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
123
124 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
125 die_if_kernel (buffer, regs);
126}
127
128#ifdef CONFIG_DEBUG_BUGVERBOSE
129void do_BUG(const char *file, int line)
130{
131 bust_spinlocks(1);
132 printk("kernel BUG at %s:%d!\n", file, line);
133}
134EXPORT_SYMBOL(do_BUG);
135#endif
136
137static DEFINE_SPINLOCK(dimm_handler_lock);
138static dimm_printer_t dimm_handler;
139
140static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen)
141{
142 unsigned long flags;
143 int ret = -ENODEV;
144
145 spin_lock_irqsave(&dimm_handler_lock, flags);
146 if (dimm_handler) {
147 ret = dimm_handler(synd_code, paddr, buf, buflen);
148 } else if (tlb_type == spitfire) {
149 if (prom_getunumber(synd_code, paddr, buf, buflen) == -1)
150 ret = -EINVAL;
151 else
152 ret = 0;
153 } else
154 ret = -ENODEV;
155 spin_unlock_irqrestore(&dimm_handler_lock, flags);
156
157 return ret;
158}
159
160int register_dimm_printer(dimm_printer_t func)
161{
162 unsigned long flags;
163 int ret = 0;
164
165 spin_lock_irqsave(&dimm_handler_lock, flags);
166 if (!dimm_handler)
167 dimm_handler = func;
168 else
169 ret = -EEXIST;
170 spin_unlock_irqrestore(&dimm_handler_lock, flags);
171
172 return ret;
173}
174EXPORT_SYMBOL_GPL(register_dimm_printer);
175
176void unregister_dimm_printer(dimm_printer_t func)
177{
178 unsigned long flags;
179
180 spin_lock_irqsave(&dimm_handler_lock, flags);
181 if (dimm_handler == func)
182 dimm_handler = NULL;
183 spin_unlock_irqrestore(&dimm_handler_lock, flags);
184}
185EXPORT_SYMBOL_GPL(unregister_dimm_printer);
186
187void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
188{
189 enum ctx_state prev_state = exception_enter();
190
191 if (notify_die(DIE_TRAP, "instruction access exception", regs,
192 0, 0x8, SIGTRAP) == NOTIFY_STOP)
193 goto out;
194
195 if (regs->tstate & TSTATE_PRIV) {
196 printk("spitfire_insn_access_exception: SFSR[%016lx] "
197 "SFAR[%016lx], going.\n", sfsr, sfar);
198 die_if_kernel("Iax", regs);
199 }
200 if (test_thread_flag(TIF_32BIT)) {
201 regs->tpc &= 0xffffffff;
202 regs->tnpc &= 0xffffffff;
203 }
204 force_sig_fault(SIGSEGV, SEGV_MAPERR,
205 (void __user *)regs->tpc, 0);
206out:
207 exception_exit(prev_state);
208}
209
210void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
211{
212 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
213 0, 0x8, SIGTRAP) == NOTIFY_STOP)
214 return;
215
216 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
217 spitfire_insn_access_exception(regs, sfsr, sfar);
218}
219
220void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
221{
222 unsigned short type = (type_ctx >> 16);
223 unsigned short ctx = (type_ctx & 0xffff);
224
225 if (notify_die(DIE_TRAP, "instruction access exception", regs,
226 0, 0x8, SIGTRAP) == NOTIFY_STOP)
227 return;
228
229 if (regs->tstate & TSTATE_PRIV) {
230 printk("sun4v_insn_access_exception: ADDR[%016lx] "
231 "CTX[%04x] TYPE[%04x], going.\n",
232 addr, ctx, type);
233 die_if_kernel("Iax", regs);
234 }
235
236 if (test_thread_flag(TIF_32BIT)) {
237 regs->tpc &= 0xffffffff;
238 regs->tnpc &= 0xffffffff;
239 }
240 force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *) addr, 0);
241}
242
243void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
244{
245 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
246 0, 0x8, SIGTRAP) == NOTIFY_STOP)
247 return;
248
249 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
250 sun4v_insn_access_exception(regs, addr, type_ctx);
251}
252
253bool is_no_fault_exception(struct pt_regs *regs)
254{
255 unsigned char asi;
256 u32 insn;
257
258 if (get_user(insn, (u32 __user *)regs->tpc) == -EFAULT)
259 return false;
260
261 /*
262 * Must do a little instruction decoding here in order to
263 * decide on a course of action. The bits of interest are:
264 * insn[31:30] = op, where 3 indicates the load/store group
265 * insn[24:19] = op3, which identifies individual opcodes
266 * insn[13] indicates an immediate offset
267 * op3[4]=1 identifies alternate space instructions
268 * op3[5:4]=3 identifies floating point instructions
269 * op3[2]=1 identifies stores
270 * See "Opcode Maps" in the appendix of any Sparc V9
271 * architecture spec for full details.
272 */
273 if ((insn & 0xc0800000) == 0xc0800000) { /* op=3, op3[4]=1 */
274 if (insn & 0x2000) /* immediate offset */
275 asi = (regs->tstate >> 24); /* saved %asi */
276 else
277 asi = (insn >> 5); /* immediate asi */
278 if ((asi & 0xf2) == ASI_PNF) {
279 if (insn & 0x1000000) { /* op3[5:4]=3 */
280 handle_ldf_stq(insn, regs);
281 return true;
282 } else if (insn & 0x200000) { /* op3[2], stores */
283 return false;
284 }
285 handle_ld_nf(insn, regs);
286 return true;
287 }
288 }
289 return false;
290}
291
292void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
293{
294 enum ctx_state prev_state = exception_enter();
295
296 if (notify_die(DIE_TRAP, "data access exception", regs,
297 0, 0x30, SIGTRAP) == NOTIFY_STOP)
298 goto out;
299
300 if (regs->tstate & TSTATE_PRIV) {
301 /* Test if this comes from uaccess places. */
302 const struct exception_table_entry *entry;
303
304 entry = search_exception_tables(regs->tpc);
305 if (entry) {
306 /* Ouch, somebody is trying VM hole tricks on us... */
307#ifdef DEBUG_EXCEPTIONS
308 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
309 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
310 regs->tpc, entry->fixup);
311#endif
312 regs->tpc = entry->fixup;
313 regs->tnpc = regs->tpc + 4;
314 goto out;
315 }
316 /* Shit... */
317 printk("spitfire_data_access_exception: SFSR[%016lx] "
318 "SFAR[%016lx], going.\n", sfsr, sfar);
319 die_if_kernel("Dax", regs);
320 }
321
322 if (is_no_fault_exception(regs))
323 return;
324
325 force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)sfar, 0);
326out:
327 exception_exit(prev_state);
328}
329
330void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
331{
332 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
333 0, 0x30, SIGTRAP) == NOTIFY_STOP)
334 return;
335
336 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
337 spitfire_data_access_exception(regs, sfsr, sfar);
338}
339
340void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
341{
342 unsigned short type = (type_ctx >> 16);
343 unsigned short ctx = (type_ctx & 0xffff);
344
345 if (notify_die(DIE_TRAP, "data access exception", regs,
346 0, 0x8, SIGTRAP) == NOTIFY_STOP)
347 return;
348
349 if (regs->tstate & TSTATE_PRIV) {
350 /* Test if this comes from uaccess places. */
351 const struct exception_table_entry *entry;
352
353 entry = search_exception_tables(regs->tpc);
354 if (entry) {
355 /* Ouch, somebody is trying VM hole tricks on us... */
356#ifdef DEBUG_EXCEPTIONS
357 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
358 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
359 regs->tpc, entry->fixup);
360#endif
361 regs->tpc = entry->fixup;
362 regs->tnpc = regs->tpc + 4;
363 return;
364 }
365 printk("sun4v_data_access_exception: ADDR[%016lx] "
366 "CTX[%04x] TYPE[%04x], going.\n",
367 addr, ctx, type);
368 die_if_kernel("Dax", regs);
369 }
370
371 if (test_thread_flag(TIF_32BIT)) {
372 regs->tpc &= 0xffffffff;
373 regs->tnpc &= 0xffffffff;
374 }
375 if (is_no_fault_exception(regs))
376 return;
377
378 /* MCD (Memory Corruption Detection) disabled trap (TT=0x19) in HV
379 * is vectored thorugh data access exception trap with fault type
380 * set to HV_FAULT_TYPE_MCD_DIS. Check for MCD disabled trap.
381 * Accessing an address with invalid ASI for the address, for
382 * example setting an ADI tag on an address with ASI_MCD_PRIMARY
383 * when TTE.mcd is not set for the VA, is also vectored into
384 * kerbel by HV as data access exception with fault type set to
385 * HV_FAULT_TYPE_INV_ASI.
386 */
387 switch (type) {
388 case HV_FAULT_TYPE_INV_ASI:
389 force_sig_fault(SIGILL, ILL_ILLADR, (void __user *)addr, 0);
390 break;
391 case HV_FAULT_TYPE_MCD_DIS:
392 force_sig_fault(SIGSEGV, SEGV_ACCADI, (void __user *)addr, 0);
393 break;
394 default:
395 force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)addr, 0);
396 break;
397 }
398}
399
400void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
401{
402 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
403 0, 0x8, SIGTRAP) == NOTIFY_STOP)
404 return;
405
406 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
407 sun4v_data_access_exception(regs, addr, type_ctx);
408}
409
410#ifdef CONFIG_PCI
411#include "pci_impl.h"
412#endif
413
414/* When access exceptions happen, we must do this. */
415static void spitfire_clean_and_reenable_l1_caches(void)
416{
417 unsigned long va;
418
419 if (tlb_type != spitfire)
420 BUG();
421
422 /* Clean 'em. */
423 for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
424 spitfire_put_icache_tag(va, 0x0);
425 spitfire_put_dcache_tag(va, 0x0);
426 }
427
428 /* Re-enable in LSU. */
429 __asm__ __volatile__("flush %%g6\n\t"
430 "membar #Sync\n\t"
431 "stxa %0, [%%g0] %1\n\t"
432 "membar #Sync"
433 : /* no outputs */
434 : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
435 LSU_CONTROL_IM | LSU_CONTROL_DM),
436 "i" (ASI_LSU_CONTROL)
437 : "memory");
438}
439
440static void spitfire_enable_estate_errors(void)
441{
442 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
443 "membar #Sync"
444 : /* no outputs */
445 : "r" (ESTATE_ERR_ALL),
446 "i" (ASI_ESTATE_ERROR_EN));
447}
448
449static char ecc_syndrome_table[] = {
450 0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
451 0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
452 0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
453 0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
454 0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
455 0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
456 0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
457 0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
458 0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
459 0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
460 0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
461 0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
462 0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
463 0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
464 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
465 0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
466 0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
467 0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
468 0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
469 0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
470 0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
471 0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
472 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
473 0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
474 0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
475 0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
476 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
477 0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
478 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
479 0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
480 0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
481 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
482};
483
484static char *syndrome_unknown = "<Unknown>";
485
486static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
487{
488 unsigned short scode;
489 char memmod_str[64], *p;
490
491 if (udbl & bit) {
492 scode = ecc_syndrome_table[udbl & 0xff];
493 if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
494 p = syndrome_unknown;
495 else
496 p = memmod_str;
497 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
498 "Memory Module \"%s\"\n",
499 smp_processor_id(), scode, p);
500 }
501
502 if (udbh & bit) {
503 scode = ecc_syndrome_table[udbh & 0xff];
504 if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
505 p = syndrome_unknown;
506 else
507 p = memmod_str;
508 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
509 "Memory Module \"%s\"\n",
510 smp_processor_id(), scode, p);
511 }
512
513}
514
515static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
516{
517
518 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
519 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
520 smp_processor_id(), afsr, afar, udbl, udbh, tl1);
521
522 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
523
524 /* We always log it, even if someone is listening for this
525 * trap.
526 */
527 notify_die(DIE_TRAP, "Correctable ECC Error", regs,
528 0, TRAP_TYPE_CEE, SIGTRAP);
529
530 /* The Correctable ECC Error trap does not disable I/D caches. So
531 * we only have to restore the ESTATE Error Enable register.
532 */
533 spitfire_enable_estate_errors();
534}
535
536static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
537{
538 printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
539 "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
540 smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
541
542 /* XXX add more human friendly logging of the error status
543 * XXX as is implemented for cheetah
544 */
545
546 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
547
548 /* We always log it, even if someone is listening for this
549 * trap.
550 */
551 notify_die(DIE_TRAP, "Uncorrectable Error", regs,
552 0, tt, SIGTRAP);
553
554 if (regs->tstate & TSTATE_PRIV) {
555 if (tl1)
556 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
557 die_if_kernel("UE", regs);
558 }
559
560 /* XXX need more intelligent processing here, such as is implemented
561 * XXX for cheetah errors, in fact if the E-cache still holds the
562 * XXX line with bad parity this will loop
563 */
564
565 spitfire_clean_and_reenable_l1_caches();
566 spitfire_enable_estate_errors();
567
568 if (test_thread_flag(TIF_32BIT)) {
569 regs->tpc &= 0xffffffff;
570 regs->tnpc &= 0xffffffff;
571 }
572 force_sig_fault(SIGBUS, BUS_OBJERR, (void *)0, 0);
573}
574
575void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
576{
577 unsigned long afsr, tt, udbh, udbl;
578 int tl1;
579
580 afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
581 tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
582 tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
583 udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
584 udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
585
586#ifdef CONFIG_PCI
587 if (tt == TRAP_TYPE_DAE &&
588 pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
589 spitfire_clean_and_reenable_l1_caches();
590 spitfire_enable_estate_errors();
591
592 pci_poke_faulted = 1;
593 regs->tnpc = regs->tpc + 4;
594 return;
595 }
596#endif
597
598 if (afsr & SFAFSR_UE)
599 spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
600
601 if (tt == TRAP_TYPE_CEE) {
602 /* Handle the case where we took a CEE trap, but ACK'd
603 * only the UE state in the UDB error registers.
604 */
605 if (afsr & SFAFSR_UE) {
606 if (udbh & UDBE_CE) {
607 __asm__ __volatile__(
608 "stxa %0, [%1] %2\n\t"
609 "membar #Sync"
610 : /* no outputs */
611 : "r" (udbh & UDBE_CE),
612 "r" (0x0), "i" (ASI_UDB_ERROR_W));
613 }
614 if (udbl & UDBE_CE) {
615 __asm__ __volatile__(
616 "stxa %0, [%1] %2\n\t"
617 "membar #Sync"
618 : /* no outputs */
619 : "r" (udbl & UDBE_CE),
620 "r" (0x18), "i" (ASI_UDB_ERROR_W));
621 }
622 }
623
624 spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
625 }
626}
627
628int cheetah_pcache_forced_on;
629
630void cheetah_enable_pcache(void)
631{
632 unsigned long dcr;
633
634 printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
635 smp_processor_id());
636
637 __asm__ __volatile__("ldxa [%%g0] %1, %0"
638 : "=r" (dcr)
639 : "i" (ASI_DCU_CONTROL_REG));
640 dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
641 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
642 "membar #Sync"
643 : /* no outputs */
644 : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
645}
646
647/* Cheetah error trap handling. */
648static unsigned long ecache_flush_physbase;
649static unsigned long ecache_flush_linesize;
650static unsigned long ecache_flush_size;
651
652/* This table is ordered in priority of errors and matches the
653 * AFAR overwrite policy as well.
654 */
655
656struct afsr_error_table {
657 unsigned long mask;
658 const char *name;
659};
660
661static const char CHAFSR_PERR_msg[] =
662 "System interface protocol error";
663static const char CHAFSR_IERR_msg[] =
664 "Internal processor error";
665static const char CHAFSR_ISAP_msg[] =
666 "System request parity error on incoming address";
667static const char CHAFSR_UCU_msg[] =
668 "Uncorrectable E-cache ECC error for ifetch/data";
669static const char CHAFSR_UCC_msg[] =
670 "SW Correctable E-cache ECC error for ifetch/data";
671static const char CHAFSR_UE_msg[] =
672 "Uncorrectable system bus data ECC error for read";
673static const char CHAFSR_EDU_msg[] =
674 "Uncorrectable E-cache ECC error for stmerge/blkld";
675static const char CHAFSR_EMU_msg[] =
676 "Uncorrectable system bus MTAG error";
677static const char CHAFSR_WDU_msg[] =
678 "Uncorrectable E-cache ECC error for writeback";
679static const char CHAFSR_CPU_msg[] =
680 "Uncorrectable ECC error for copyout";
681static const char CHAFSR_CE_msg[] =
682 "HW corrected system bus data ECC error for read";
683static const char CHAFSR_EDC_msg[] =
684 "HW corrected E-cache ECC error for stmerge/blkld";
685static const char CHAFSR_EMC_msg[] =
686 "HW corrected system bus MTAG ECC error";
687static const char CHAFSR_WDC_msg[] =
688 "HW corrected E-cache ECC error for writeback";
689static const char CHAFSR_CPC_msg[] =
690 "HW corrected ECC error for copyout";
691static const char CHAFSR_TO_msg[] =
692 "Unmapped error from system bus";
693static const char CHAFSR_BERR_msg[] =
694 "Bus error response from system bus";
695static const char CHAFSR_IVC_msg[] =
696 "HW corrected system bus data ECC error for ivec read";
697static const char CHAFSR_IVU_msg[] =
698 "Uncorrectable system bus data ECC error for ivec read";
699static struct afsr_error_table __cheetah_error_table[] = {
700 { CHAFSR_PERR, CHAFSR_PERR_msg },
701 { CHAFSR_IERR, CHAFSR_IERR_msg },
702 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
703 { CHAFSR_UCU, CHAFSR_UCU_msg },
704 { CHAFSR_UCC, CHAFSR_UCC_msg },
705 { CHAFSR_UE, CHAFSR_UE_msg },
706 { CHAFSR_EDU, CHAFSR_EDU_msg },
707 { CHAFSR_EMU, CHAFSR_EMU_msg },
708 { CHAFSR_WDU, CHAFSR_WDU_msg },
709 { CHAFSR_CPU, CHAFSR_CPU_msg },
710 { CHAFSR_CE, CHAFSR_CE_msg },
711 { CHAFSR_EDC, CHAFSR_EDC_msg },
712 { CHAFSR_EMC, CHAFSR_EMC_msg },
713 { CHAFSR_WDC, CHAFSR_WDC_msg },
714 { CHAFSR_CPC, CHAFSR_CPC_msg },
715 { CHAFSR_TO, CHAFSR_TO_msg },
716 { CHAFSR_BERR, CHAFSR_BERR_msg },
717 /* These two do not update the AFAR. */
718 { CHAFSR_IVC, CHAFSR_IVC_msg },
719 { CHAFSR_IVU, CHAFSR_IVU_msg },
720 { 0, NULL },
721};
722static const char CHPAFSR_DTO_msg[] =
723 "System bus unmapped error for prefetch/storequeue-read";
724static const char CHPAFSR_DBERR_msg[] =
725 "System bus error for prefetch/storequeue-read";
726static const char CHPAFSR_THCE_msg[] =
727 "Hardware corrected E-cache Tag ECC error";
728static const char CHPAFSR_TSCE_msg[] =
729 "SW handled correctable E-cache Tag ECC error";
730static const char CHPAFSR_TUE_msg[] =
731 "Uncorrectable E-cache Tag ECC error";
732static const char CHPAFSR_DUE_msg[] =
733 "System bus uncorrectable data ECC error due to prefetch/store-fill";
734static struct afsr_error_table __cheetah_plus_error_table[] = {
735 { CHAFSR_PERR, CHAFSR_PERR_msg },
736 { CHAFSR_IERR, CHAFSR_IERR_msg },
737 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
738 { CHAFSR_UCU, CHAFSR_UCU_msg },
739 { CHAFSR_UCC, CHAFSR_UCC_msg },
740 { CHAFSR_UE, CHAFSR_UE_msg },
741 { CHAFSR_EDU, CHAFSR_EDU_msg },
742 { CHAFSR_EMU, CHAFSR_EMU_msg },
743 { CHAFSR_WDU, CHAFSR_WDU_msg },
744 { CHAFSR_CPU, CHAFSR_CPU_msg },
745 { CHAFSR_CE, CHAFSR_CE_msg },
746 { CHAFSR_EDC, CHAFSR_EDC_msg },
747 { CHAFSR_EMC, CHAFSR_EMC_msg },
748 { CHAFSR_WDC, CHAFSR_WDC_msg },
749 { CHAFSR_CPC, CHAFSR_CPC_msg },
750 { CHAFSR_TO, CHAFSR_TO_msg },
751 { CHAFSR_BERR, CHAFSR_BERR_msg },
752 { CHPAFSR_DTO, CHPAFSR_DTO_msg },
753 { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
754 { CHPAFSR_THCE, CHPAFSR_THCE_msg },
755 { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
756 { CHPAFSR_TUE, CHPAFSR_TUE_msg },
757 { CHPAFSR_DUE, CHPAFSR_DUE_msg },
758 /* These two do not update the AFAR. */
759 { CHAFSR_IVC, CHAFSR_IVC_msg },
760 { CHAFSR_IVU, CHAFSR_IVU_msg },
761 { 0, NULL },
762};
763static const char JPAFSR_JETO_msg[] =
764 "System interface protocol error, hw timeout caused";
765static const char JPAFSR_SCE_msg[] =
766 "Parity error on system snoop results";
767static const char JPAFSR_JEIC_msg[] =
768 "System interface protocol error, illegal command detected";
769static const char JPAFSR_JEIT_msg[] =
770 "System interface protocol error, illegal ADTYPE detected";
771static const char JPAFSR_OM_msg[] =
772 "Out of range memory error has occurred";
773static const char JPAFSR_ETP_msg[] =
774 "Parity error on L2 cache tag SRAM";
775static const char JPAFSR_UMS_msg[] =
776 "Error due to unsupported store";
777static const char JPAFSR_RUE_msg[] =
778 "Uncorrectable ECC error from remote cache/memory";
779static const char JPAFSR_RCE_msg[] =
780 "Correctable ECC error from remote cache/memory";
781static const char JPAFSR_BP_msg[] =
782 "JBUS parity error on returned read data";
783static const char JPAFSR_WBP_msg[] =
784 "JBUS parity error on data for writeback or block store";
785static const char JPAFSR_FRC_msg[] =
786 "Foreign read to DRAM incurring correctable ECC error";
787static const char JPAFSR_FRU_msg[] =
788 "Foreign read to DRAM incurring uncorrectable ECC error";
789static struct afsr_error_table __jalapeno_error_table[] = {
790 { JPAFSR_JETO, JPAFSR_JETO_msg },
791 { JPAFSR_SCE, JPAFSR_SCE_msg },
792 { JPAFSR_JEIC, JPAFSR_JEIC_msg },
793 { JPAFSR_JEIT, JPAFSR_JEIT_msg },
794 { CHAFSR_PERR, CHAFSR_PERR_msg },
795 { CHAFSR_IERR, CHAFSR_IERR_msg },
796 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
797 { CHAFSR_UCU, CHAFSR_UCU_msg },
798 { CHAFSR_UCC, CHAFSR_UCC_msg },
799 { CHAFSR_UE, CHAFSR_UE_msg },
800 { CHAFSR_EDU, CHAFSR_EDU_msg },
801 { JPAFSR_OM, JPAFSR_OM_msg },
802 { CHAFSR_WDU, CHAFSR_WDU_msg },
803 { CHAFSR_CPU, CHAFSR_CPU_msg },
804 { CHAFSR_CE, CHAFSR_CE_msg },
805 { CHAFSR_EDC, CHAFSR_EDC_msg },
806 { JPAFSR_ETP, JPAFSR_ETP_msg },
807 { CHAFSR_WDC, CHAFSR_WDC_msg },
808 { CHAFSR_CPC, CHAFSR_CPC_msg },
809 { CHAFSR_TO, CHAFSR_TO_msg },
810 { CHAFSR_BERR, CHAFSR_BERR_msg },
811 { JPAFSR_UMS, JPAFSR_UMS_msg },
812 { JPAFSR_RUE, JPAFSR_RUE_msg },
813 { JPAFSR_RCE, JPAFSR_RCE_msg },
814 { JPAFSR_BP, JPAFSR_BP_msg },
815 { JPAFSR_WBP, JPAFSR_WBP_msg },
816 { JPAFSR_FRC, JPAFSR_FRC_msg },
817 { JPAFSR_FRU, JPAFSR_FRU_msg },
818 /* These two do not update the AFAR. */
819 { CHAFSR_IVU, CHAFSR_IVU_msg },
820 { 0, NULL },
821};
822static struct afsr_error_table *cheetah_error_table;
823static unsigned long cheetah_afsr_errors;
824
825struct cheetah_err_info *cheetah_error_log;
826
static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
{
	struct cheetah_err_info *p;
	int cpu = smp_processor_id();

	if (!cheetah_error_log)
		return NULL;

	p = cheetah_error_log + (cpu * 2);
	if ((afsr & CHAFSR_TL1) != 0UL)
		p++;

	return p;
}

extern unsigned int tl0_icpe[], tl1_icpe[];
extern unsigned int tl0_dcpe[], tl1_dcpe[];
extern unsigned int tl0_fecc[], tl1_fecc[];
extern unsigned int tl0_cee[], tl1_cee[];
extern unsigned int tl0_iae[], tl1_iae[];
extern unsigned int tl0_dae[], tl1_dae[];
extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];

void __init cheetah_ecache_flush_init(void)
{
	unsigned long largest_size, smallest_linesize, order, ver;
	int i, sz;

	/* Scan all cpu device tree nodes, note two values:
	 * 1) largest E-cache size
	 * 2) smallest E-cache line size
	 */
	largest_size = 0UL;
	smallest_linesize = ~0UL;

	for (i = 0; i < NR_CPUS; i++) {
		unsigned long val;

		val = cpu_data(i).ecache_size;
		if (!val)
			continue;

		if (val > largest_size)
			largest_size = val;

		val = cpu_data(i).ecache_line_size;
		if (val < smallest_linesize)
			smallest_linesize = val;
	}

	if (largest_size == 0UL || smallest_linesize == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
			    "parameters.\n");
		prom_halt();
	}

	ecache_flush_size = (2 * largest_size);
	ecache_flush_linesize = smallest_linesize;

	ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);

	if (ecache_flush_physbase == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
			    "contiguous physical memory.\n",
			    ecache_flush_size);
		prom_halt();
	}

	/* Now allocate error trap reporting scoreboard. */
	sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
	for (order = 0; order < MAX_ORDER; order++) {
		if ((PAGE_SIZE << order) >= sz)
			break;
	}
	cheetah_error_log = (struct cheetah_err_info *)
		__get_free_pages(GFP_KERNEL, order);
	if (!cheetah_error_log) {
		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
			    "error logging scoreboard (%d bytes).\n", sz);
		prom_halt();
	}
	memset(cheetah_error_log, 0, PAGE_SIZE << order);

	/* Mark all AFSRs as invalid so that the trap handler will
	 * log new information there.
	 */
	for (i = 0; i < 2 * NR_CPUS; i++)
		cheetah_error_log[i].afsr = CHAFSR_INVALID;

	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	if ((ver >> 32) == __JALAPENO_ID ||
	    (ver >> 32) == __SERRANO_ID) {
		cheetah_error_table = &__jalapeno_error_table[0];
		cheetah_afsr_errors = JPAFSR_ERRORS;
	} else if ((ver >> 32) == 0x003e0015) {
		cheetah_error_table = &__cheetah_plus_error_table[0];
		cheetah_afsr_errors = CHPAFSR_ERRORS;
	} else {
		cheetah_error_table = &__cheetah_error_table[0];
		cheetah_afsr_errors = CHAFSR_ERRORS;
	}

	/* Now patch trap tables. */
	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	if (tlb_type == cheetah_plus) {
		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
	}
	flushi(PAGE_OFFSET);
}

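/* Displacement-flush the E-cache by reading ecache_flush_size bytes
 * (twice the largest E-cache) of the reserved physical span through
 * ASI_PHYS_USE_EC, pushing every line out to memory.
 */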
static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	__asm__ __volatile__("1: subcc %0, %4, %0\n\t"
			     "   bne,pt %%xcc, 1b\n\t"
			     "    ldxa [%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}

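/* Displacement-flush a single line: read the address in the flush
 * span that indexes to the same E-cache set as PHYSADDR, plus its
 * alias half a span away, so whatever the set held gets evicted.
 */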
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}

/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned int icache_size, icache_line_size;
	unsigned long addr;

	icache_size = local_cpu_data().icache_size;
	icache_line_size = local_cpu_data().icache_line_size;

	/* Clear the valid bits in all the tags. */
	for (addr = 0; addr < icache_size; addr += icache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr | (2 << 3)),
				       "i" (ASI_IC_TAG));
	}
}

static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}

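/* Invalidate the entire D-cache by clearing every tag through the
 * diagnostic D-cache tag ASI.
 */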
static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}

/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		unsigned long tag = (addr >> 14);
		unsigned long line;

		__asm__ __volatile__("membar #Sync\n\t"
				     "stxa %0, [%1] %2\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (addr),
				       "i" (ASI_DCACHE_UTAG));
		for (line = addr; line < addr + dcache_line_size; line += 8)
			__asm__ __volatile__("membar #Sync\n\t"
					     "stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (line),
					       "i" (ASI_DCACHE_DATA));
	}
}

/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
#define MT0	137
#define MT1	138
#define MT2	139
#define NONE	254
#define MTC0	140
#define MTC1	141
#define MTC2	142
#define MTC3	143
#define C0	128
#define C1	129
#define C2	130
#define C3	131
#define C4	132
#define C5	133
#define C6	134
#define C7	135
#define C8	136
#define M2	144
#define M3	145
#define M4	146
#define M	147
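/* Syndrome table entries: 0-127 name the single data bit in error,
 * C0-C8 the ECC check bits, MT0-MT2 and MTC0-MTC3 the mtag data and
 * check bits, M2/M3/M4 double/triple/quad bit errors, M some other
 * multiple-bit error, and NONE a syndrome that decodes to no error.
 */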
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
static unsigned char cheetah_mtag_syntab[] = {
	NONE, MTC0,
	MTC1, NONE,
	MTC2, NONE,
	NONE, MT0,
	MTC3, NONE,
	NONE, MT1,
	NONE, MT2,
	NONE, NONE
};

/* Return the highest priority error condition mentioned. */
static inline unsigned long cheetah_get_hipri(unsigned long afsr)
{
	unsigned long tmp = 0;
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
			return tmp;
	}
	return tmp;
}

static const char *cheetah_get_string(unsigned long bit)
{
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((bit & cheetah_error_table[i].mask) != 0UL)
			return cheetah_error_table[i].name;
	}
	return "???";
}

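/* Dump everything we know about the error: the trap registers, the
 * decoded syndromes, the DIMM unum if the memory controller driver
 * can resolve the AFAR to one, and the cache snapshots captured by
 * the trap handler.
 */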
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
	printk("%s" "ERROR(%d): ",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
	printk("TPC<%pS>\n", (void *) regs->tpc);
	printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE | CHAFSR_CE | \
			 CHAFSR_EDC | CHAFSR_EDU | \
			 CHAFSR_UCC | CHAFSR_UCU | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] "
	       "u[%016llx] l[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}

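/* Re-read the AFSR to see whether any new error bits were set while
 * error reporting was disabled; if so, record AFSR/AFAR in *LOGP and
 * return 1.  Writing the value back clears the bits we observed.
 */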
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}

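/* Handle a Fast-ECC error trap.  The low-level trap handler has
 * already disabled the I/D caches and error reporting and logged a
 * snapshot, so flush the caches, re-enable everything, and decide
 * from the AFSR bits whether we can keep running.
 */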
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}

/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
			     "andn %0, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %2\n\t"
			     "membar #Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "casxa [%2] %3, %%g0, %%g0\n\t"
			     "ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}

/* Return non-zero if PADDR is a valid physical memory address. */
static int cheetah_check_main_memory(unsigned long paddr)
{
	unsigned long vaddr = PAGE_OFFSET + paddr;

	if (vaddr > (unsigned long) high_memory)
		return 0;

	return kern_addr_valid(vaddr);
}

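/* Handle a correctable ECC error trap.  If the AFAR points at main
 * memory, try to scrub the offending line via cheetah_fix_ce(), then
 * flush whichever caches the error implicates before turning error
 * reporting back on.
 */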
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}

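/* Handle a deferred asynchronous error trap (UE, BERR, TO, ...).
 * Deferred means the faulting instruction may have retired long ago,
 * so we only attempt recovery for user accesses or kernel accesses
 * covered by an exception table entry.
 */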
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		(void) cheetah_recheck_errors(NULL);

		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			const struct exception_table_entry *entry;

			entry = search_exception_tables(regs->tpc);
			if (entry) {
				/* OK, kernel access to userspace. */
				recoverable = 1;
			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = entry->fixup;
					regs->tnpc = regs->tpc + 4;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}

/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 * Bit0: 0=dcache,1=icache
 * Bit1: 0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
	printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
}

struct sun4v_error_entry {
	/* Unique error handle */
/*0x00*/u64		err_handle;

	/* %stick value at the time of the error */
/*0x08*/u64		err_stick;

/*0x10*/u8		reserved_1[3];

	/* Error type */
/*0x13*/u8		err_type;
#define SUN4V_ERR_TYPE_UNDEFINED	0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
#define SUN4V_ERR_TYPE_SHUTDOWN_RQST	4
#define SUN4V_ERR_TYPE_DUMP_CORE	5
#define SUN4V_ERR_TYPE_SP_STATE_CHANGE	6
#define SUN4V_ERR_TYPE_NUM		7

	/* Error attributes */
/*0x14*/u32		err_attrs;
#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
#define SUN4V_ERR_ATTRS_PIO		0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
#define SUN4V_ERR_ATTRS_SHUTDOWN_RQST	0x00000020
#define SUN4V_ERR_ATTRS_ASR		0x00000040
#define SUN4V_ERR_ATTRS_ASI		0x00000080
#define SUN4V_ERR_ATTRS_PRIV_REG	0x00000100
#define SUN4V_ERR_ATTRS_SPSTATE_MSK	0x00000600
#define SUN4V_ERR_ATTRS_MCD		0x00000800
#define SUN4V_ERR_ATTRS_SPSTATE_SHFT	9
#define SUN4V_ERR_ATTRS_MODE_MSK	0x03000000
#define SUN4V_ERR_ATTRS_MODE_SHFT	24
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000

#define SUN4V_ERR_SPSTATE_FAULTED	0
#define SUN4V_ERR_SPSTATE_AVAILABLE	1
#define SUN4V_ERR_SPSTATE_NOT_PRESENT	2

#define SUN4V_ERR_MODE_USER		1
#define SUN4V_ERR_MODE_PRIV		2

	/* Real address of the memory region or PIO transaction */
/*0x18*/u64		err_raddr;

	/* Size of the operation triggering the error, in bytes */
/*0x20*/u32		err_size;

	/* ID of the CPU */
/*0x24*/u16		err_cpu;

	/* Grace period for shutdown, in seconds */
/*0x26*/u16		err_secs;

	/* Value of the %asi register */
/*0x28*/u8		err_asi;

/*0x29*/u8		reserved_2;

	/* Value of the ASR register number */
/*0x2a*/u16		err_asr;
#define SUN4V_ERR_ASR_VALID		0x8000

/*0x2c*/u32		reserved_3;
/*0x30*/u64		reserved_4;
/*0x38*/u64		reserved_5;
};

static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);

static const char *sun4v_err_type_to_str(u8 type)
{
	static const char *types[SUN4V_ERR_TYPE_NUM] = {
		"undefined",
		"uncorrected resumable",
		"precise nonresumable",
		"deferred nonresumable",
		"shutdown request",
		"dump core",
		"SP state change",
	};

	if (type < SUN4V_ERR_TYPE_NUM)
		return types[type];

	return "unknown";
}

static void sun4v_emit_err_attr_strings(u32 attrs)
{
	static const char *attr_names[] = {
		"processor",
		"memory",
		"PIO",
		"int-registers",
		"fpu-registers",
		"shutdown-request",
		"ASR",
		"ASI",
		"priv-reg",
	};
	static const char *sp_states[] = {
		"sp-faulted",
		"sp-available",
		"sp-not-present",
		"sp-state-reserved",
	};
	static const char *modes[] = {
		"mode-reserved0",
		"user",
		"priv",
		"mode-reserved1",
	};
	u32 sp_state, mode;
	int i;

	for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
		if (attrs & (1U << i)) {
			const char *s = attr_names[i];

			pr_cont("%s ", s);
		}
	}

	sp_state = ((attrs & SUN4V_ERR_ATTRS_SPSTATE_MSK) >>
		    SUN4V_ERR_ATTRS_SPSTATE_SHFT);
	pr_cont("%s ", sp_states[sp_state]);

	mode = ((attrs & SUN4V_ERR_ATTRS_MODE_MSK) >>
		SUN4V_ERR_ATTRS_MODE_SHFT);
	pr_cont("%s ", modes[mode]);

	if (attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL)
		pr_cont("res-queue-full ");
}

/* When the report contains a real-address of "-1" it means that the
 * hardware did not provide the address.  So we compute the effective
 * address of the load or store instruction at regs->tpc and report
 * that.  Usually when this happens it's a PIO and in such a case we
 * are using physical addresses with bypass ASIs anyways, so what we
 * report here is exactly what we want.
 */
static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
{
	unsigned int insn;
	u64 addr;

	if (!(regs->tstate & TSTATE_PRIV))
		return;

	insn = *(unsigned int *) regs->tpc;

	addr = compute_effective_address(regs, insn, 0);

	printk("%s: insn effective address [0x%016llx]\n",
	       pfx, addr);
}

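/* Pretty-print a single sun4v error report.  Which fields of the
 * entry are valid depends on the attribute bits, so each optional
 * field is gated on the attrs mask.  OCNT is the queue overflow
 * counter to report and reset.
 */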
static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
			    int cpu, const char *pfx, atomic_t *ocnt)
{
	u64 *raw_ptr = (u64 *) ent;
	u32 attrs;
	int cnt;

	printk("%s: Reporting on cpu %d\n", pfx, cpu);
	printk("%s: TPC [0x%016lx] <%pS>\n",
	       pfx, regs->tpc, (void *) regs->tpc);

	printk("%s: RAW [%016llx:%016llx:%016llx:%016llx\n",
	       pfx, raw_ptr[0], raw_ptr[1], raw_ptr[2], raw_ptr[3]);
	printk("%s:      %016llx:%016llx:%016llx:%016llx]\n",
	       pfx, raw_ptr[4], raw_ptr[5], raw_ptr[6], raw_ptr[7]);

	printk("%s: handle [0x%016llx] stick [0x%016llx]\n",
	       pfx, ent->err_handle, ent->err_stick);

	printk("%s: type [%s]\n", pfx, sun4v_err_type_to_str(ent->err_type));

	attrs = ent->err_attrs;
	printk("%s: attrs [0x%08x] < ", pfx, attrs);
	sun4v_emit_err_attr_strings(attrs);
	pr_cont(">\n");

	/* Various fields in the error report are only valid if
	 * certain attribute bits are set.
	 */
	if (attrs & (SUN4V_ERR_ATTRS_MEMORY |
		     SUN4V_ERR_ATTRS_PIO |
		     SUN4V_ERR_ATTRS_ASI)) {
		printk("%s: raddr [0x%016llx]\n", pfx, ent->err_raddr);

		if (ent->err_raddr == ~(u64)0)
			sun4v_report_real_raddr(pfx, regs);
	}

	if (attrs & (SUN4V_ERR_ATTRS_MEMORY | SUN4V_ERR_ATTRS_ASI))
		printk("%s: size [0x%x]\n", pfx, ent->err_size);

	if (attrs & (SUN4V_ERR_ATTRS_PROCESSOR |
		     SUN4V_ERR_ATTRS_INT_REGISTERS |
		     SUN4V_ERR_ATTRS_FPU_REGISTERS |
		     SUN4V_ERR_ATTRS_PRIV_REG))
		printk("%s: cpu[%u]\n", pfx, ent->err_cpu);

	if (attrs & SUN4V_ERR_ATTRS_ASI)
		printk("%s: asi [0x%02x]\n", pfx, ent->err_asi);

	if ((attrs & (SUN4V_ERR_ATTRS_INT_REGISTERS |
		      SUN4V_ERR_ATTRS_FPU_REGISTERS |
		      SUN4V_ERR_ATTRS_PRIV_REG)) &&
	    (ent->err_asr & SUN4V_ERR_ASR_VALID) != 0)
		printk("%s: reg [0x%04x]\n",
		       pfx, ent->err_asr & ~SUN4V_ERR_ASR_VALID);

	show_regs(regs);

	if ((cnt = atomic_read(ocnt)) != 0) {
		atomic_set(ocnt, 0);
		wmb();
		printk("%s: Queue overflowed %d times.\n",
		       pfx, cnt);
	}
}

/* Handle memory corruption detected error which is vectored in
 * through resumable error trap.
 */
void do_mcd_err(struct pt_regs *regs, struct sun4v_error_entry ent)
{
	if (notify_die(DIE_TRAP, "MCD error", regs, 0, 0x34,
		       SIGSEGV) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* MCD exception could happen because the task was
		 * running a system call with MCD enabled and passed a
		 * non-versioned pointer or pointer with bad version
		 * tag to the system call.  In such cases, the hypervisor
		 * places the address of the offending instruction in the
		 * resumable error report.  This is a deferred error,
		 * so the read/write that caused the trap was potentially
		 * retired a long time ago and we may have no choice
		 * but to send SIGSEGV to the process.
		 */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Looks like a bad syscall parameter */
#ifdef DEBUG_EXCEPTIONS
			pr_emerg("Exception: PC<%016lx> faddr<UNKNOWN>\n",
				 regs->tpc);
			pr_emerg("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
				 ent.err_raddr, entry->fixup);
#endif
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
	}

	/* Send SIGSEGV to the userspace process with the right signal
	 * code
	 */
	force_sig_fault(SIGSEGV, SEGV_ADIDERR, (void __user *)ent.err_raddr,
			0);
}

/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
 * Log the event and clear the first word of the entry.
 */
void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
{
	enum ctx_state prev_state = exception_enter();
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	paddr = tb->resum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry. */
	ent->err_handle = 0;
	wmb();

	put_cpu();

	if (local_copy.err_type == SUN4V_ERR_TYPE_SHUTDOWN_RQST) {
		/* We should really take the seconds field of
		 * the error report and use it for the shutdown
		 * invocation, but for now do the same thing we
		 * do for a DS shutdown request.
		 */
		pr_info("Shutdown request, %u seconds...\n",
			local_copy.err_secs);
		orderly_poweroff(true);
		goto out;
	}

	/* If this is a memory corruption detected error vectored in
	 * by HV through resumable error trap, call the handler
	 */
	if (local_copy.err_attrs & SUN4V_ERR_ATTRS_MCD) {
		do_mcd_err(regs, local_copy);
		goto out;
	}

	sun4v_log_error(regs, &local_copy, cpu,
			KERN_ERR "RESUMABLE ERROR",
			&sun4v_resum_oflow_cnt);
out:
	exception_exit(prev_state);
}

/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_resum_overflow(struct pt_regs *regs)
{
	atomic_inc(&sun4v_resum_oflow_cnt);
}

/* Given a set of registers, get the virtual address that was being
 * accessed by the faulting instruction at tpc.
 */
static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
{
	unsigned int insn;

	if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
		return compute_effective_address(regs, insn,
						 (insn >> 25) & 0x1f);
	}
	return 0;
}

/* Attempt to handle non-resumable errors generated from userspace.
 * Returns true if the signal was handled, false otherwise.
 */
bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
				       struct sun4v_error_entry *ent)
{
	unsigned int attrs = ent->err_attrs;

	if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
		unsigned long addr = ent->err_raddr;

		if (addr == ~(u64)0) {
			/* This seems highly unlikely to ever occur */
			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
		} else {
			unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
							      PAGE_SIZE);

			/* Break the unfortunate news. */
			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
				 addr);
			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu pages.\n",
				 page_cnt);

			while (page_cnt-- > 0) {
				if (pfn_valid(addr >> PAGE_SHIFT))
					get_page(pfn_to_page(addr >> PAGE_SHIFT));
				addr += PAGE_SIZE;
			}
		}
		force_sig(SIGKILL);

		return true;
	}
	if (attrs & SUN4V_ERR_ATTRS_PIO) {
		force_sig_fault(SIGBUS, BUS_ADRERR,
				(void __user *)sun4v_get_vaddr(regs), 0);
		return true;
	}

	/* Default to doing nothing */
	return false;
}

/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
 * Log the event, clear the first word of the entry, and die.
 */
void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	paddr = tb->nonresum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry. */
	ent->err_handle = 0;
	wmb();

	put_cpu();

	if (!(regs->tstate & TSTATE_PRIV) &&
	    sun4v_nonresum_error_user_handled(regs, &local_copy)) {
		/* DON'T PANIC: This userspace error was handled. */
		return;
	}

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	sun4v_log_error(regs, &local_copy, cpu,
			KERN_EMERG "NON-RESUMABLE ERROR",
			&sun4v_nonresum_oflow_cnt);

	panic("Non-resumable error.");
}

/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_nonresum_overflow(struct pt_regs *regs)
{
	/* XXX Actually, even this may not make much sense.  Perhaps
	 * XXX we should just pull the plug and panic directly from here?
	 */
	atomic_inc(&sun4v_nonresum_oflow_cnt);
}

static void sun4v_tlb_error(struct pt_regs *regs)
{
	die_if_kernel("TLB/TSB error", regs);
}

unsigned long sun4v_err_itlb_vaddr;
unsigned long sun4v_err_itlb_ctx;
unsigned long sun4v_err_itlb_pte;
unsigned long sun4v_err_itlb_error;

void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
	       sun4v_err_itlb_pte, sun4v_err_itlb_error);

	sun4v_tlb_error(regs);
}

unsigned long sun4v_err_dtlb_vaddr;
unsigned long sun4v_err_dtlb_ctx;
unsigned long sun4v_err_dtlb_pte;
unsigned long sun4v_err_dtlb_error;

void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
	       sun4v_err_dtlb_pte, sun4v_err_dtlb_error);

	sun4v_tlb_error(regs);
}

void hypervisor_tlbop_error(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
	       err, op);
}

void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
	       err, op);
}

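/* Common FPE disposition: kernel traps simply skip the instruction,
 * while user traps decode the cexc field of the saved %fsr into a
 * SIGFPE si_code when the trap type is IEEE_754_exception (ftt == 1,
 * i.e. (fsr & 0x1c000) == (1 << 14)).
 */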
static void do_fpe_common(struct pt_regs *regs)
{
	if (regs->tstate & TSTATE_PRIV) {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	} else {
		unsigned long fsr = current_thread_info()->xfsr[0];
		int code;

		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		code = FPE_FLTUNK;
		if ((fsr & 0x1c000) == (1 << 14)) {
			if (fsr & 0x10)
				code = FPE_FLTINV;
			else if (fsr & 0x08)
				code = FPE_FLTOVF;
			else if (fsr & 0x04)
				code = FPE_FLTUND;
			else if (fsr & 0x02)
				code = FPE_FLTDIV;
			else if (fsr & 0x01)
				code = FPE_FLTRES;
		}
		force_sig_fault(SIGFPE, code,
				(void __user *)regs->tpc, 0);
	}
}

void do_fpieee(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
		       0, 0x24, SIGFPE) == NOTIFY_STOP)
		goto out;

	do_fpe_common(regs);
out:
	exception_exit(prev_state);
}

void do_fpother(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	struct fpustate *f = FPUSTATE;
	int ret = 0;

	if (notify_die(DIE_TRAP, "fpu exception other", regs,
		       0, 0x25, SIGFPE) == NOTIFY_STOP)
		goto out;

	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
	case (2 << 14): /* unfinished_FPop */
	case (3 << 14): /* unimplemented_FPop */
		ret = do_mathemu(regs, f, false);
		break;
	}
	if (ret)
		goto out;
	do_fpe_common(regs);
out:
	exception_exit(prev_state);
}

void do_tof(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
		       0, 0x26, SIGEMT) == NOTIFY_STOP)
		goto out;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("Penguin overflow trap from kernel mode", regs);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	force_sig_fault(SIGEMT, EMT_TAGOVF,
			(void __user *)regs->tpc, 0);
out:
	exception_exit(prev_state);
}

void do_div0(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_TRAP, "integer division by zero", regs,
		       0, 0x28, SIGFPE) == NOTIFY_STOP)
		goto out;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("TL0: Kernel divide by zero.", regs);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	force_sig_fault(SIGFPE, FPE_INTDIV,
			(void __user *)regs->tpc, 0);
out:
	exception_exit(prev_state);
}

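/* Dump the instruction stream around a kernel fault: three words
 * before the faulting instruction, the faulting word itself marked
 * with angle brackets, and five words after it.
 */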
static void instruction_dump(unsigned int *pc)
{
	int i;

	if ((((unsigned long) pc) & 3))
		return;

	printk("Instruction DUMP:");
	for (i = -3; i < 6; i++)
		printk("%c%08x%c", i ? ' ' : '<', pc[i], i ? ' ' : '>');
	printk("\n");
}

static void user_instruction_dump(unsigned int __user *pc)
{
	int i;
	unsigned int buf[9];

	if ((((unsigned long) pc) & 3))
		return;

	if (copy_from_user(buf, pc - 3, sizeof(buf)))
		return;

	printk("Instruction DUMP:");
	for (i = 0; i < 9; i++)
		printk("%c%08x%c", i == 3 ? '<' : ' ', buf[i], i == 3 ? '>' : ' ');
	printk("\n");
}

void show_stack(struct task_struct *tsk, unsigned long *_ksp, const char *loglvl)
{
	unsigned long fp, ksp;
	struct thread_info *tp;
	int count = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph = 0;
#endif

	ksp = (unsigned long) _ksp;
	if (!tsk)
		tsk = current;
	tp = task_thread_info(tsk);
	if (ksp == 0UL) {
		if (tsk == current)
			asm("mov %%fp, %0" : "=r" (ksp));
		else
			ksp = tp->ksp;
	}
	if (tp == current_thread_info())
		flushw_all();

	fp = ksp + STACK_BIAS;

	printk("%sCall Trace:\n", loglvl);
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(tp, fp))
			break;
		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(tp, regs)) {
			if (!(regs->tstate & TSTATE_PRIV))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}

		print_ip_sym(loglvl, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
			struct ftrace_ret_stack *ret_stack;
			ret_stack = ftrace_graph_get_ret_stack(tsk, graph);
			if (ret_stack) {
				pc = ret_stack->ret;
				print_ip_sym(loglvl, pc);
				graph++;
			}
		}
#endif
	} while (++count < 16);
}

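/* Step one frame up the kernel stack by following the saved frame
 * pointer (%i6), undoing the stack bias.  Returns NULL at the end
 * of the call chain.
 */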
static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
{
	unsigned long fp = rw->ins[6];

	if (!fp)
		return NULL;

	return (struct reg_window *) (fp + STACK_BIAS);
}

void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;
	int count = 0;

	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	__asm__ __volatile__("flushw");
	show_regs(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	if (regs->tstate & TSTATE_PRIV) {
		struct thread_info *tp = current_thread_info();
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		while (rw &&
		       count++ < 30 &&
		       kstack_valid(tp, (unsigned long) rw)) {
			printk("Caller[%016lx]: %pS\n", rw->ins[7],
			       (void *) rw->ins[7]);

			rw = kernel_stack_up(rw);
		}
		instruction_dump((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump((unsigned int __user *) regs->tpc);
	}
	if (panic_on_oops)
		panic("Fatal exception");
	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
EXPORT_SYMBOL(die_if_kernel);

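/* Match instructions in the IMPDEP1 opcode space (op = 2, op3 = 0x36),
 * which is where the VIS instructions live.
 */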
#define VIS_OPCODE_MASK	((0x3 << 30) | (0x3f << 19))
#define VIS_OPCODE_VAL	((0x2 << 30) | (0x36 << 19))

void do_illegal_instruction(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;

	if (notify_die(DIE_TRAP, "illegal instruction", regs,
		       0, 0x10, SIGILL) == NOTIFY_STOP)
		goto out;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("Kernel illegal instruction", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
			if (handle_popc(insn, regs))
				goto out;
		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
			if (handle_ldf_stq(insn, regs))
				goto out;
		} else if (tlb_type == hypervisor) {
			if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
				if (!vis_emul(regs, insn))
					goto out;
			} else {
				struct fpustate *f = FPUSTATE;

				/* On UltraSPARC T2 and later, FPU insns which
				 * are not implemented in HW signal an illegal
				 * instruction trap and do not set the FP trap
				 * type in the %fsr to unimplemented_FPop.
				 */
				if (do_mathemu(regs, f, true))
					goto out;
			}
		}
	}
	force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)pc, 0);
out:
	exception_exit(prev_state);
}

void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		goto out;

	if (regs->tstate & TSTATE_PRIV) {
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
		goto out;
	}
	if (is_no_fault_exception(regs))
		goto out;

	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)sfar, 0);
out:
	exception_exit(prev_state);
}
2640
2641void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
2642{
2643 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2644 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2645 return;
2646
2647 if (regs->tstate & TSTATE_PRIV) {
2648 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2649 return;
2650 }
2651 if (is_no_fault_exception(regs))
2652 return;
2653
2654 force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) addr, 0);
2655}
2656
2657/* sun4v_mem_corrupt_detect_precise() - Handle precise exception on an ADI
2658 * tag mismatch.
2659 *
2660 * ADI version tag mismatch on a load from memory always results in a
2661 * precise exception. Tag mismatch on a store to memory will result in
2662 * precise exception if MCDPER or PMCDPER is set to 1.
2663 */
void sun4v_mem_corrupt_detect_precise(struct pt_regs *regs, unsigned long addr,
				      unsigned long context)
{
	if (notify_die(DIE_TRAP, "memory corruption precise exception", regs,
		       0, 0x8, SIGSEGV) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* MCD exception could happen because the task was running
		 * a system call with MCD enabled and passed a non-versioned
		 * pointer or pointer with bad version tag to the system
		 * call.
		 */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Looks like a bad syscall parameter */
#ifdef DEBUG_EXCEPTIONS
			pr_emerg("Exception: PC<%016lx> faddr<UNKNOWN>\n",
				 regs->tpc);
			pr_emerg("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
				 regs->tpc, entry->fixup);
#endif
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		pr_emerg("%s: ADDR[%016lx] CTX[%lx], going.\n",
			 __func__, addr, context);
		die_if_kernel("MCD precise", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	force_sig_fault(SIGSEGV, SEGV_ADIPERR, (void __user *)addr, 0);
}

void do_privop(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_TRAP, "privileged operation", regs,
		       0, 0x11, SIGILL) == NOTIFY_STOP)
		goto out;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	force_sig_fault(SIGILL, ILL_PRVOPC,
			(void __user *)regs->tpc, 0);
out:
	exception_exit(prev_state);
}

void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}

/* Trap level 1 stuff or other traps we should never see... */
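/* Each handler below dumps the trap stack that the trap entry code
 * saved past pt_regs (see struct tl1_traplog) and then panics via
 * die_if_kernel(): taking one of these traps while already at TL > 0
 * means we trapped inside a trap handler, which is unrecoverable.
 */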
void do_cee(struct pt_regs *regs)
{
	exception_enter();
	die_if_kernel("TL0: Cache Error Exception", regs);
}

void do_div0_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}

void do_fpieee_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}

void do_fpother_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}

void do_ill_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}

void do_irq_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}

void do_lddfmna_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}

void do_stdfmna_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}

void do_paw(struct pt_regs *regs)
{
	exception_enter();
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}

void do_paw_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}

void do_vaw(struct pt_regs *regs)
{
	exception_enter();
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}

void do_vaw_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}

void do_tof_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}

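/* Backs the getpsr software trap used by 32-bit compat code to read
 * the old v8 %psr: fold the relevant TSTATE bits into a PSR image via
 * tstate_to_psr(), hand it back in UREG_I0, and step the PC past the
 * trapping instruction.
 */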
void do_getpsr(struct pt_regs *regs)
{
	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}

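/* cpu_mondo_counter[] counts CPU mondo interrupts delivered to each
 * cpu, letting cross-call senders judge whether the receiver is making
 * forward progress.  trap_block[] is the per-cpu state that the
 * trap-time assembly reaches through the TRAP_PER_CPU_* offsets
 * sanity-checked in trap_init() below.
 */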
u64 cpu_mondo_counter[NR_CPUS] = {0};
struct trap_per_cpu trap_block[NR_CPUS];
EXPORT_SYMBOL(trap_block);

/* This can get invoked before sched_init() so play it super safe
 * and use hard_smp_processor_id().
 */
void notrace init_cur_cpu_trap(struct thread_info *t)
{
	int cpu = hard_smp_processor_id();
	struct trap_per_cpu *p = &trap_block[cpu];

	p->thread = t;
	p->pgd_paddr = 0;
}

extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);
extern void tsb_config_offsets_are_bolixed_dave(void);

/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check. */
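	/* The TI_*, TRAP_PER_CPU_* and TSB_CONFIG_* constants are
	 * maintained by hand in the asm headers and used as immediate
	 * offsets by the trap entry assembly, so any drift from the C
	 * structure layouts must break the build rather than silently
	 * corrupt state at trap time.
	 */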
	BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
		     TI_FLAGS != offsetof(struct thread_info, flags) ||
		     TI_CPU != offsetof(struct thread_info, cpu) ||
		     TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
		     TI_KSP != offsetof(struct thread_info, ksp) ||
		     TI_FAULT_ADDR != offsetof(struct thread_info,
					       fault_address) ||
		     TI_KREGS != offsetof(struct thread_info, kregs) ||
		     TI_UTRAPS != offsetof(struct thread_info, utraps) ||
		     TI_REG_WINDOW != offsetof(struct thread_info,
					       reg_window) ||
		     TI_RWIN_SPTRS != offsetof(struct thread_info,
					       rwbuf_stkptrs) ||
		     TI_GSR != offsetof(struct thread_info, gsr) ||
		     TI_XFSR != offsetof(struct thread_info, xfsr) ||
		     TI_PRE_COUNT != offsetof(struct thread_info,
					      preempt_count) ||
		     TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
		     TI_CURRENT_DS != offsetof(struct thread_info,
					       current_ds) ||
		     TI_KUNA_REGS != offsetof(struct thread_info,
					      kern_una_regs) ||
		     TI_KUNA_INSN != offsetof(struct thread_info,
					      kern_una_insn) ||
		     TI_FPREGS != offsetof(struct thread_info, fpregs) ||
		     (TI_FPREGS & (64 - 1)));

	BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu,
						     thread) ||
		     (TRAP_PER_CPU_PGD_PADDR !=
		      offsetof(struct trap_per_cpu, pgd_paddr)) ||
		     (TRAP_PER_CPU_CPU_MONDO_PA !=
		      offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
		     (TRAP_PER_CPU_DEV_MONDO_PA !=
		      offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
		     (TRAP_PER_CPU_RESUM_MONDO_PA !=
		      offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
		     (TRAP_PER_CPU_RESUM_KBUF_PA !=
		      offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
		     (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
		      offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
		     (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
		      offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
		     (TRAP_PER_CPU_FAULT_INFO !=
		      offsetof(struct trap_per_cpu, fault_info)) ||
		     (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
		      offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
		     (TRAP_PER_CPU_CPU_LIST_PA !=
		      offsetof(struct trap_per_cpu, cpu_list_pa)) ||
		     (TRAP_PER_CPU_TSB_HUGE !=
		      offsetof(struct trap_per_cpu, tsb_huge)) ||
		     (TRAP_PER_CPU_TSB_HUGE_TEMP !=
		      offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
		     (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
		      offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
		     (TRAP_PER_CPU_CPU_MONDO_QMASK !=
		      offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
		     (TRAP_PER_CPU_DEV_MONDO_QMASK !=
		      offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
		     (TRAP_PER_CPU_RESUM_QMASK !=
		      offsetof(struct trap_per_cpu, resum_qmask)) ||
		     (TRAP_PER_CPU_NONRESUM_QMASK !=
		      offsetof(struct trap_per_cpu, nonresum_qmask)) ||
		     (TRAP_PER_CPU_PER_CPU_BASE !=
		      offsetof(struct trap_per_cpu, __per_cpu_base)));

	BUILD_BUG_ON((TSB_CONFIG_TSB !=
		      offsetof(struct tsb_config, tsb)) ||
		     (TSB_CONFIG_RSS_LIMIT !=
		      offsetof(struct tsb_config, tsb_rss_limit)) ||
		     (TSB_CONFIG_NENTRIES !=
		      offsetof(struct tsb_config, tsb_nentries)) ||
		     (TSB_CONFIG_REG_VAL !=
		      offsetof(struct tsb_config, tsb_reg_val)) ||
		     (TSB_CONFIG_MAP_VADDR !=
		      offsetof(struct tsb_config, tsb_map_vaddr)) ||
		     (TSB_CONFIG_MAP_PTE !=
		      offsetof(struct tsb_config, tsb_map_pte)));

	/* Attach to the address space of init_task. On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
}