/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/sync.h>
#include <asm/war.h>
#include <asm/thread_info.h>

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this: it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)
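
/*
 * Illustrative sketch, not part of the build: the dispatch above
 * corresponds roughly to the following C.  Cause[6:2] is the ExcCode
 * field, so masking the Cause register with 0x7c yields ExcCode already
 * scaled for 4-byte table entries; on 64-bit kernels the extra shift
 * rescales it for 8-byte pointers.  exception_handlers[] is the table
 * filled in by set_except_vector() in traps.c.
 *
 *	unsigned long cause = read_c0_cause();
 *	unsigned long offset = cause & 0x7c;		// ExcCode << 2
 *	#ifdef CONFIG_64BIT
 *	offset <<= 1;					// ExcCode << 3
 *	#endif
 *	void (*handler)(void) = *(void (**)(void))
 *			((char *)exception_handlers + offset);
 *	handler();					// jr k0
 */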

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this: it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Trouble: we may now have two dirty primary cache lines for the
	 * same physical address.  We can safely invalidate the line pointed
	 * to by c0_badvaddr because the load / store will be re-executed
	 * after we return from this exception handler.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4				# Is this ...
	and	k0, k1				# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)	# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)
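
/*
 * Illustrative sketch, not part of the build: handle_vced above is, in
 * effect (cache_op() as in asm/r4kcache.h, the counters as in traps.c),
 *
 *	unsigned long va = read_c0_badvaddr() & ~3UL;
 *	write_c0_taglo(0);
 *	cache_op(Index_Store_Tag_D, va);	// invalidate primary line
 *	cache_op(Hit_Writeback_Inv_SD, va);	// flush secondary line
 *	#ifdef CONFIG_PROC_FS
 *	vced_count++;
 *	#endif
 *	// eret: the faulting load / store simply re-executes
 */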

	__FINIT

	.align	5	/* 32 byte rollback region */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	nop
	nop
	nop
#ifdef CONFIG_CPU_MICROMIPS
	nop
	nop
	nop
	nop
#endif
	.set	MIPS_ISA_ARCH_LEVEL_RAW
	wait
	/* end of rollback region (the region size must be a power of two) */
1:
	jr	ra
	nop
	.set	pop
	END(__r4k_wait)

	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, \handler
	MTC0	k0, CP0_EPC
	.set	pop
	.endm
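
/*
 * Illustrative note, not part of the build: the ori/xori pair rounds
 * EPC down to a 32-byte boundary without needing a second scratch
 * register, since for any address x
 *
 *	(x | 0x1f) ^ 0x1f  ==  x & ~0x1fUL
 *
 * If the rounded EPC equals __r4k_wait, the interrupt hit inside the
 * rollback region and EPC is rewound to the region's start, so the
 * _TIF_NEED_RESCHED test is re-run before "wait" is reached again.
 */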

	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
	.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and, if so, ignore this interrupt for now.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state.  If an interrupt is
	 * taken after interrupts are disabled but before the state is
	 * updated, it will appear to restore_all that it is incorrectly
	 * returning with interrupts disabled.
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL docfi=1
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp				# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jal	plat_irq_dispatch

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)
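
/*
 * Illustrative sketch, not part of the build: the stack switch above
 * corresponds roughly to this C, where irq_stack[] is the per-CPU
 * array defined in arch/mips/kernel/irq.c and masking sp with
 * ~(_THREAD_SIZE - 1) recovers the base of whatever stack we are
 * currently running on:
 *
 *	unsigned long base = sp & ~(_THREAD_SIZE - 1);
 *	if (base != (unsigned long)irq_stack[cpu]) {
 *		unsigned long top = (unsigned long)irq_stack[cpu]
 *				    + _IRQ_STACK_START;
 *		*(unsigned long *)top = sp;	// let the unwinder follow
 *		sp = top;
 *	}
 *	plat_irq_dispatch();
 *	sp = saved_sp;				// s1
 */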

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at initialization time.
 *
 * Be careful when changing this: it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler.
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME docfi=1
	SAVE_AT docfi=1
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	jr	v1
FEXPORT(except_vec_vi_ori)
	 ori	v0, 0		/* Patched */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)
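
/*
 * Illustrative sketch, not part of the build: set_vi_srs_handler() in
 * traps.c patches the two 16-bit immediates above so that v0 carries
 * the vectored handler's address into except_vec_vi_handler, roughly:
 *
 *	u32 *lui = (u32 *)&except_vec_vi_lui;
 *	u32 *ori = (u32 *)&except_vec_vi_ori;
 *	*lui = (*lui & ~0xffff) | ((addr >> 16) & 0xffff);
 *	*ori = (*ori & ~0xffff) | (addr & 0xffff);
 */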

/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler which is passed in $v0
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp				# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jalr	v0

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	andi	k0, k0, MIPS_DEBUG_DBP	# Check for SDBBP.
	beqz	k0, ejtag_return

#ifdef CONFIG_SMP
1:	PTR_LA	k0, ejtag_debug_buffer_spinlock
	__SYNC(full, loongson3_war)
2:	ll	k0, 0(k0)
	bnez	k0, 2b
	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sc	k0, 0(k0)
	beqz	k0, 1b
# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
	sync
# endif

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)

	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1

	PTR_LA	k1, ejtag_debug_buffer
	LONG_L	k1, 0(k1)
	LONG_S	k1, 0(k0)

	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sw	zero, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
#endif

	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL

#ifdef CONFIG_SMP
	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1
	LONG_L	k1, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)
#endif

ejtag_return:
	back_to_back_c0_hazard
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)
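
/*
 * Illustrative sketch, not part of the build: only k0/k1 may be
 * clobbered this early in a debug exception, so the generic spinlock
 * code cannot be used and the ll/sc sequence above hand-rolls a
 * test-and-set lock (the non-zero value stored is simply the lock's
 * own address).  In C11-atomic terms it is roughly:
 *
 *	while (atomic_exchange(&ejtag_debug_buffer_spinlock, 1))
 *		;				// 2: ll / bnez / sc / beqz
 *	ejtag_debug_buffer = k1;		// stash k1 ...
 *	ejtag_debug_buffer_per_cpu[cpu] = ejtag_debug_buffer;
 *	atomic_store(&ejtag_debug_buffer_spinlock, 0);	// ... then unlock
 */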

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
#ifdef CONFIG_SMP
EXPORT(ejtag_debug_buffer_spinlock)
	.fill	LONGSIZE
EXPORT(ejtag_debug_buffer_per_cpu)
	.fill	LONGSIZE * NR_CPUS
#endif
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.cfi_signal_frame
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)
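
/*
 * Illustrative sketch, not part of the build: the Status update above
 * is, in C terms (read_c0_status()/write_c0_status() from
 * asm/mipsregs.h),
 *
 *	unsigned long status = read_c0_status();
 *	status |= ST0_EXL;			// keep exception level set
 *	status &= ~(ST0_BEV | ST0_ERL);		// normal vectors, normal
 *						// segment mapping
 *	write_c0_status(status);
 *	// _ehb then clears the execution hazard before SAVE_ALL
 */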

	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	CLI
	TRACE_IRQS_OFF
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon). */
	.set	mips1
	SET_HARDFLOAT
	cfc1	a1, fcr31
	.set	pop
	.endm

	.macro	__build_clear_msa_fpe
	CLI
	TRACE_IRQS_OFF
	_cfcmsa	a1, MSA_CSR
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__build_clear_gsexc
	.set	push
	/*
	 * We need to specify a selector to access the CP0.Diag1 (GSCause)
	 * register.  All GSExc-equipped processors have MIPS32.
	 */
	.set	mips32
	mfc0	a1, CP0_DIAGNOSTIC1
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the ASM_PRINT argument as a string containing
	   string escapes and emits bogus warnings if it thinks it
	   recognizes an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ... */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	ASM_PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	ASM_PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0, exception_count_\exception
	LONG_ADDIU t0, 1
	LONG_S	t0, exception_count_\exception
	.comm	exception_count_\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.cfi_signal_frame
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	jal	do_\handler
	j	ret_from_exception
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER \exception \handler \clear \verbose _int
	.endm
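
/*
 * Illustrative expansion, not part of the build: for example, the
 * invocation "BUILD_HANDLER ov ov sti silent" below generates roughly
 *
 *	NESTED(handle_ov, PT_SIZE, sp)
 *	.cfi_signal_frame
 *	.set	noat
 *	SAVE_ALL
 *	FEXPORT(handle_ov_int)
 *	TRACE_IRQS_ON
 *	STI
 *	.set	at
 *	move	a0, sp
 *	jal	do_ov
 *	j	ret_from_exception
 *	END(handle_ov)
 */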

	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
#ifdef CONFIG_MIPS_FP_SUPPORT
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
#endif
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER gsexc gsexc gsexc silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
	LEAF(handle_ri_rdhwr_tlbp)
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall through */
	END(handle_ri_rdhwr_tlbp)
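
/*
 * Illustrative sketch, not part of the build: before the fast path
 * dereferences EPC it asks the TLB whether the even/odd page pair
 * containing EPC is mapped at all, roughly:
 *
 *	unsigned long vpn2 = epc & ~((1UL << (_PAGE_SHIFT + 1)) - 1);
 *	write_c0_entryhi(vpn2 | (read_c0_entryhi()
 *			& (MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX)));
 *	tlb_probe();
 *	if ((int)read_c0_index() < 0)
 *		goto handle_ri;		/* not mapped: take the slow path */
 */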

	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1
	beqz	k0, 1f
	 xor	k1, k0
	lhu	k0, (k1)
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d
	b	docheck
	 ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri
	 lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	 rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	push
	.set	arch=r4000
	eret
	.set	pop
#endif
	.set	pop
	END(handle_ri_rdhwr)
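
/*
 * Illustrative sketch, not part of the build: the fast path above
 * emulates "rdhwr v1, $29" (hardware register 29 is UserLocal, the TLS
 * pointer) without building a full exception frame, roughly:
 *
 *	u32 insn = *(u32 *)epc;	/* or two fused halfwords on microMIPS */
 *	if (insn != 0x7c03e83b)
 *		goto handle_ri;				/* not ours */
 *	v1 = current_thread_info()->tp_value;		/* TI_TP_VALUE */
 *	write_c0_epc(epc + 4);		/* step over the emulated insn */
 *	/* eret */
 */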

#ifdef CONFIG_CPU_R4X00_BUGS64
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif