/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/sync.h>
#include <asm/war.h>
#include <asm/thread_info.h>

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
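	/*
	 * Cause.ExcCode occupies bits 6:2, so masking CP0_CAUSE with 0x7c
	 * yields the exception code already multiplied by 4 - a ready-made
	 * byte offset into the exception_handlers[] table of pointer-sized
	 * entries; 64-bit kernels double it once more below.
	 */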
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	li	k0, 14<<2
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address. We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4		# Is this ...
	and	k0, k1		# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)	# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

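/*
 * Idle ("wait") routine. The TIF_NEED_RESCHED test and the wait
 * instruction are packed into one 32-byte, 32-byte-aligned block; if an
 * interrupt arrives anywhere inside it, the rollback prologue below
 * rewinds EPC to the start of the block so that the need_resched test
 * runs again before the CPU goes back to sleep.
 */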
	.align	5	/* 32 byte rollback region */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	nop
	nop
	nop
#ifdef CONFIG_CPU_MICROMIPS
	nop
	nop
	nop
	nop
#endif
	.set	MIPS_ISA_ARCH_LEVEL_RAW
	wait
	/* end of rollback region (the region size must be power of two) */
1:
	jr	ra
	nop
	.set	pop
	END(__r4k_wait)

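/*
 * Build an alternative entry point, rollback_\handler, in front of a
 * handler. If EPC points into the 32-byte __r4k_wait block above
 * (clearing the low five bits of EPC gives the block's start address),
 * EPC is wound back to the start of the block before falling through
 * into the handler proper.
 */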
	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, \handler
	MTC0	k0, CP0_EPC
	.set	pop
	.endm

	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
	.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state. If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled.
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL docfi=1
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp		# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
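	/*
	 * The lui/daddiu/dsll sequence above (a single lui on 32-bit or
	 * sym32 kernels) builds the address of the irq_stack[] pointer
	 * array from 16-bit relocation pieces; the low 16 bits are folded
	 * into the %lo() displacement of the load below. The CPU number,
	 * scaled by SMP_CPUID_PTRSHIFT, indexes this per-CPU array.
	 */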
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jal	plat_irq_dispatch

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead. The jump instruction will be replaced
 * at initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b	/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler.
 */
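/*
 * The lui/ori immediates below are rewritten by the boot-time setup code
 * so that v0 ends up holding the address of the handler registered for
 * this vector; except_vec_vi_handler then dispatches through v0.
 */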
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME docfi=1
	SAVE_AT docfi=1
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	jr	v1
FEXPORT(except_vec_vi_ori)
	ori	v0, 0		/* Patched */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)

/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler which is passed in $v0
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
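	/*
	 * v0 still carries the dispatch target patched into except_vec_vi;
	 * TRACE_IRQS_OFF may clobber it, so park it in s0 (already saved
	 * to pt_regs by SAVE_STATIC) across the call.
	 */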
	move	s0, v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp		# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jalr	v0

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

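	/*
	 * Debug.DBp (bit 1) is set when the debug exception was raised by
	 * an sdbbp instruction. Shift it into the sign bit: if it is
	 * clear, this is not an SDBBP and we leave straight away.
	 */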
	sll	k0, k0, 30	# Check for SDBBP.
	bgez	k0, ejtag_return

#ifdef CONFIG_SMP
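	/*
	 * Only k0 is free here (k1 still holds the value we are about to
	 * save), so the lock below is open-coded with ll/sc. While held,
	 * the lock word contains the spinlock's own address (the value sc
	 * stored), which is why the retry at 2b can reuse the loaded value
	 * as the address to poll.
	 */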
1:	PTR_LA	k0, ejtag_debug_buffer_spinlock
	__SYNC(full, loongson3_war)
2:	ll	k0, 0(k0)
	bnez	k0, 2b
	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sc	k0, 0(k0)
	beqz	k0, 1b
# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
	sync
# endif

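	/*
	 * Park k1 in the shared buffer first: computing the per-CPU slot
	 * address needs both scratch registers, so k1 is staged here while
	 * the lock is held and then copied into this CPU's slot below.
	 */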
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)

	ASM_CPUID_MFC0	k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU	k0, k1

	PTR_LA	k1, ejtag_debug_buffer
	LONG_L	k1, 0(k1)
	LONG_S	k1, 0(k0)

	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sw	zero, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
#endif

	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL

#ifdef CONFIG_SMP
	ASM_CPUID_MFC0	k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU	k0, k1
	LONG_L	k1, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)
#endif

ejtag_return:
	back_to_back_c0_hazard
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
#ifdef CONFIG_SMP
EXPORT(ejtag_debug_buffer_spinlock)
	.fill	LONGSIZE
EXPORT(ejtag_debug_buffer_per_cpu)
	.fill	LONGSIZE * NR_CPUS
#endif
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.cfi_signal_frame
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
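	/*
	 * EXL is set at the same time so that the CPU stays in exception
	 * mode (kernel mode, interrupts masked) once ERL is cleared.
	 */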
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)

	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	CLI
	TRACE_IRQS_OFF
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon). */
	.set	mips1
	SET_HARDFLOAT
	cfc1	a1, fcr31
	.set	pop
	.endm

	.macro	__build_clear_msa_fpe
	CLI
	TRACE_IRQS_OFF
	_cfcmsa	a1, MSA_CSR
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__build_clear_gsexc
	.set	push
	/*
	 * We need to specify a selector to access the CP0.Diag1 (GSCause)
	 * register. All GSExc-equipped processors have MIPS32.
	 */
	.set	mips32
	mfc0	a1, CP0_DIAGNOSTIC1
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the ASM_PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes it
	   recognizes an unknown escape code. So make the arguments
	   start with an n and gas will believe \n is ok ... */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	ASM_PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	ASM_PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0, exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0, exception_count_\exception
	.comm	exception_count_\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.cfi_signal_frame
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	jal	do_\handler
	j	ret_from_exception
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER	\exception \handler \clear \verbose _int
	.endm

	BUILD_HANDLER adel ade ade silent		/* #4 */
	BUILD_HANDLER ades ade ade silent		/* #5 */
	BUILD_HANDLER ibe be cli silent			/* #6 */
	BUILD_HANDLER dbe be cli silent			/* #7 */
	BUILD_HANDLER bp bp sti silent			/* #9 */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
#ifdef CONFIG_MIPS_FP_SUPPORT
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
#endif
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER gsexc gsexc gsexc silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
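/*
 * Variant of the rdhwr fast path below used on CPUs that need an
 * explicit TLB probe first: the fast path reads the faulting
 * instruction from EPC, so if the TLB holds no entry for that address
 * we bail out to the slow path rather than risk a nested TLB exception
 * with k0/k1 live.
 */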
	LEAF(handle_ri_rdhwr_tlbp)
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_tlbp)

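/*
 * Fast path for the RI exception taken when user code executes
 * "rdhwr v1, $29" (read of the userlocal / TLS pointer register) and
 * the access traps: fetch the offending instruction, check it against
 * the known MIPS32 and microMIPS encodings, and if it matches emulate
 * it by loading the thread's tp_value into v1 and stepping EPC past
 * the instruction.
 */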
	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32: 0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
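	/*
	 * Bit 0 of EPC set means the faulting code is microMIPS: clear it
	 * to get the real address, fetch the 32-bit instruction as two
	 * halfwords (it may be only 16-bit aligned) and compare against
	 * the microMIPS encoding; otherwise load a word and compare
	 * against the MIPS32 encoding.
	 */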
	and	k0, k1, 1
	beqz	k0, 1f
	xor	k1, k0
	lhu	k0, (k1)
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d
	b	docheck
	ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr. No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	push
	.set	arch=r4000
	eret
	.set	pop
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_CPU_R4X00_BUGS64
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif