/*
 * arch/sh/kernel/cpu/sh3/entry.S
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2003 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <cpu/mmu_context.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/thread_info.h>

! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra when the branch
! target is too far away, but this causes an illegal slot exception.

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * NOTE: This code uses a convention that instructions in the delay slot
 * of a transfer-control instruction are indented by an extra space, thus:
 *
 *	jmp	@k0		! control-transfer instruction
 *	 ldc	k1, ssr		! delay slot
 *
 * Stack layout in 'ret_from_syscall':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in ptrace.c and ptrace.h
 *
 *	r0
 *	...
 *	r15 = stack pointer
 *	spc
 *	pr
 *	ssr
 *	gbr
 *	mach
 *	macl
 *	syscall #
 *
 */
/* Offsets to the stack */
OFF_R0	= 0		/* Return value. New ABI also arg4 */
OFF_R1	= 4		/* New ABI: arg5 */
OFF_R2	= 8		/* New ABI: arg6 */
OFF_R3	= 12		/* New ABI: syscall_nr */
OFF_R4	= 16		/* New ABI: arg0 */
OFF_R5	= 20		/* New ABI: arg1 */
OFF_R6	= 24		/* New ABI: arg2 */
OFF_R7	= 28		/* New ABI: arg3 */
OFF_SP	= (15*4)
OFF_PC	= (16*4)
OFF_SR	= (16*4+8)
OFF_TRA	= (16*4+6*4)
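
/*
 * For reference, the offsets above line up with a pt_regs-style frame:
 * sixteen general registers followed by pc (spc), pr, sr (ssr), gbr,
 * mach, macl and the syscall/trap number.  A rough C sketch of that
 * layout (field names are illustrative; the authoritative definition is
 * the kernel's struct pt_regs):
 *
 *	struct frame_sketch {
 *		unsigned long regs[16];		// r0..r15, OFF_R0..OFF_SP
 *		unsigned long pc;		// spc slot, OFF_PC
 *		unsigned long pr;
 *		unsigned long sr;		// ssr slot, OFF_SR
 *		unsigned long gbr;
 *		unsigned long mach;
 *		unsigned long macl;
 *		long tra;			// syscall #, OFF_TRA
 *	};
 */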

#define k0	r0
#define k1	r1
#define k2	r2
#define k3	r3
#define k4	r4

#define g_imask		r6	/* r6_bank1 */
#define k_g_imask	r6_bank	/* r6_bank1 */
#define current		r7	/* r7_bank1 */

#include <asm/entry-macros.S>

/*
 * Kernel mode register usage:
 *	k0	scratch
 *	k1	scratch
 *	k2	scratch (Exception code)
 *	k3	scratch (Return address)
 *	k4	scratch
 *	k5	reserved
 *	k6	Global Interrupt Mask (0--15 << 4)
 *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
 */

!
! TLB Miss / Initial Page write exception handling
! _and_
! TLB hits, but the access violates the protection.
! It may still be a valid access, such as stack growth and/or C-O-W.
!
!
! Find the pmd/pte entry and load the TLB.
! If it's not found, cause an address error (SEGV).
!
! Although this could be written in assembly language (and it'd be faster),
! this first version depends heavily on the C implementation.
!

#if defined(CONFIG_MMU)
	.align	2
ENTRY(tlb_miss_load)
	bra	call_handle_tlbmiss
	 mov	#0, r5

	.align	2
ENTRY(tlb_miss_store)
	bra	call_handle_tlbmiss
	 mov	#FAULT_CODE_WRITE, r5

	.align	2
ENTRY(initial_page_write)
	bra	call_handle_tlbmiss
	 mov	#FAULT_CODE_INITIAL, r5

	.align	2
ENTRY(tlb_protection_violation_load)
	bra	call_do_page_fault
	 mov	#FAULT_CODE_PROT, r5

	.align	2
ENTRY(tlb_protection_violation_store)
	bra	call_do_page_fault
	 mov	#(FAULT_CODE_PROT | FAULT_CODE_WRITE), r5

call_handle_tlbmiss:
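	/*
	 * A rough sketch of the call below, following the normal SH C ABI:
	 * r4 = saved register frame (r15), r5 = the FAULT_CODE_* value set
	 * by the entry points above, r6 = faulting address from MMU_TEA.
	 * A non-zero return from handle_tlbmiss means the fast path could
	 * not resolve the fault, so we fall through to do_page_fault with
	 * the fault code restored from r8.
	 */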
	mov.l	1f, r0
	mov	r5, r8
	mov.l	@r0, r6
	mov.l	2f, r0
	sts	pr, r10
	jsr	@r0
	 mov	r15, r4
	!
	tst	r0, r0
	bf/s	0f
	 lds	r10, pr
	rts
	 nop
0:
	mov	r8, r5
call_do_page_fault:
	mov.l	1f, r0
	mov.l	@r0, r6

	mov.l	3f, r0
	mov.l	4f, r1
	mov	r15, r4
	jmp	@r0
	 lds	r1, pr

	.align	2
1:	.long	MMU_TEA
2:	.long	handle_tlbmiss
3:	.long	do_page_fault
4:	.long	ret_from_exception

	.align	2
ENTRY(address_error_load)
	bra	call_dae
	 mov	#0, r5		! writeaccess = 0

	.align	2
ENTRY(address_error_store)
	bra	call_dae
	 mov	#1, r5		! writeaccess = 1

	.align	2
call_dae:
	mov.l	1f, r0
	mov.l	@r0, r6		! address
	mov.l	2f, r0
	jmp	@r0
	 mov	r15, r4		! regs

	.align	2
1:	.long	MMU_TEA
2:	.long	do_address_error
#endif /* CONFIG_MMU */

#if defined(CONFIG_SH_STANDARD_BIOS)
	/* Unwind the stack and jmp to the debug entry */
ENTRY(sh_bios_handler)
	mov.l	1f, r8
	bsr	restore_regs
	 nop

	lds	k2, pr		! restore pr
	mov	k4, r15
	!
	mov.l	2f, k0
	mov.l	@k0, k0
	jmp	@k0
	 ldc	k3, ssr
	.align	2
1:	.long	0x300000f0
2:	.long	gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */

! restore_regs()
! - restore r0, r1, r2, r3, r4, r5, r6, r7 from the stack
! - switch bank
! - restore r8, r9, r10, r11, r12, r13, r14, r15 from the stack
! - restore spc, pr*, ssr, gbr, mach, macl, skip default tra
! k2 returns original pr
! k3 returns original sr
! k4 returns original stack pointer
! r8 passes SR bitmask, overwritten with restored data on return
! r9 trashed
! BL=0 on entry, on exit BL=1 (depending on r8).

ENTRY(restore_regs)
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	!
	stc	sr, r9
	or	r8, r9
	ldc	r9, sr
	!
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k4	! original stack pointer
	ldc.l	@r15+, spc
	mov.l	@r15+, k2	! original PR
	mov.l	@r15+, k3	! original SR
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	rts
	 add	#4, r15		! Skip syscall number

restore_all:
	mov.l	7f, r8
	bsr	restore_regs
	 nop

	lds	k2, pr		! restore pr
	!
	! Calculate new SR value
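	!
	! Roughly: build the constant 0xffffff0f (#0xf0 -> extu.b -> not)
	! and use it to clear the IMASK field (SR bits 4-7) of the saved SR,
	! then either keep IMASK=15 if the interrupted context already had
	! all interrupts masked, or substitute the global interrupt mask
	! kept (pre-shifted into bits 4-7) in g_imask.
	!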
	mov	k3, k2		! original SR value
	mov	#0xfffffff0, k1
	extu.b	k1, k1
	not	k1, k1
	and	k1, k2		! Mask original SR value
	!
	mov	k3, k0		! Calculate IMASK-bits
	shlr2	k0
	and	#0x3c, k0
	cmp/eq	#0x3c, k0
	bt/s	6f
	 shll2	k0
	mov	g_imask, k0
	!
6:	or	k0, k2		! Set the IMASK-bits
	ldc	k2, ssr
	!
	mov	k4, r15
	rte
	 nop

	.align	2
5:	.long	0x00001000	! DSP
7:	.long	0x30000000

! common exception handler
#include "../../entry-common.S"

! Exception Vector Base
!
!	Should be aligned to a page boundary.
!
	.balign	4096,0,4096
ENTRY(vbr_base)
	.long	0
!
! 0x100: General exception vector
!
	.balign	256,0,256
general_exception:
	bra	handle_exception
	 sts	pr, k3		! save original pr value in k3

! prepare_stack()
! - roll back gRB
! - switch to kernel stack
! k0 returns original sp (after roll back)
! k1 trashed
! k2 trashed

prepare_stack:
#ifdef CONFIG_GUSA
	! Check for roll back gRB (User and Kernel)
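	!
	! gUSA convention, roughly: while a user-space atomic sequence is in
	! progress, r15 holds the (small, negative) length of the sequence,
	! r0 the address just past its end, and r1 the saved stack pointer.
	! If we trapped inside the sequence (saved PC below saved r0), roll
	! SPC back to its start so it restarts from scratch; the real stack
	! pointer is restored from r1 in either case.
	!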
	mov	r15, k0
	shll	k0
	bf/s	1f
	 shll	k0
	bf/s	1f
	 stc	spc, k1
	stc	r0_bank, k0
	cmp/hs	k0, k1		! test k1 (saved PC) >= k0 (saved r0)
	bt/s	2f
	 stc	r1_bank, k1

	add	#-2, k0
	add	r15, k0
	ldc	k0, spc		! PC = saved r0 + r15 - 2
2:	mov	k1, r15		! SP = r1
1:
#endif
	! Switch to kernel stack if needed
	stc	ssr, k0		! Is it from kernel space?
	shll	k0		! Check MD bit (bit 30) by shifting it into...
	shll	k0		! ...the T bit
	bt/s	1f		! It's a kernel to kernel transition.
	 mov	r15, k0		! save original stack to k0
	/* User space to kernel */
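	/*
	 * current (r7_bank1) points at the current thread_info, so the
	 * kernel stack top is current + THREAD_SIZE.  THREAD_SIZE is too
	 * large for the 8-bit signed immediate of "mov #imm", so it is
	 * loaded pre-shifted right by 10 and rebuilt with shll8 + shll2.
	 * Roughly the C equivalent: r15 = (long)current + THREAD_SIZE.
	 */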
	mov	#(THREAD_SIZE >> 10), k1
	shll8	k1		! k1 := THREAD_SIZE
	shll2	k1
	add	current, k1
	mov	k1, r15		! change to kernel stack
	!
1:
	rts
	 nop

!
! 0x400: Instruction and Data TLB miss exception vector
!
	.balign	1024,0,1024
tlb_miss:
	sts	pr, k3		! save original pr value in k3

handle_exception:
	mova	exception_data, k0

	! Setup stack and save DSP context (k0 contains original r15 on return)
	bsr	prepare_stack
	 PREF(k0)

	! Save registers / Switch to bank 0
	mov.l	5f, k2		! vector register address
	mov.l	1f, k4		! SR bits to clear in k4
	bsr	save_regs	! needs original pr value in k3
	 mov.l	@k2, k2		! read out vector and keep in k2

handle_exception_special:
	setup_frame_reg

	! Setup return address and jump to exception handler
	mov.l	7f, r9		! fetch return address
	stc	r2_bank, r0	! k2 (vector)
	mov.l	6f, r10
374 shlr2 r0
375 shlr r0
376 mov.l @(r0, r10), r10
377 jmp @r10
378 lds r9, pr ! put return address in pr
379
380 .align L1_CACHE_SHIFT
381
382! save_regs()
383! - save default tra, macl, mach, gbr, ssr, pr* and spc on the stack
384! - save r15*, r14, r13, r12, r11, r10, r9, r8 on the stack
385! - switch bank
386! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
387! k0 contains original stack pointer*
388! k1 trashed
389! k3 passes original pr*
390! k4 passes SR bitmask
391! BL=1 on entry, on exit BL=0.
392
393ENTRY(save_regs)
394 mov #-1, r1
395 mov.l k1, @-r15 ! set TRA (default: -1)
396 sts.l macl, @-r15
397 sts.l mach, @-r15
398 stc.l gbr, @-r15
399 stc.l ssr, @-r15
400 mov.l k3, @-r15 ! original pr in k3
401 stc.l spc, @-r15
402
403 mov.l k0, @-r15 ! original stack pointer in k0
404 mov.l r14, @-r15
405 mov.l r13, @-r15
406 mov.l r12, @-r15
407 mov.l r11, @-r15
408 mov.l r10, @-r15
409 mov.l r9, @-r15
410 mov.l r8, @-r15
411
412 mov.l 0f, k3 ! SR bits to set in k3
413
414 ! fall-through
415
416! save_low_regs()
417! - modify SR for bank switch
418! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
419! k3 passes bits to set in SR
420! k4 passes bits to clear in SR
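!
! With the constants used in this file that means: set FD and IMASK=15
! (0f below), clear RB and BL (1f below), i.e. switch to register bank 0
! and unblock exceptions while keeping all interrupt levels masked.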

ENTRY(save_low_regs)
	stc	sr, r8
	or	k3, r8
	and	k4, r8
	ldc	r8, sr

	mov.l	r7, @-r15
	mov.l	r6, @-r15
	mov.l	r5, @-r15
	mov.l	r4, @-r15
	mov.l	r3, @-r15
	mov.l	r2, @-r15
	mov.l	r1, @-r15
	rts
	 mov.l	r0, @-r15

!
! 0x600: Interrupt / NMI vector
!
	.balign	512,0,512
ENTRY(handle_interrupt)
	sts	pr, k3		! save original pr value in k3
	mova	exception_data, k0

	! Setup stack and save DSP context (k0 contains original r15 on return)
	bsr	prepare_stack
	 PREF(k0)

	! Save registers / Switch to bank 0
	mov.l	1f, k4		! SR bits to clear in k4
	bsr	save_regs	! needs original pr value in k3
	 mov	#-1, k2		! default vector kept in k2

	setup_frame_reg

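	! Only report hardirqs-off to the irq tracing code when the IMASK
	! field of the current SR (bits 4-7) is already 15, i.e. every
	! interrupt level is masked.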
	stc	sr, r0		! get status register
	shlr2	r0
	and	#0x3c, r0
	cmp/eq	#0x3c, r0
	bf	9f
	TRACE_IRQS_OFF
9:

	! Setup return address and jump to do_IRQ
	mov.l	4f, r9		! fetch return address
	lds	r9, pr		! put return address in pr
	mov.l	2f, r4
	mov.l	3f, r9
	mov.l	@r4, r4		! pass INTEVT vector as arg0

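	! INTEVT codes are likewise spaced 0x20 apart: vector >> 3 is the
	! byte offset into the handler table (kept in r0), and
	! (vector >> 5) - 0x10 gives the IRQ number (the evt2irq()
	! conversion done by hand).  Hard IRQs start at INTEVT 0x200, so
	! anything below that (the NMI, for instance) comes out negative
	! and is dispatched as an exception instead.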
	shlr2	r4
	shlr	r4
	mov	r4, r0		! save vector->jmp table offset for later

	shlr2	r4		! vector to IRQ# conversion
	add	#-0x10, r4

	cmp/pz	r4		! is it a valid IRQ?
	bt	10f

	/*
	 * We got here as a result of taking the INTEVT path for something
	 * that isn't a valid hard IRQ, therefore we bypass the do_IRQ()
	 * path and special case the event dispatch instead. This is the
	 * expected path for the NMI (and any other brilliantly implemented
	 * exception), which effectively wants regular exception dispatch
	 * but is unfortunately reported through INTEVT rather than
	 * EXPEVT. Grr.
	 */
	mov.l	6f, r9
	mov.l	@(r0, r9), r9
	jmp	@r9
	 mov	r15, r8		! trap handlers take saved regs in r8

10:
	jmp	@r9		! Off to do_IRQ() we go.
	 mov	r15, r5		! pass saved registers as arg1

ENTRY(exception_none)
	rts
	 nop

	.align	L1_CACHE_SHIFT
exception_data:
0:	.long	0x000080f0	! FD=1, IMASK=15
1:	.long	0xcfffffff	! RB=0, BL=0
2:	.long	INTEVT
3:	.long	do_IRQ
4:	.long	ret_from_irq
5:	.long	EXPEVT
6:	.long	exception_handling_table
7:	.long	ret_from_exception