/* SPDX-License-Identifier: GPL-2.0
 *
 * arch/sh/kernel/cpu/sh3/entry.S
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2003 - 2012 Paul Mundt
 */
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <cpu/mmu_context.h>
#include <asm/page.h>
#include <asm/cache.h>

! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra when the branch
! target is too far away, but this causes an illegal slot exception.

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after a timer interrupt and after each system call.
 *
 * NOTE: This code uses a convention that instructions in the delay slot
 * of a transfer-control instruction are indented by an extra space, thus:
 *
 *	jmp	@k0		! control-transfer instruction
 *	 ldc	k1, ssr		! delay slot
 *
 * Stack layout in 'ret_from_syscall':
 *	ptrace needs to have all regs on the stack.
 *	If the order here is changed, it needs to be
 *	updated in ptrace.c and ptrace.h.
 *
 *	r0
 *	...
 *	r15 = stack pointer
 *	spc
 *	pr
 *	ssr
 *	gbr
 *	mach
 *	macl
 *	syscall #
 *
 */
/* Offsets to the stack */
OFF_R0	=  0		/* Return value. New ABI also arg4 */
OFF_R1	=  4		/* New ABI: arg5 */
OFF_R2	=  8		/* New ABI: arg6 */
OFF_R3	=  12		/* New ABI: syscall_nr */
OFF_R4	=  16		/* New ABI: arg0 */
OFF_R5	=  20		/* New ABI: arg1 */
OFF_R6	=  24		/* New ABI: arg2 */
OFF_R7	=  28		/* New ABI: arg3 */
OFF_SP	=  (15*4)
OFF_PC	=  (16*4)
OFF_SR	=  (16*4+8)
OFF_TRA	=  (16*4+6*4)
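!
! With the frame layout above, OFF_SP (60) is the saved r15 slot,
! OFF_PC (64) is spc, OFF_SR (72) is ssr, and OFF_TRA (88) is the
! syscall # slot stored after macl.
!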

#define k0	r0
#define k1	r1
#define k2	r2
#define k3	r3
#define k4	r4

#define g_imask		r6	/* r6_bank1 */
#define k_g_imask	r6_bank	/* r6_bank1 */
#define current		r7	/* r7_bank1 */

#include <asm/entry-macros.S>

/*
 * Kernel mode register usage:
 *	k0	scratch
 *	k1	scratch
 *	k2	scratch (Exception code)
 *	k3	scratch (Return address)
 *	k4	scratch
 *	k5	reserved
 *	k6	Global Interrupt Mask (0--15 << 4)
 *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
 */
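!
! With SR.RB = 1 these are the banked r0--r7; the k0--k4, g_imask and
! current definitions above give them the names used in this file.
!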

!
! TLB Miss / Initial Page write exception handling
! _and_
! TLB hits, but the access violates the protection.
! It can be a valid access, such as stack growth and/or C-O-W.
!
!
! Find the pmd/pte entry and load the TLB.
! If it's not found, cause an address error (SEGV).
!
! Although this could be written in assembly language (and it'd be faster),
! this first version depends very much on the C implementation.
!

#if defined(CONFIG_MMU)
	.align	2
ENTRY(tlb_miss_load)
	bra	call_handle_tlbmiss
	 mov	#0, r5

	.align	2
ENTRY(tlb_miss_store)
	bra	call_handle_tlbmiss
	 mov	#FAULT_CODE_WRITE, r5

	.align	2
ENTRY(initial_page_write)
	bra	call_handle_tlbmiss
	 mov	#FAULT_CODE_INITIAL, r5

	.align	2
ENTRY(tlb_protection_violation_load)
	bra	call_do_page_fault
	 mov	#FAULT_CODE_PROT, r5

	.align	2
ENTRY(tlb_protection_violation_store)
	bra	call_do_page_fault
	 mov	#(FAULT_CODE_PROT | FAULT_CODE_WRITE), r5

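!
! call_handle_tlbmiss / call_do_page_fault below hand the fault off to C
! with r4 = pt_regs (the current r15), r5 = the fault code set by the
! stubs above, and r6 = the faulting address read from MMU_TEA.
!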
call_handle_tlbmiss:
	mov.l	1f, r0
	mov	r5, r8
	mov.l	@r0, r6
	mov.l	2f, r0
	sts	pr, r10
	jsr	@r0
	 mov	r15, r4
	!
	tst	r0, r0
	bf/s	0f
	 lds	r10, pr
	rts
	 nop
0:
	mov	r8, r5
call_do_page_fault:
	mov.l	1f, r0
	mov.l	@r0, r6

	mov.l	3f, r0
	mov.l	4f, r1
	mov	r15, r4
	jmp	@r0
	 lds	r1, pr

	.align	2
1:	.long	MMU_TEA
2:	.long	handle_tlbmiss
3:	.long	do_page_fault
4:	.long	ret_from_exception

	.align	2
ENTRY(address_error_load)
	bra	call_dae
	 mov	#0, r5		! writeaccess = 0

	.align	2
ENTRY(address_error_store)
	bra	call_dae
	 mov	#1, r5		! writeaccess = 1

	.align	2
call_dae:
	mov.l	1f, r0
	mov.l	@r0, r6		! address
	mov.l	2f, r0
	jmp	@r0
	 mov	r15, r4		! regs

	.align	2
1:	.long	MMU_TEA
2:	.long	do_address_error
#endif /* CONFIG_MMU */

#if defined(CONFIG_SH_STANDARD_BIOS)
	/* Unwind the stack and jmp to the debug entry */
ENTRY(sh_bios_handler)
	mov.l	1f, r8
	bsr	restore_regs
	 nop

	lds	k2, pr			! restore pr
	mov	k4, r15
	!
	mov.l	2f, k0
	mov.l	@k0, k0
	jmp	@k0
	 ldc	k3, ssr
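	! (1f below is the SR bitmask ORed in by restore_regs: RB, BL and
	! IMASK=15; 2f holds the address of the debug entry point that is
	! loaded and jumped to above.)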
	.align	2
1:	.long	0x300000f0
2:	.long	gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */

! restore_regs()
! - restore r0, r1, r2, r3, r4, r5, r6, r7 from the stack
! - switch bank
! - restore r8, r9, r10, r11, r12, r13, r14, r15 from the stack
! - restore spc, pr*, ssr, gbr, mach, macl, skip default tra
! k2 returns original pr
! k3 returns original sr
! k4 returns original stack pointer
! r8 passes SR bitmask, overwritten with restored data on return
! r9 trashed
! BL=0 on entry, on exit BL=1 (depending on r8).
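! Callers pass the SR bits to set in r8: restore_all uses 0x30000000
! (RB and BL), while sh_bios_handler also raises IMASK to 15 (0x300000f0).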

ENTRY(restore_regs)
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	!
	stc	sr, r9
	or	r8, r9
	ldc	r9, sr
	!
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k4		! original stack pointer
	ldc.l	@r15+, spc
	mov.l	@r15+, k2		! original PR
	mov.l	@r15+, k3		! original SR
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	rts
	 add	#4, r15			! Skip syscall number

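! restore_all: unwind the whole register frame and return with rte.
! The SR value placed in ssr keeps the interrupted context's IMASK only
! if it had all interrupts masked (IMASK == 15); otherwise the global
! interrupt mask kept in g_imask (r6_bank1) is restored instead.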
restore_all:
	mov.l	7f, r8
	bsr	restore_regs
	 nop

	lds	k2, pr			! restore pr
	!
	! Calculate new SR value
	mov	k3, k2			! original SR value
	mov	#0xfffffff0, k1
	extu.b	k1, k1
	not	k1, k1
	and	k1, k2			! Mask original SR value
	!
	mov	k3, k0			! Calculate IMASK-bits
	shlr2	k0
	and	#0x3c, k0
	cmp/eq	#0x3c, k0
	bt/s	6f
	 shll2	k0
	mov	g_imask, k0
	!
6:	or	k0, k2			! Set the IMASK-bits
	ldc	k2, ssr
	!
	mov	k4, r15
	rte
	 nop

	.align	2
5:	.long	0x00001000		! DSP
7:	.long	0x30000000

! common exception handler
#include "../../entry-common.S"

! Exception Vector Base
!
! Should be aligned on a page boundary.
!
	.balign 	4096,0,4096
ENTRY(vbr_base)
	.long	0
!
! 0x100: General exception vector
!
	.balign 	256,0,256
general_exception:
	bra	handle_exception
	 sts	pr, k3		! save original pr value in k3

! prepare_stack()
! - roll back gRB
! - switch to kernel stack
! k0 returns original sp (after roll back)
! k1 trashed
! k2 trashed
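! The gUSA rollback below relies on the usual gUSA convention: while an
! atomic sequence is in progress r15 holds a small negative offset, r0
! the end address of the sequence and r1 the saved stack pointer, so an
! exception taken mid-sequence gets its SPC rolled back to the start of
! the sequence before the saved SP is restored from r1.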

prepare_stack:
#ifdef CONFIG_GUSA
	! Check for roll back gRB (User and Kernel)
	mov	r15, k0
	shll	k0
	bf/s	1f
	 shll	k0
	bf/s	1f
	 stc	spc, k1
	stc	r0_bank, k0
	cmp/hs	k0, k1		! test k1 (saved PC) >= k0 (saved r0)
	bt/s	2f
	 stc	r1_bank, k1

	add	#-2, k0
	add	r15, k0
	ldc	k0, spc		! PC = saved r0 + r15 - 2
2:	mov	k1, r15		! SP = r1
1:
#endif
	! Switch to kernel stack if needed
	stc	ssr, k0		! Is it from kernel space?
	shll	k0		! Check MD bit (bit30) by shifting it into...
	shll	k0		! ...the T bit
	bt/s	1f		! It's a kernel to kernel transition.
	 mov	r15, k0		! save original stack to k0
	/* User space to kernel */
	mov	#(THREAD_SIZE >> 10), k1
	shll8	k1		! k1 := THREAD_SIZE
	shll2	k1
	add	current, k1
	mov	k1, r15		! change to kernel stack
	!
1:
	rts
	 nop

!
! 0x400: Instruction and Data TLB miss exception vector
!
	.balign 	1024,0,1024
tlb_miss:
	sts	pr, k3		! save original pr value in k3

handle_exception:
	mova	exception_data, k0

	! Setup stack and save DSP context (k0 contains original r15 on return)
	bsr	prepare_stack
	 PREF(k0)

	! Save registers / Switch to bank 0
	mov.l	5f, k2		! vector register address
	mov.l	1f, k4		! SR bits to clear in k4
	bsr	save_regs	! needs original pr value in k3
	 mov.l	@k2, k2		! read out vector and keep in k2

handle_exception_special:
	setup_frame_reg

	! Setup return address and jump to exception handler
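	! (EXPEVT codes are spaced 0x20 apart, so code >> 5 indexes
	! exception_handling_table and code >> 3, computed below, is the
	! same index scaled to a byte offset.)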
	mov.l	7f, r9		! fetch return address
	stc	r2_bank, r0	! k2 (vector)
	mov.l	6f, r10
	shlr2	r0
	shlr	r0
	mov.l	@(r0, r10), r10
	jmp	@r10
	 lds	r9, pr		! put return address in pr

	.align	L1_CACHE_SHIFT

! save_regs()
! - save default tra, macl, mach, gbr, ssr, pr* and spc on the stack
! - save r15*, r14, r13, r12, r11, r10, r9, r8 on the stack
! - switch bank
! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
! k0 contains original stack pointer*
! k1 trashed
! k3 passes original pr*
! k4 passes SR bitmask
! BL=1 on entry, on exit BL=0.

ENTRY(save_regs)
	mov	#-1, r1
	mov.l	k1, @-r15	! set TRA (default: -1)
	sts.l	macl, @-r15
	sts.l	mach, @-r15
	stc.l	gbr, @-r15
	stc.l	ssr, @-r15
	mov.l	k3, @-r15	! original pr in k3
	stc.l	spc, @-r15

	mov.l	k0, @-r15	! original stack pointer in k0
	mov.l	r14, @-r15
	mov.l	r13, @-r15
	mov.l	r12, @-r15
	mov.l	r11, @-r15
	mov.l	r10, @-r15
	mov.l	r9, @-r15
	mov.l	r8, @-r15

	mov.l	0f, k3		! SR bits to set in k3

	! fall-through

! save_low_regs()
! - modify SR for bank switch
! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
! k3 passes bits to set in SR
! k4 passes bits to clear in SR
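! Clearing RB switches r0--r7 back to bank 0 (the interrupted context's
! registers, saved below), clearing BL re-enables exception handling
! while IMASK=15 keeps interrupts blocked, and FD=1 disables the FPU;
! see the 0f/1f constants in exception_data.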

ENTRY(save_low_regs)
	stc	sr, r8
	or	k3, r8
	and	k4, r8
	ldc	r8, sr

	mov.l	r7, @-r15
	mov.l	r6, @-r15
	mov.l	r5, @-r15
	mov.l	r4, @-r15
	mov.l	r3, @-r15
	mov.l	r2, @-r15
	mov.l	r1, @-r15
	rts
	 mov.l	r0, @-r15

!
! 0x600: Interrupt / NMI vector
!
	.balign 	512,0,512
ENTRY(handle_interrupt)
	sts	pr, k3		! save original pr value in k3
	mova	exception_data, k0

	! Setup stack and save DSP context (k0 contains original r15 on return)
	bsr	prepare_stack
	 PREF(k0)

	! Save registers / Switch to bank 0
	mov.l	1f, k4		! SR bits to clear in k4
	bsr	save_regs	! needs original pr value in k3
	 mov	#-1, k2		! default vector kept in k2

	setup_frame_reg

	stc	sr, r0		! get status register
	shlr2	r0
	and	#0x3c, r0
	cmp/eq	#0x3c, r0
	bf	9f
	TRACE_IRQS_OFF
9:

	! Setup return address and jump to do_IRQ
	mov.l	4f, r9		! fetch return address
	lds	r9, pr		! put return address in pr
	mov.l	2f, r4
	mov.l	3f, r9
	mov.l	@r4, r4		! pass INTEVT vector as arg0

	shlr2	r4
	shlr	r4
	mov	r4, r0		! save vector->jmp table offset for later

	shlr2	r4		! vector to IRQ# conversion

	mov	#0x10, r5
	cmp/hs	r5, r4		! is it a valid IRQ?
	bt	10f
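	! (INTEVT codes are spaced 0x20 apart and hard IRQ codes start at
	! 0x200, i.e. index 0x10, so anything below that is an exception
	! reported through INTEVT, such as the NMI, and takes the path
	! below.)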

	/*
	 * We got here as a result of taking the INTEVT path for something
	 * that isn't a valid hard IRQ, therefore we bypass the do_IRQ()
	 * path and special case the event dispatch instead. This is the
	 * expected path for the NMI (and any other brilliantly implemented
	 * exception), which effectively wants regular exception dispatch
	 * but is unfortunately reported through INTEVT rather than
	 * EXPEVT. Grr.
	 */
	mov.l	6f, r9
	mov.l	@(r0, r9), r9
	jmp	@r9
	 mov	r15, r8		! trap handlers take saved regs in r8

10:
	jmp	@r9		! Off to do_IRQ() we go.
	 mov	r15, r5		! pass saved registers as arg1

ENTRY(exception_none)
	rts
	 nop

	.align	L1_CACHE_SHIFT
exception_data:
0:	.long	0x000080f0	! FD=1, IMASK=15
1:	.long	0xcfffffff	! RB=0, BL=0
2:	.long	INTEVT
3:	.long	do_IRQ
4:	.long	ret_from_irq
5:	.long	EXPEVT
6:	.long	exception_handling_table
7:	.long	ret_from_exception