1/*
2 * arch/sh/kernel/cpu/sh5/entry.S
3 *
4 * Copyright (C) 2000, 2001 Paolo Alberelli
5 * Copyright (C) 2004 - 2008 Paul Mundt
6 * Copyright (C) 2003, 2004 Richard Curnow
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/errno.h>
13#include <linux/init.h>
14#include <linux/sys.h>
15#include <cpu/registers.h>
16#include <asm/processor.h>
17#include <asm/unistd.h>
18#include <asm/thread_info.h>
19#include <asm/asm-offsets.h>
20
/*
 * SR (status register) bit fields used by the handlers below.
 */
#define SR_ASID_MASK	0x00ff0000
#define SR_FD_MASK	0x00008000
#define SR_SS		0x08000000
#define SR_BL		0x10000000
#define SR_MD		0x40000000

/*
 * Event code.  Passed to handle_exception in r4 to classify the entry
 * path (interrupt / TLB fault / non-TLB fault / debug).
 */
#define	EVENT_INTERRUPT		0
#define	EVENT_FAULT_TLB		1
#define	EVENT_FAULT_NOT_TLB	2
#define	EVENT_DEBUG		3

/* EXPEVT values (used by reset_or_panic to distinguish causes) */
#define	RESET_CAUSE		0x20
#define DEBUGSS_CAUSE		0x980

/*
 * Frame layout. Quad index.
 * FRAME_T/R/S convert a tr/general/special register index into a byte
 * offset within the saved pt_regs frame.
 */
#define FRAME_T(x)	FRAME_TBASE+(x*8)
#define FRAME_R(x)	FRAME_RBASE+(x*8)
#define FRAME_S(x)	FRAME_SBASE+(x*8)
#define FSPC		0
#define FSSR		1
#define FSYSCALL_ID	2

/* Arrange the save frame to be a multiple of 32 bytes long */
#define FRAME_SBASE	0
#define FRAME_RBASE	(FRAME_SBASE+(3*8))	/* SYSCALL_ID - SSR - SPC */
#define FRAME_TBASE	(FRAME_RBASE+(63*8))	/* r0 - r62 */
#define FRAME_PBASE	(FRAME_TBASE+(8*8))	/* tr0 -tr7 */
#define FRAME_SIZE	(FRAME_PBASE+(2*8))	/* pad0-pad1 */

#define FP_FRAME_SIZE	FP_FRAME_BASE+(33*8)	/* dr0 - dr31 + fpscr */
#define FP_FRAME_BASE	0

/* Offsets into reg_save_area for the first-level register stash. */
#define	SAVED_R2	0*8
#define	SAVED_R3	1*8
#define	SAVED_R4	2*8
#define	SAVED_R5	3*8
#define	SAVED_R18	4*8
#define	SAVED_R6	5*8
#define	SAVED_TR0	6*8

/* These are the registers saved in the TLB path that aren't saved in the first
   level of the normal one. */
#define	TLB_SAVED_R25	7*8
#define	TLB_SAVED_TR1	8*8
#define	TLB_SAVED_TR2	9*8
#define	TLB_SAVED_TR3	10*8
#define	TLB_SAVED_TR4	11*8
/* Save R0/R1 : PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1 causing
   breakage otherwise. */
#define	TLB_SAVED_R0	12*8
#define	TLB_SAVED_R1	13*8

/* Mask all interrupts: raise SR.IMASK to 0xf.  NB: clobbers r6. */
#define CLI()				\
	getcon	SR, r6;		\
	ori	r6, 0xf0, r6;		\
	putcon	r6, SR;

/* Unmask interrupts: clear SR.IMASK.  NB: clobbers r6. */
#define STI()				\
	getcon	SR, r6;		\
	andi	r6, ~0xf0, r6;		\
	putcon	r6, SR;

#ifdef CONFIG_PREEMPT
#  define preempt_stop()	CLI()
#else
#  define preempt_stop()
#  define resume_kernel		restore_all
#endif
98
	.section	.data, "aw"

#define FAST_TLBMISS_STACK_CACHELINES 4
#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)

/* Register back-up area for all exceptions */
	.balign	32
	/* Allow for 16 quadwords to be pushed by fast tlbmiss handling
	 * register saves etc. */
	.fill	FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
/* This is 32 byte aligned by construction */
/* Register back-up area for all exceptions */
/* 14 quadwords: SAVED_R2..SAVED_TR0 plus the TLB_SAVED_* slots above. */
reg_save_area:
	.quad	0
	.quad	0
	.quad	0
	.quad	0

	.quad	0
	.quad	0
	.quad	0
	.quad	0

	.quad	0
	.quad	0
	.quad	0
	.quad	0

	.quad	0
	.quad	0

/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
 * reentrancy. Note this area may be accessed via physical address.
 * Align so this fits a whole single cache line, for ease of purging.
 */
	.balign 32,0,32
resvec_save_area:
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.balign 32,0,32
142
/* Jump table of 3rd level handlers.
 * One 32-bit entry per 0x20 of EXPEVT/INTEVT; handle_exception indexes
 * it with (code >> 3) as a byte offset (i.e. entry = code / 0x20).
 */
trap_jtable:
	.long	do_exception_error		/* 0x000 */
	.long	do_exception_error		/* 0x020 */
#ifdef CONFIG_MMU
	.long	tlb_miss_load				/* 0x040 */
	.long	tlb_miss_store				/* 0x060 */
#else
	.long	do_exception_error
	.long	do_exception_error
#endif
	! ARTIFICIAL pseudo-EXPEVT setting
	.long	do_debug_interrupt		/* 0x080 */
#ifdef CONFIG_MMU
	.long	tlb_miss_load				/* 0x0A0 */
	.long	tlb_miss_store				/* 0x0C0 */
#else
	.long	do_exception_error
	.long	do_exception_error
#endif
	.long	do_address_error_load	/* 0x0E0 */
	.long	do_address_error_store	/* 0x100 */
#ifdef CONFIG_SH_FPU
	.long	do_fpu_error		/* 0x120 */
#else
	.long	do_exception_error		/* 0x120 */
#endif
	.long	do_exception_error	/* 0x140 */
	.long	system_call				/* 0x160 */
	.long	do_reserved_inst		/* 0x180 */
	.long	do_illegal_slot_inst	/* 0x1A0 */
	.long	do_exception_error		/* 0x1C0 - NMI */
	.long	do_exception_error		/* 0x1E0 */
	.rept 15
		.long do_IRQ		/* 0x200 - 0x3C0 */
	.endr
	.long	do_exception_error	/* 0x3E0 */
	.rept 32
		.long do_IRQ		/* 0x400 - 0x7E0 */
	.endr
	.long	fpu_error_or_IRQA	/* 0x800 */
	.long	fpu_error_or_IRQB	/* 0x820 */
	.long	do_IRQ			/* 0x840 */
	.long	do_IRQ			/* 0x860 */
	.rept 6
		.long do_exception_error	/* 0x880 - 0x920 */
	.endr
	.long	breakpoint_trap_handler	/* 0x940 */
	.long	do_exception_error	/* 0x960 */
	.long	do_single_step		/* 0x980 */

	.rept 3
		.long do_exception_error	/* 0x9A0 - 0x9E0 */
	.endr
	.long	do_IRQ			/* 0xA00 */
	.long	do_IRQ			/* 0xA20 */
#ifdef CONFIG_MMU
	.long	itlb_miss_or_IRQ	/* 0xA40 */
#else
	.long	do_IRQ
#endif
	.long	do_IRQ			/* 0xA60 */
	.long	do_IRQ			/* 0xA80 */
#ifdef CONFIG_MMU
	.long	itlb_miss_or_IRQ	/* 0xAA0 */
#else
	.long	do_IRQ
#endif
	.long	do_exception_error	/* 0xAC0 */
	.long	do_address_error_exec	/* 0xAE0 */
	.rept 8
		.long do_exception_error	/* 0xB00 - 0xBE0 */
	.endr
	.rept 18
		.long do_IRQ		/* 0xC00 - 0xE20 */
	.endr
219
	.section	.text64, "ax"

/*
 * --- Exception/Interrupt/Event Handling Section
 */

/*
 * VBR and RESVEC blocks.
 *
 * First level handler for VBR-based exceptions.
 *
 * To avoid waste of space, align to the maximum text block size.
 * This is assumed to be at most 128 bytes or 32 instructions.
 * DO NOT EXCEED 32 instructions on the first level handlers !
 *
 * Also note that RESVEC is contained within the VBR block
 * where the room left (1KB - TEXT_SIZE) allows placing
 * the RESVEC block (at most 512B + TEXT_SIZE).
 *
 * So first (and only) level handler for RESVEC-based exceptions.
 *
 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
 * and interrupt) we are a lot tight with register space until
 * saving onto the stack frame, which is done in handle_exception().
 *
 */

#define	TEXT_SIZE 	128
#define	BLOCK_SIZE 	1664 /* Dynamic check, 13*128 */
249
	.balign TEXT_SIZE
LVBR_block:
	.space 256, 0 /* Power-on class handler, */
		      /* not required here       */
/* First-level handler for the general-exception (non-TLB) class.
 * Stashes r2-r6, r18 and tr0 in reg_save_area, then hands off to
 * handle_exception with r4 = EVENT_FAULT_NOT_TLB.
 */
not_a_tlb_miss:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	/* Save original stack pointer into KCR1 */
	putcon	SP, KCR1

	/* Save other original registers into reg_save_area */
	movi	reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3

	/* Set args for Non-debug, Not a TLB miss class handler */
	getcon	EXPEVT, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3	/* bit0 set: stay in SHmedia on the ptabs */
	movi	EVENT_FAULT_NOT_TLB, r4
	or	SP, ZERO, r5	/* r5 = pointer to reg_save_area */
	getcon	KCR1, SP	/* restore the original stack pointer */
	pta	handle_exception, tr0
	blink	tr0, ZERO

	.balign 256
	! VBR+0x200
	nop
	.balign 256
	! VBR+0x300
	nop
	.balign 256
	/*
	 * Instead of the natural .balign 1024 place RESVEC here
	 * respecting the final 1KB alignment.
	 */
	.balign TEXT_SIZE
	/*
	 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
	 * block making sure the final alignment is correct.
	 */
#ifdef CONFIG_MMU
/* First-level handler for data TLB misses (VBR+0x400).
 * Tries the fast-path refill in do_fast_page_fault first; only if that
 * fails does it fall back to the full handle_exception path.  Needs a
 * deeper register stash than not_a_tlb_miss (r0/r1, r25, tr1-tr4) since
 * it calls C code before the pt_regs frame exists.
 */
tlb_miss:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	putcon	SP, KCR1
	movi	reg_save_area, SP
	/* SP is guaranteed 32-byte aligned. */
	st.q	SP, TLB_SAVED_R0 , r0
	st.q	SP, TLB_SAVED_R1 , r1
	st.q	SP, SAVED_R2 , r2
	st.q	SP, SAVED_R3 , r3
	st.q	SP, SAVED_R4 , r4
	st.q	SP, SAVED_R5 , r5
	st.q	SP, SAVED_R6 , r6
	st.q	SP, SAVED_R18, r18

	/* Save R25 for safety; as/ld may want to use it to achieve the call to
	 * the code in mm/tlbmiss.c */
	st.q	SP, TLB_SAVED_R25, r25
	gettr	tr0, r2
	gettr	tr1, r3
	gettr	tr2, r4
	gettr	tr3, r5
	gettr	tr4, r18
	st.q	SP, SAVED_TR0 , r2
	st.q	SP, TLB_SAVED_TR1 , r3
	st.q	SP, TLB_SAVED_TR2 , r4
	st.q	SP, TLB_SAVED_TR3 , r5
	st.q	SP, TLB_SAVED_TR4 , r18

	/* Args: r2 = SSR.MD (privilege at fault), r3 = EXPEVT, r4 = TEA */
	pt	do_fast_page_fault, tr0
	getcon	SSR, r2
	getcon	EXPEVT, r3
	getcon	TEA, r4
	shlri	r2, 30, r2
	andi	r2, 1, r2	/* r2 = SSR.MD */
	blink	tr0, LINK

	pt	fixup_to_invoke_general_handler, tr1

	/* If the fast path handler fixed the fault, just drop through quickly
	   to the restore code right away to return to the excepting context.
	   (r2 is do_fast_page_fault's return value: nonzero = not fixed.)
	 */
	bnei/u	r2, 0, tr1

fast_tlb_miss_restore:
	ld.q	SP, SAVED_TR0, r2
	ld.q	SP, TLB_SAVED_TR1, r3
	ld.q	SP, TLB_SAVED_TR2, r4

	ld.q	SP, TLB_SAVED_TR3, r5
	ld.q	SP, TLB_SAVED_TR4, r18

	ptabs	r2, tr0
	ptabs	r3, tr1
	ptabs	r4, tr2
	ptabs	r5, tr3
	ptabs	r18, tr4

	ld.q	SP, TLB_SAVED_R0, r0
	ld.q	SP, TLB_SAVED_R1, r1
	ld.q	SP, SAVED_R2, r2
	ld.q	SP, SAVED_R3, r3
	ld.q	SP, SAVED_R4, r4
	ld.q	SP, SAVED_R5, r5
	ld.q	SP, SAVED_R6, r6
	ld.q	SP, SAVED_R18, r18
	ld.q	SP, TLB_SAVED_R25, r25

	getcon	KCR1, SP
	rte
	nop /* for safety, in case the code is run on sh5-101 cut1.x */

fixup_to_invoke_general_handler:

	/* OK, new method. Restore stuff that's not expected to get saved into
	   the 'first-level' reg save area, then just fall through to setting
	   up the registers and calling the second-level handler. */

	/* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
	   r25,tr1-4 and save r6 to get into the right state. */

	ld.q	SP, TLB_SAVED_TR1, r3
	ld.q	SP, TLB_SAVED_TR2, r4
	ld.q	SP, TLB_SAVED_TR3, r5
	ld.q	SP, TLB_SAVED_TR4, r18
	ld.q	SP, TLB_SAVED_R25, r25

	ld.q	SP, TLB_SAVED_R0, r0
	ld.q	SP, TLB_SAVED_R1, r1

	ptabs/u	r3, tr1
	ptabs/u	r4, tr2
	ptabs/u	r5, tr3
	ptabs/u	r18, tr4

	/* Set args for Non-debug, TLB miss class handler */
	getcon	EXPEVT, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3
	movi	EVENT_FAULT_TLB, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP
	pta	handle_exception, tr0
	blink	tr0, ZERO
#else /* CONFIG_MMU */
	.balign 256
#endif
403
/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
   DOES END UP AT VBR+0x600 */
	nop
	nop
	nop
	nop
	nop
	nop

	.balign 256
	/* VBR + 0x600 */

/* First-level handler for external interrupts.  Same shape as
 * not_a_tlb_miss, but passes INTEVT and returns via ret_from_irq.
 */
interrupt:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	/* Save original stack pointer into KCR1 */
	putcon	SP, KCR1

	/* Save other original registers into reg_save_area */
	movi	reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3

	/* Set args for interrupt class handler */
	getcon	INTEVT, r2
	movi	ret_from_irq, r3
	ori	r3, 1, r3
	movi	EVENT_INTERRUPT, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP
	pta	handle_exception, tr0
	blink	tr0, ZERO
	.balign	TEXT_SIZE		/* let's waste the bare minimum */

LVBR_block_end:				/* Marker. Used for total checking */
444
	.balign 256
LRESVEC_block:
	/* Panic handler. Called with MMU off. Possible causes/actions:
	 * - Reset:		Jump to program start.
	 * - Single Step:	Turn off Single Step & return.
	 * - Others:		Call panic handler, passing PC as arg.
	 *			(this may need to be extended...)
	 */
reset_or_panic:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	putcon	SP, DCR
	/* First save r0-1 and tr0, as we need to use these */
	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP	/* physical address: MMU is off */
	st.q	SP, 0, r0
	st.q	SP, 8, r1
	gettr	tr0, r0
	st.q	SP, 32, r0

	/* Check cause */
	getcon	EXPEVT, r0
	movi	RESET_CAUSE, r1
	sub	r1, r0, r1		/* r1=0 if reset */
	movi	_stext-CONFIG_PAGE_OFFSET, r0
	ori	r0, 1, r0
	ptabs	r0, tr0
	beqi	r1, 0, tr0		/* Jump to start address if reset */

	getcon	EXPEVT, r0
	movi	DEBUGSS_CAUSE, r1
	sub	r1, r0, r1		/* r1=0 if single step */
	pta	single_step_panic, tr0
	beqi	r1, 0, tr0		/* jump if single step */

	/* Now jump to where we save the registers. */
	movi	panic_stash_regs-CONFIG_PAGE_OFFSET, r1
	ptabs	r1, tr0
	blink	tr0, r63

single_step_panic:
	/* We are in a handler with Single Step set. We need to resume the
	 * handler, by turning on MMU & turning off Single Step. */
	getcon	SSR, r0
	movi	SR_MMU, r1
	or	r0, r1, r0
	movi	~SR_SS, r1
	and	r0, r1, r0
	putcon	r0, SSR
	/* Restore EXPEVT, as the rte won't do this */
	getcon	PEXPEVT, r0
	putcon	r0, EXPEVT
	/* Restore regs */
	ld.q	SP, 32, r0
	ptabs	r0, tr0
	ld.q	SP, 0, r0
	ld.q	SP, 8, r1
	getcon	DCR, SP
	synco
	rte
503
504
	.balign	256
debug_exception:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	/*
	 * Single step/software_break_point first level handler.
	 * Called with MMU off, so the first thing we do is enable it
	 * by doing an rte with appropriate SSR.
	 */
	putcon	SP, DCR
	/* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP

	/* With the MMU off, we are bypassing the cache, so purge any
	 * data that will be made stale by the following stores.
	 */
	ocbp	SP, 0
	synco

	st.q	SP, 0, r0
	st.q	SP, 8, r1
	getcon	SPC, r0
	st.q	SP, 16, r0
	getcon	SSR, r0
	st.q	SP, 24, r0

	/* Enable MMU, block exceptions, set priv mode, disable single step */
	movi	SR_MMU | SR_BL | SR_MD, r1
	or	r0, r1, r0
	movi	~SR_SS, r1
	and	r0, r1, r0
	putcon	r0, SSR
	/* Force control to debug_exeception_2 when rte is executed.
	 * NOTE: 'exeception' is a historical misspelling; definition and
	 * reference agree, so it is kept as-is. */
	movi	debug_exeception_2, r0
	ori	r0, 1, r0      /* force SHmedia, just in case */
	putcon	r0, SPC
	getcon	DCR, SP
	synco
	rte
debug_exeception_2:
	/* Restore saved regs */
	putcon	SP, KCR1
	movi	resvec_save_area, SP	/* virtual address: MMU is back on */
	ld.q	SP, 24, r0
	putcon	r0, SSR
	ld.q	SP, 16, r0
	putcon	r0, SPC
	ld.q	SP, 0, r0
	ld.q	SP, 8, r1

	/* Save other original registers into reg_save_area */
	movi	reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3

	/* Set args for debug class handler */
	getcon	EXPEVT, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3
	movi	EVENT_DEBUG, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP
	pta	handle_exception, tr0
	blink	tr0, ZERO
574
	.balign	256
debug_interrupt:
	/* !!! WE COME HERE IN REAL MODE !!! */
	/* Hook-up debug interrupt to allow various debugging options to be
	 * hooked into its handler. */
	/* Save original stack pointer into KCR1 */
	synco
	putcon	SP, KCR1
	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
	ocbp	SP, 0
	ocbp	SP, 32
	synco

	/* Save other original registers into reg_save_area thru real addresses */
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3

	/* move (spc,ssr)->(pspc,pssr).  The rte will shift
	   them back again, so that they look like the originals
	   as far as the real handler code is concerned. */
	getcon	spc, r6
	putcon	r6, pspc
	getcon	ssr, r6
	putcon	r6, pssr

	! construct useful SR for handle_exception
	movi	3, r6		! 3 << 30 = SR.MD | SR.MMU
	shlli	r6, 30, r6
	getcon	sr, r18
	or	r18, r6, r6
	putcon	r6, ssr

	! SSR is now the current SR with the MD and MMU bits set
	! i.e. the rte will switch back to priv mode and put
	! the mmu back on

	! construct spc
	movi	handle_exception, r18
	ori	r18, 1, r18		! for safety (do we need this?)
	putcon	r18, spc

	/* Set args for Non-debug, Not a TLB miss class handler */

	! EXPEVT==0x80 is unused, so 'steal' this value to put the
	! debug interrupt handler in the vectoring table
	movi	0x80, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3
	movi	EVENT_FAULT_NOT_TLB, r4

	or	SP, ZERO, r5
	movi	CONFIG_PAGE_OFFSET, r6	! convert r5 back to a virtual address
	add	r6, r5, r5
	getcon	KCR1, SP

	synco	! for safety
	rte	! -> handle_exception, switch back to priv mode again

LRESVEC_block_end:			/* Marker. Unused. */
640
	.balign	TEXT_SIZE

/*
 * Second level handler for VBR-based exceptions. Pre-handler.
 * In common to all stack-frame sensitive handlers.
 *
 * Inputs:
 * (KCR0) Current [current task union]
 * (KCR1) Original SP
 * (r2)   INTEVT/EXPEVT
 * (r3)   appropriate return address
 * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
 * (r5)   Pointer to reg_save_area
 * (SP)   Original SP
 *
 * Available registers:
 * (r6)
 * (r18)
 * (tr0)
 *
 */
handle_exception:
	/* Common 2nd level handler. */

	/* First thing we need an appropriate stack pointer */
	getcon	SSR, r6
	shlri	r6, 30, r6
	andi	r6, 1, r6	/* r6 = SSR.MD: 1 = faulted in kernel mode */
	pta	stack_ok, tr0
	bne	r6, ZERO, tr0		/* Original stack pointer is fine */

	/* Set stack pointer for user fault */
	getcon	KCR0, SP
	movi	THREAD_SIZE, r6		/* Point to the end */
	add	SP, r6, SP

stack_ok:

/* DEBUG : check for underflow/overflow of the kernel stack */
	pta	no_underflow, tr0
	getcon	KCR0, r6
	movi	1024, r18
	add	r6, r18, r6
	bge	SP, r6, tr0	! ? below 1k from bottom of stack : danger zone

/* Just panic to cause a crash. */
bad_sp:
	ld.b	r63, 0, r6	! load from address 0 -> guaranteed fault
	nop

no_underflow:
	pta	bad_sp, tr0
	getcon	kcr0, r6
	movi	THREAD_SIZE, r18
	add	r18, r6, r6
	bgt	SP, r6, tr0	! sp above the stack

	/* Make some room for the BASIC frame. */
	movi	-(FRAME_SIZE), r6
	add	SP, r6, SP

/* Could do this with no stalling if we had another spare register, but the
   code below will be OK. */
	/* Copy the first-level stash (r2-r6, r18, tr0) into the pt_regs
	   frame, interleaving loads and stores to hide latency. */
	ld.q	r5, SAVED_R2, r6
	ld.q	r5, SAVED_R3, r18
	st.q	SP, FRAME_R(2), r6
	ld.q	r5, SAVED_R4, r6
	st.q	SP, FRAME_R(3), r18
	ld.q	r5, SAVED_R5, r18
	st.q	SP, FRAME_R(4), r6
	ld.q	r5, SAVED_R6, r6
	st.q	SP, FRAME_R(5), r18
	ld.q	r5, SAVED_R18, r18
	st.q	SP, FRAME_R(6), r6
	ld.q	r5, SAVED_TR0, r6
	st.q	SP, FRAME_R(18), r18
	st.q	SP, FRAME_T(0), r6

	/* Keep old SP around */
	getcon	KCR1, r6

	/* Save the rest of the general purpose registers */
	st.q	SP, FRAME_R(0), r0
	st.q	SP, FRAME_R(1), r1
	st.q	SP, FRAME_R(7), r7
	st.q	SP, FRAME_R(8), r8
	st.q	SP, FRAME_R(9), r9
	st.q	SP, FRAME_R(10), r10
	st.q	SP, FRAME_R(11), r11
	st.q	SP, FRAME_R(12), r12
	st.q	SP, FRAME_R(13), r13
	st.q	SP, FRAME_R(14), r14

	/* SP is somewhere else */
	st.q	SP, FRAME_R(15), r6

	st.q	SP, FRAME_R(16), r16
	st.q	SP, FRAME_R(17), r17
	/* r18 is saved earlier. */
	st.q	SP, FRAME_R(19), r19
	st.q	SP, FRAME_R(20), r20
	st.q	SP, FRAME_R(21), r21
	st.q	SP, FRAME_R(22), r22
	st.q	SP, FRAME_R(23), r23
	st.q	SP, FRAME_R(24), r24
	st.q	SP, FRAME_R(25), r25
	st.q	SP, FRAME_R(26), r26
	st.q	SP, FRAME_R(27), r27
	st.q	SP, FRAME_R(28), r28
	st.q	SP, FRAME_R(29), r29
	st.q	SP, FRAME_R(30), r30
	st.q	SP, FRAME_R(31), r31
	st.q	SP, FRAME_R(32), r32
	st.q	SP, FRAME_R(33), r33
	st.q	SP, FRAME_R(34), r34
	st.q	SP, FRAME_R(35), r35
	st.q	SP, FRAME_R(36), r36
	st.q	SP, FRAME_R(37), r37
	st.q	SP, FRAME_R(38), r38
	st.q	SP, FRAME_R(39), r39
	st.q	SP, FRAME_R(40), r40
	st.q	SP, FRAME_R(41), r41
	st.q	SP, FRAME_R(42), r42
	st.q	SP, FRAME_R(43), r43
	st.q	SP, FRAME_R(44), r44
	st.q	SP, FRAME_R(45), r45
	st.q	SP, FRAME_R(46), r46
	st.q	SP, FRAME_R(47), r47
	st.q	SP, FRAME_R(48), r48
	st.q	SP, FRAME_R(49), r49
	st.q	SP, FRAME_R(50), r50
	st.q	SP, FRAME_R(51), r51
	st.q	SP, FRAME_R(52), r52
	st.q	SP, FRAME_R(53), r53
	st.q	SP, FRAME_R(54), r54
	st.q	SP, FRAME_R(55), r55
	st.q	SP, FRAME_R(56), r56
	st.q	SP, FRAME_R(57), r57
	st.q	SP, FRAME_R(58), r58
	st.q	SP, FRAME_R(59), r59
	st.q	SP, FRAME_R(60), r60
	st.q	SP, FRAME_R(61), r61
	st.q	SP, FRAME_R(62), r62

	/*
	 * Save the S* registers.
	 */
	getcon	SSR, r61
	st.q	SP, FRAME_S(FSSR), r61
	getcon	SPC, r62
	st.q	SP, FRAME_S(FSPC), r62
	movi	-1, r62			/* Reset syscall_nr */
	st.q	SP, FRAME_S(FSYSCALL_ID), r62

	/* Save the rest of the target registers */
	gettr	tr1, r6
	st.q	SP, FRAME_T(1), r6
	gettr	tr2, r6
	st.q	SP, FRAME_T(2), r6
	gettr	tr3, r6
	st.q	SP, FRAME_T(3), r6
	gettr	tr4, r6
	st.q	SP, FRAME_T(4), r6
	gettr	tr5, r6
	st.q	SP, FRAME_T(5), r6
	gettr	tr6, r6
	st.q	SP, FRAME_T(6), r6
	gettr	tr7, r6
	st.q	SP, FRAME_T(7), r6

	! setup FP so that unwinder can wind back through nested kernel mode
	! exceptions
	add	SP, ZERO, r14

	/* For syscall and debug race condition, get TRA now */
	getcon	TRA, r5

	/* We are in a safe position to turn SR.BL off, but set IMASK=0xf
	 * Also set FD, to catch FPU usage in the kernel.
	 *
	 * benedict.gaster@superh.com 29/07/2002
	 *
	 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
	 * same time change BL from 1->0, as any pending interrupt of a level
	 * higher than the previous value of IMASK will leak through and be
	 * taken unexpectedly.
	 *
	 * To avoid this we raise the IMASK and then issue another PUTCON to
	 * enable interrupts.
	 */
	getcon	SR, r6
	movi	SR_IMASK | SR_FD, r7
	or	r6, r7, r6
	putcon	r6, SR
	movi	SR_UNBLOCK_EXC, r7
	and	r6, r7, r6
	putcon	r6, SR


	/* Now call the appropriate 3rd level handler */
	or	r3, ZERO, LINK
	movi	trap_jtable, r3
	shlri	r2, 3, r2	/* code -> byte offset into the .long table */
	ldx.l	r2, r3, r3	/* r3 = handler address */
	shlri	r2, 2, r2	/* r2 = entry number (code/0x20) for handler */
	ptabs	r3, tr0
	or	SP, ZERO, r3	/* r3 = struct pt_regs * */
	blink	tr0, ZERO
849
/*
 * Second level handler for VBR-based exceptions. Post-handlers.
 *
 * Post-handlers for interrupts (ret_from_irq), exceptions
 * (ret_from_exception) and common reentrance doors (restore_all
 * to get back to the original context, ret_from_syscall loop to
 * check kernel exiting).
 *
 * ret_with_reschedule and work_notifysig are an inner labels of
 * the ret_from_syscall loop.
 *
 * In common to all stack-frame sensitive handlers.
 *
 * Inputs:
 * (SP)   struct pt_regs *, original register's frame pointer (basic)
 *
 */
	.global ret_from_irq
ret_from_irq:
	ld.q	SP, FRAME_S(FSSR), r6
	shlri	r6, 30, r6
	andi	r6, 1, r6	/* r6 = saved SR.MD: returning to kernel? */
	pta	resume_kernel, tr0
	bne	r6, ZERO, tr0		/* no further checks */
	STI()
	pta	ret_with_reschedule, tr0
	blink	tr0, ZERO		/* Do not check softirqs */

	.global ret_from_exception
ret_from_exception:
	preempt_stop()

	ld.q	SP, FRAME_S(FSSR), r6
	shlri	r6, 30, r6
	andi	r6, 1, r6	/* r6 = saved SR.MD: returning to kernel? */
	pta	resume_kernel, tr0
	bne	r6, ZERO, tr0		/* no further checks */

	/* Check softirqs */

#ifdef CONFIG_PREEMPT
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO

resume_kernel:
	CLI()

	pta	restore_all, tr0

	getcon	KCR0, r6
	ld.l	r6, TI_PRE_COUNT, r7
	beq/u	r7, ZERO, tr0	/* preempt_count != 0: just restore */

need_resched:
	ld.l	r6, TI_FLAGS, r7
	movi	(1 << TIF_NEED_RESCHED), r8
	and	r8, r7, r8
	bne	r8, ZERO, tr0	/* NB: branches to restore_all when NOT set */

	getcon	SR, r7
	andi	r7, 0xf0, r7	/* interrupts still masked? don't preempt */
	bne	r7, ZERO, tr0

	movi	preempt_schedule_irq, r7
	ori	r7, 1, r7
	ptabs	r7, tr1
	blink	tr1, LINK

	pta	need_resched, tr1
	blink	tr1, ZERO
#endif
921
	.global ret_from_syscall
ret_from_syscall:

ret_with_reschedule:
	getcon	KCR0, r6		! r6 contains current_thread_info
	ld.l	r6, TI_FLAGS, r7	! r7 contains current_thread_info->flags

	movi	_TIF_NEED_RESCHED, r8
	and	r8, r7, r8
	pta	work_resched, tr0
	bne	r8, ZERO, tr0

	pta	restore_all, tr1

	movi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8
	and	r8, r7, r8
	pta	work_notifysig, tr0
	bne	r8, ZERO, tr0

	blink	tr1, ZERO	/* nothing pending -> restore_all */

work_resched:
	pta	ret_from_syscall, tr0
	gettr	tr0, LINK	/* schedule() will 'return' to the flag recheck */
	movi	schedule, r6
	ptabs	r6, tr0
	blink	tr0, ZERO		/* Call schedule(), return on top */

work_notifysig:
	gettr	tr1, LINK	/* return to restore_all after the call */

	movi	do_notify_resume, r6
	ptabs	r6, tr0
	or	SP, ZERO, r2
	or	r7, ZERO, r3
	blink	tr0, LINK	    /* Call do_notify_resume(regs, current_thread_info->flags), return here */
958
restore_all:
	/* Do prefetches */

	/* Restore tr0-tr7 first; r6-r9 are free scratch at this point. */
	ld.q	SP, FRAME_T(0), r6
	ld.q	SP, FRAME_T(1), r7
	ld.q	SP, FRAME_T(2), r8
	ld.q	SP, FRAME_T(3), r9
	ptabs	r6, tr0
	ptabs	r7, tr1
	ptabs	r8, tr2
	ptabs	r9, tr3
	ld.q	SP, FRAME_T(4), r6
	ld.q	SP, FRAME_T(5), r7
	ld.q	SP, FRAME_T(6), r8
	ld.q	SP, FRAME_T(7), r9
	ptabs	r6, tr4
	ptabs	r7, tr5
	ptabs	r8, tr6
	ptabs	r9, tr7

	ld.q	SP, FRAME_R(0), r0
	ld.q	SP, FRAME_R(1), r1
	ld.q	SP, FRAME_R(2), r2
	ld.q	SP, FRAME_R(3), r3
	ld.q	SP, FRAME_R(4), r4
	ld.q	SP, FRAME_R(5), r5
	ld.q	SP, FRAME_R(6), r6
	ld.q	SP, FRAME_R(7), r7
	ld.q	SP, FRAME_R(8), r8
	ld.q	SP, FRAME_R(9), r9
	ld.q	SP, FRAME_R(10), r10
	ld.q	SP, FRAME_R(11), r11
	ld.q	SP, FRAME_R(12), r12
	ld.q	SP, FRAME_R(13), r13
	ld.q	SP, FRAME_R(14), r14

	ld.q	SP, FRAME_R(16), r16
	ld.q	SP, FRAME_R(17), r17
	ld.q	SP, FRAME_R(18), r18
	ld.q	SP, FRAME_R(19), r19
	ld.q	SP, FRAME_R(20), r20
	ld.q	SP, FRAME_R(21), r21
	ld.q	SP, FRAME_R(22), r22
	ld.q	SP, FRAME_R(23), r23
	ld.q	SP, FRAME_R(24), r24
	ld.q	SP, FRAME_R(25), r25
	ld.q	SP, FRAME_R(26), r26
	ld.q	SP, FRAME_R(27), r27
	ld.q	SP, FRAME_R(28), r28
	ld.q	SP, FRAME_R(29), r29
	ld.q	SP, FRAME_R(30), r30
	ld.q	SP, FRAME_R(31), r31
	ld.q	SP, FRAME_R(32), r32
	ld.q	SP, FRAME_R(33), r33
	ld.q	SP, FRAME_R(34), r34
	ld.q	SP, FRAME_R(35), r35
	ld.q	SP, FRAME_R(36), r36
	ld.q	SP, FRAME_R(37), r37
	ld.q	SP, FRAME_R(38), r38
	ld.q	SP, FRAME_R(39), r39
	ld.q	SP, FRAME_R(40), r40
	ld.q	SP, FRAME_R(41), r41
	ld.q	SP, FRAME_R(42), r42
	ld.q	SP, FRAME_R(43), r43
	ld.q	SP, FRAME_R(44), r44
	ld.q	SP, FRAME_R(45), r45
	ld.q	SP, FRAME_R(46), r46
	ld.q	SP, FRAME_R(47), r47
	ld.q	SP, FRAME_R(48), r48
	ld.q	SP, FRAME_R(49), r49
	ld.q	SP, FRAME_R(50), r50
	ld.q	SP, FRAME_R(51), r51
	ld.q	SP, FRAME_R(52), r52
	ld.q	SP, FRAME_R(53), r53
	ld.q	SP, FRAME_R(54), r54
	ld.q	SP, FRAME_R(55), r55
	ld.q	SP, FRAME_R(56), r56
	ld.q	SP, FRAME_R(57), r57
	ld.q	SP, FRAME_R(58), r58

	/* r59-r62 are still usable as scratch while SSR/SPC are rebuilt. */
	getcon	SR, r59
	movi	SR_BLOCK_EXC, r60
	or	r59, r60, r59
	putcon	r59, SR /* SR.BL = 1, keep nesting out */
	ld.q	SP, FRAME_S(FSSR), r61
	ld.q	SP, FRAME_S(FSPC), r62
	movi	SR_ASID_MASK, r60
	and	r59, r60, r59
	andc	r61, r60, r61		/* Clear out older ASID */
	or	r59, r61, r61		/* Retain current ASID */
	putcon	r61, SSR
	putcon	r62, SPC

	/* Ignore FSYSCALL_ID */

	ld.q	SP, FRAME_R(59), r59
	ld.q	SP, FRAME_R(60), r60
	ld.q	SP, FRAME_R(61), r61
	ld.q	SP, FRAME_R(62), r62

	/* Last touch */
	ld.q	SP, FRAME_R(15), SP
	rte
	nop
1063
/*
 * Third level handlers for VBR-based exceptions. Adapting args to
 * and/or deflecting to fourth level handlers.
 *
 * Fourth level handlers interface.
 * Most are C-coded handlers directly pointed by the trap_jtable.
 * (Third = Fourth level)
 * Inputs:
 * (r2)   fault/interrupt code, entry number (e.g. NMI = 14,
 *	  IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
 * (r3)   struct pt_regs *, original register's frame pointer
 * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
 * (r5)   TRA control register (for syscall/debug benefit only)
 * (LINK) return address
 * (SP)   = r3
 *
 * Kernel TLB fault handlers will get a slightly different interface.
 * (r2)   struct pt_regs *, original register's frame pointer
 * (r3)   page fault error code (see asm/thread_info.h)
 * (r4)   Effective Address of fault
 * (LINK) return address
 * (SP)   = r2
 *
 * fpu_error_or_IRQ? is a helper to deflect to the right cause.
 *
 */
#ifdef CONFIG_MMU
tlb_miss_load:
	or	SP, ZERO, r2
	or	ZERO, ZERO, r3		/* Read */
	getcon	TEA, r4
	pta	call_do_page_fault, tr0
	beq	ZERO, ZERO, tr0		/* unconditional branch */

tlb_miss_store:
	or	SP, ZERO, r2
	movi	FAULT_CODE_WRITE, r3		/* Write */
	getcon	TEA, r4
	pta	call_do_page_fault, tr0
	beq	ZERO, ZERO, tr0		/* unconditional branch */

itlb_miss_or_IRQ:
	pta	its_IRQ, tr0
	beqi/u	r4, EVENT_INTERRUPT, tr0

	/* ITLB miss */
	or	SP, ZERO, r2
	movi	FAULT_CODE_ITLB, r3
	getcon	TEA, r4
	/* Fall through */

call_do_page_fault:
	movi	do_page_fault, r6
        ptabs	r6, tr0
        blink	tr0, ZERO
#endif /* CONFIG_MMU */
1120
/* Shared-vector deflectors: vectors 0x800/0x820 can mean either an FPU
 * error or an IRQ; r4 (the event code) disambiguates.
 */
fpu_error_or_IRQA:
	pta	its_IRQ, tr0
	beqi/l	r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
	movi	fpu_state_restore_trap_handler, r6
#else
	movi	do_exception_error, r6
#endif
	ptabs	r6, tr0
	blink	tr0, ZERO

fpu_error_or_IRQB:
	pta	its_IRQ, tr0
	beqi/l	r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
	movi	fpu_state_restore_trap_handler, r6
#else
	movi	do_exception_error, r6
#endif
	ptabs	r6, tr0
	blink	tr0, ZERO

its_IRQ:
	movi	do_IRQ, r6
	ptabs	r6, tr0
	blink	tr0, ZERO
1147
/*
 * system_call/unknown_trap third level handler:
 *
 * Inputs:
 * (r2)   fault/interrupt code, entry number (TRAP = 11)
 * (r3)   struct pt_regs *, original register's frame pointer
 * (r4)   Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
 * (r5)   TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
 * (SP)   = r3
 * (LINK) return address: ret_from_exception
 * (*r3)  Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
 *
 * Outputs:
 * (*r3)  Syscall reply (Saved r2)
 * (LINK) In case of syscall only it can be scrapped.
 *        Common second level post handler will be ret_from_syscall.
 *        Common (non-trace) exit point to that is syscall_ret (saving
 *        result to r2). Common bad exit point is syscall_bad (returning
 *        ENOSYS then saved to r2).
 *
 */

unknown_trap:
	/* Unknown Trap or User Trace */
	movi	do_unknown_trapa, r6
	ptabs	r6, tr0
	ld.q	r3, FRAME_R(9), r2	/* r2 = #arg << 16 | syscall # */
	andi	r2, 0x1ff, r2		/* r2 = syscall # */
	blink	tr0, LINK

	pta	syscall_ret, tr0
	blink	tr0, ZERO

	/* New syscall implementation*/
system_call:
	pta	unknown_trap, tr0
	or	r5, ZERO, r4	/* TRA (=r5) -> r4 */
	shlri	r4, 20, r4
	bnei	r4, 1, tr0	/* unknown_trap if not 0x1yzzzz */

	/* It's a system call */
	st.q	r3, FRAME_S(FSYSCALL_ID), r5 	/* ID (0x1yzzzz) -> stack */
	andi	r5, 0x1ff, r5			/* syscall # -> r5	  */

	STI()

	pta	syscall_allowed, tr0
	movi	NR_syscalls - 1, r4	/* Last valid */
	bgeu/l	r4, r5, tr0

syscall_bad:
	/* Return ENOSYS ! */
	movi	-(ENOSYS), r2		/* Fall-through */

	.global syscall_ret
syscall_ret:
	st.q	SP, FRAME_R(9), r2	/* Expecting SP back to BASIC frame */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO
1210
1211
/* A different return path for ret_from_fork, because we now need
 * to call schedule_tail with the later kernels. Because prev is
 * already loaded into r2 by switch_to(), we can call it straight away.
 */
1216
.global	ret_from_fork
ret_from_fork:

	/* Finish the context switch: call schedule_tail(prev); prev is
	 * already in r2, left there by switch_to(). */
	movi	schedule_tail,r5
	ori	r5, 1, r5	/* bit 0 set on the target address (ISA mode bit -- TODO confirm) */
	ptabs	r5, tr0
	blink	tr0, LINK

	/* Then leave the kernel like a normal syscall: skip the trapa
	 * and resume userspace at the following instruction. */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO
1230
.global	ret_from_kernel_thread
ret_from_kernel_thread:

	/* Finish the context switch first (prev is in r2 from switch_to()). */
	movi	schedule_tail,r5
	ori	r5, 1, r5	/* bit 0 set on the target address (ISA mode bit -- TODO confirm) */
	ptabs	r5, tr0
	blink	tr0, LINK

	/* Invoke the thread function: saved r3 = function pointer,
	 * saved r2 = its argument (set up by copy_thread -- TODO confirm). */
	ld.q	SP, FRAME_R(2), r2
	ld.q	SP, FRAME_R(3), r3
	ptabs	r3, tr0
	blink	tr0, LINK

	/* If the thread function returns, exit through the normal
	 * syscall return sequence. */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO
1249
syscall_allowed:
	/* Use LINK to deflect the exit point, default is syscall_ret */
	pta	syscall_ret, tr0
	gettr	tr0, LINK
	pta	syscall_notrace, tr0

	/* KCR0 holds the current thread_info pointer (its TI_FLAGS field
	 * is read below).  If no syscall-work flags are set, skip the
	 * tracing calls and go straight to the dispatch. */
	getcon	KCR0, r2
	ld.l	r2, TI_FLAGS, r4
	movi	_TIF_WORK_SYSCALL_MASK, r6
	and	r6, r4, r6
	beq/l	r6, ZERO, tr0

	/* Trace it by calling syscall_trace before and after */
	movi	do_syscall_trace_enter, r4
	or	SP, ZERO, r2		/* arg: struct pt_regs * */
	ptabs	r4, tr0
	blink	tr0, LINK

	/* Save the retval */
	st.q	SP, FRAME_R(2), r2

	/* Reload syscall number as r5 is trashed by do_syscall_trace_enter */
	ld.q	SP, FRAME_S(FSYSCALL_ID), r5
	andi	r5, 0x1ff, r5

	/* When tracing, return through syscall_ret_trace instead. */
	pta	syscall_ret_trace, tr0
	gettr	tr0, LINK

syscall_notrace:
	/* Now point to the appropriate 4th level syscall handler */
	movi	sys_call_table, r4
	shlli	r5, 2, r5		/* index a table of 32-bit entries */
	ldx.l	r4, r5, r5
	ptabs	r5, tr0

	/* Prepare original args */
	ld.q	SP, FRAME_R(2), r2
	ld.q	SP, FRAME_R(3), r3
	ld.q	SP, FRAME_R(4), r4
	ld.q	SP, FRAME_R(5), r5
	ld.q	SP, FRAME_R(6), r6
	ld.q	SP, FRAME_R(7), r7

	/* And now the trick for those syscalls requiring regs * ! */
	or	SP, ZERO, r8

	/* Call it */
	blink	tr0, ZERO	/* LINK is already properly set */
1298
syscall_ret_trace:
	/* We get back here only if under trace */
	st.q	SP, FRAME_R(9), r2	/* Save return value */

	/* Report syscall exit to the tracer: do_syscall_trace_leave(regs) */
	movi	do_syscall_trace_leave, LINK
	or	SP, ZERO, r2		/* arg: struct pt_regs * */
	ptabs	LINK, tr0
	blink	tr0, LINK

	/* This needs to be done after any syscall tracing */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2	/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2

	pta	ret_from_syscall, tr0
	blink	tr0, ZERO 	/* Resume normal return sequence */
1315
1316/*
1317 * --- Switch to running under a particular ASID and return the previous ASID value
1318 * --- The caller is assumed to have done a cli before calling this.
1319 *
1320 * Input r2 : new ASID
1321 * Output r2 : old ASID
1322 */
1323
	.global switch_and_save_asid
switch_and_save_asid:
	/* Replace SR.ASID (bits 16..23) with the value in r2, returning
	 * the previous ASID in r2.  The new SR is installed with an RTE
	 * (via SSR/SPC) so it takes effect atomically; the caller must
	 * already have interrupts disabled (see comment above). */
	getcon	sr, r0
	movi	255, r4
	shlli	r4, 16, r4	/* r4 = mask to select ASID */
	and	r0, r4, r3	/* r3 = shifted old ASID */
	andi	r2, 255, r2	/* mask down new ASID */
	shlli	r2, 16, r2	/* align new ASID against SR.ASID */
	andc	r0, r4, r0	/* efface old ASID from SR */
	or	r0, r2, r0	/* insert the new ASID */
	putcon	r0, ssr
	movi	1f, r0
	putcon	r0, spc
	rte
	nop
1:
	ptabs	LINK, tr0
	shlri	r3, 16, r2	/* r2 = old ASID */
	blink	tr0, r63
1343
	.global route_to_panic_handler
route_to_panic_handler:
	/* Switch to real mode, goto panic_handler, don't return.  Useful for
	   last-chance debugging, e.g. if no output wants to go to the console.
	   */
	/* tr0 = physical address of panic_handler; the RTE below clears
	 * SR bit 31 (sr.mmu, cf. peek_real_address_q) so we land at label
	 * 1 in real mode and jump through tr0 from there. */
	movi	panic_handler - CONFIG_PAGE_OFFSET, r1
	ptabs	r1, tr0
	pta	1f, tr1
	gettr	tr1, r0
	putcon	r0, spc
	getcon	sr, r0
	movi	1, r1
	shlli	r1, 31, r1
	andc	r0, r1, r0	/* clear sr.mmu for the RTE target */
	putcon	r0, ssr
	rte
	nop
1:	/* Now in real mode */
	blink	tr0, r63
	nop
1365
	.global peek_real_address_q
peek_real_address_q:
	/* Two args:
	   r2 : real mode address to peek
	   r2(out) : result quadword

	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to ioremap the debug
	   module, and to avoid the need to ioremap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).

	   This code is not performance critical
	*/

	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	movi	1, r1
	shlli	r1, 28, r1
	or	r0, r1, r1	/* r1 = r0 with block bit (SR.BL) set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */

	putcon	r1, ssr
	movi	.peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
	movi	1f, r37		/* virtual mode return addr */
	putcon	r36, spc

	synco
	rte
	nop

.peek0:	/* come here in real mode, don't touch caches!!
	   still in critical section (sr.bl==1) */
	/* Stage the return: SSR = original SR (MMU back on, BL clear),
	 * SPC = virtual-mode label 1 below. */
	putcon	r0, ssr
	putcon	r37, spc
	/* Here's the actual peek.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	ld.q	r2, 0, r2
	synco
	rte	/* Back to virtual mode */
	nop

1:
	ptabs	LINK, tr0
	blink	tr0, r63
1414
	.global poke_real_address_q
poke_real_address_q:
	/* Two args:
	   r2 : real mode address to poke
	   r3 : quadword value to write.

	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to ioremap the debug
	   module, and to avoid the need to ioremap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).

	   This code is not performance critical
	*/

	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	movi	1, r1
	shlli	r1, 28, r1
	or	r0, r1, r1	/* r1 = r0 with block bit (SR.BL) set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */

	putcon	r1, ssr
	movi	.poke0-CONFIG_PAGE_OFFSET, r36	/* real mode target address */
	movi	1f, r37		/* virtual mode return addr */
	putcon	r36, spc

	synco
	rte
	nop

.poke0:	/* come here in real mode, don't touch caches!!
	   still in critical section (sr.bl==1) */
	/* Stage the return: SSR = original SR (MMU back on, BL clear),
	 * SPC = virtual-mode label 1 below. */
	putcon	r0, ssr
	putcon	r37, spc
	/* Here's the actual poke.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	st.q	r2, 0, r3
	synco
	rte	/* Back to virtual mode */
	nop

1:
	ptabs	LINK, tr0
	blink	tr0, r63
1463
1464#ifdef CONFIG_MMU
1465/*
1466 * --- User Access Handling Section
1467 */
1468
1469/*
1470 * User Access support. It all moved to non inlined Assembler
1471 * functions in here.
1472 *
1473 * __kernel_size_t __copy_user(void *__to, const void *__from,
1474 * __kernel_size_t __n)
1475 *
1476 * Inputs:
1477 * (r2) target address
1478 * (r3) source address
1479 * (r4) size in bytes
1480 *
 * Outputs:
1482 * (*r2) target data
1483 * (r2) non-copied bytes
1484 *
1485 * If a fault occurs on the user pointer, bail out early and return the
1486 * number of bytes not copied in r2.
1487 * Strategy : for large blocks, call a real memcpy function which can
1488 * move >1 byte at a time using unaligned ld/st instructions, and can
1489 * manipulate the cache using prefetch + alloco to improve the speed
1490 * further. If a fault occurs in that function, just revert to the
1491 * byte-by-byte approach used for small blocks; this is rare so the
1492 * performance hit for that case does not matter.
1493 *
1494 * For small blocks it's not worth the overhead of setting up and calling
1495 * the memcpy routine; do the copy a byte at a time.
1496 *
1497 */
	.global	__copy_user
__copy_user:
	/* Fast path for blocks larger than 16 bytes: call copy_user_memcpy.
	 * If it faults, __copy_user_fixup below restores the saved args and
	 * retries byte-by-byte.  Small blocks skip straight to the loop. */
	pta	__copy_user_byte_by_byte, tr1
	movi	16, r0 ! this value is a best guess, should tune it by benchmarking
	bge/u	r0, r4, tr1
	pta	copy_user_memcpy, tr0
	addi	SP, -32, SP
	/* Save arguments in case we have to fix-up unhandled page fault */
	st.q	SP, 0, r2
	st.q	SP, 8, r3
	st.q	SP, 16, r4
	st.q	SP, 24, r35 ! r35 is callee-save
	/* Save LINK in a register to reduce RTS time later (otherwise
	   ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
	ori	LINK, 0, r35
	blink	tr0, LINK

	/* Copy completed normally if we get back here */
	ptabs	r35, tr0
	ld.q	SP, 24, r35
	/* don't restore r2-r4, pointless */
	/* set result=r2 to zero as the copy must have succeeded. */
	or	r63, r63, r2
	addi	SP, 32, SP
	blink	tr0, r63 ! RTS

	.global	__copy_user_fixup
__copy_user_fixup:
	/* Entered if copy_user_memcpy faulted (from the page-fault fixup
	 * path -- TODO confirm caller): restore args and fall through to
	 * the byte-by-byte copy, which handles faults itself. */
	/* Restore stack frame */
	ori	r35, 0, LINK
	ld.q	SP, 24, r35
	ld.q	SP, 16, r4
	ld.q	SP, 8, r3
	ld.q	SP, 0, r2
	addi	SP, 32, SP
	/* Fall through to original code, in the 'same' state we entered with */

/* The slow byte-by-byte method is used if the fast copy traps due to a bad
   user address.  In that rare case, the speed drop can be tolerated. */
__copy_user_byte_by_byte:
	/* r0 = (dest - src - 1) so that stx.b r3,r0 stores to dest-1 after
	 * r3 has been post-incremented; r4 counts bytes remaining. */
	pta	___copy_user_exit, tr1
	pta	___copy_user1, tr0
	beq/u	r4, r63, tr1	/* early exit for zero length copy */
	sub	r2, r3, r0
	addi	r0, -1, r0

___copy_user1:
	ld.b	r3, 0, r5	/* Fault address 1 */

	/* Could rewrite this to use just 1 add, but the second comes 'free'
	   due to load latency */
	addi	r3, 1, r3
	addi	r4, -1, r4	/* No real fixup required */
___copy_user2:
	stx.b	r3, r0, r5	/* Fault address 2 */
	bne	r4, ZERO, tr0

___copy_user_exit:
	or	r4, ZERO, r2	/* r2 = bytes not copied (0 on success) */
	ptabs	LINK, tr0
	blink	tr0, ZERO
1559
1560/*
1561 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
1562 *
1563 * Inputs:
1564 * (r2) target address
1565 * (r3) size in bytes
1566 *
 * Outputs:
1568 * (*r2) zero-ed target data
1569 * (r2) non-zero-ed bytes
1570 */
	.global	__clear_user
__clear_user:
	/* Byte-at-a-time zeroing of a user buffer.  A fault at the store
	 * resumes at ___clear_user_exit (via __ex_table), returning the
	 * count of bytes not cleared. */
	pta	___clear_user_exit, tr1
	pta	___clear_user1, tr0
	beq/u	r3, r63, tr1	/* zero-length: nothing to do */

___clear_user1:
	st.b	r2, 0, ZERO	/* Fault address */
	addi	r2, 1, r2
	addi	r3, -1, r3	/* No real fixup required */
	bne	r3, ZERO, tr0

___clear_user_exit:
	or	r3, ZERO, r2	/* r2 = bytes not cleared (0 on success) */
	ptabs	LINK, tr0
	blink	tr0, ZERO
1587
1588#endif /* CONFIG_MMU */
1589
1590/*
1591 * extern long __get_user_asm_?(void *val, long addr)
1592 *
1593 * Inputs:
1594 * (r2) dest address
1595 * (r3) source address (in User Space)
1596 *
 * Outputs:
1598 * (r2) -EFAULT (faulting)
1599 * 0 (not faulting)
1600 */
	.global	__get_user_asm_b
__get_user_asm_b:
	/* Byte fetch from user space: r2 = kernel dest, r3 = user src.
	 * A fault at ___get_user_asm_b1 resumes at ___get_user_asm_b_exit
	 * (see __ex_table), leaving the preloaded -EFAULT in r2. */
	or	r2, ZERO, r4
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___get_user_asm_b1:
	ld.b	r3, 0, r5	/* r5 = data */
	st.b	r4, 0, r5
	or	ZERO, ZERO, r2	/* success: return 0 */

___get_user_asm_b_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__get_user_asm_w
__get_user_asm_w:
	/* 16-bit variant; same fault protocol as __get_user_asm_b. */
	or	r2, ZERO, r4
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___get_user_asm_w1:
	ld.w	r3, 0, r5	/* r5 = data */
	st.w	r4, 0, r5
	or	ZERO, ZERO, r2	/* success: return 0 */

___get_user_asm_w_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__get_user_asm_l
__get_user_asm_l:
	/* 32-bit variant; same fault protocol as __get_user_asm_b. */
	or	r2, ZERO, r4
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___get_user_asm_l1:
	ld.l	r3, 0, r5	/* r5 = data */
	st.l	r4, 0, r5
	or	ZERO, ZERO, r2	/* success: return 0 */

___get_user_asm_l_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__get_user_asm_q
__get_user_asm_q:
	/* 64-bit variant; same fault protocol as __get_user_asm_b. */
	or	r2, ZERO, r4
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___get_user_asm_q1:
	ld.q	r3, 0, r5	/* r5 = data */
	st.q	r4, 0, r5
	or	ZERO, ZERO, r2	/* success: return 0 */

___get_user_asm_q_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1659
1660/*
1661 * extern long __put_user_asm_?(void *pval, long addr)
1662 *
1663 * Inputs:
1664 * (r2) kernel pointer to value
1665 * (r3) dest address (in User Space)
1666 *
 * Outputs:
1668 * (r2) -EFAULT (faulting)
1669 * 0 (not faulting)
1670 */
	.global	__put_user_asm_b
__put_user_asm_b:
	/* Byte store to user space: r2 = kernel pointer to value,
	 * r3 = user dest.  A fault at ___put_user_asm_b1 resumes at
	 * ___put_user_asm_b_exit (see __ex_table) with -EFAULT in r2. */
	ld.b	r2, 0, r4	/* r4 = data */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_b1:
	st.b	r3, 0, r4
	or	ZERO, ZERO, r2	/* success: return 0 */

___put_user_asm_b_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__put_user_asm_w
__put_user_asm_w:
	/* 16-bit variant; same fault protocol as __put_user_asm_b. */
	ld.w	r2, 0, r4	/* r4 = data */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_w1:
	st.w	r3, 0, r4
	or	ZERO, ZERO, r2	/* success: return 0 */

___put_user_asm_w_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__put_user_asm_l
__put_user_asm_l:
	/* 32-bit variant; same fault protocol as __put_user_asm_b. */
	ld.l	r2, 0, r4	/* r4 = data */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_l1:
	st.l	r3, 0, r4
	or	ZERO, ZERO, r2	/* success: return 0 */

___put_user_asm_l_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__put_user_asm_q
__put_user_asm_q:
	/* 64-bit variant; same fault protocol as __put_user_asm_b. */
	ld.q	r2, 0, r4	/* r4 = data */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_q1:
	st.q	r3, 0, r4
	or	ZERO, ZERO, r2	/* success: return 0 */

___put_user_asm_q_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1725
panic_stash_regs:
	/* The idea is : when we get an unhandled panic, we dump the registers
	   to a known memory location, then just sit in a tight loop.
	   This allows the human to look at the memory region through the GDB
	   session (assuming the debug module's SHwy initiator isn't locked up
	   or anything), to hopefully analyze the cause of the panic. */

	/* On entry, former r15 (SP) is in DCR
	   former r0  is at resvec_save_area + 0
	   former r1  is at resvec_save_area + 8
	   former tr0 is at resvec_save_area + 32
	   DCR is the only register whose value is lost altogether.
	*/

	/* Dump layout: r0..r63 at 8*N, tr0..tr7 from 0x200, control
	 * registers from 0x240.  r0 = physical base of the dump area. */
	movi	0xffffffff80000000, r0 ! phy of dump area
	ld.q	SP, 0x000, r1 ! former r0
	st.q	r0,  0x000, r1
	ld.q	SP, 0x008, r1 ! former r1
	st.q	r0,  0x008, r1
	st.q	r0,  0x010, r2
	st.q	r0,  0x018, r3
	st.q	r0,  0x020, r4
	st.q	r0,  0x028, r5
	st.q	r0,  0x030, r6
	st.q	r0,  0x038, r7
	st.q	r0,  0x040, r8
	st.q	r0,  0x048, r9
	st.q	r0,  0x050, r10
	st.q	r0,  0x058, r11
	st.q	r0,  0x060, r12
	st.q	r0,  0x068, r13
	st.q	r0,  0x070, r14
	getcon	dcr, r14	! former r15 (SP) was stashed in DCR
	st.q	r0,  0x078, r14
	st.q	r0,  0x080, r16
	st.q	r0,  0x088, r17
	st.q	r0,  0x090, r18
	st.q	r0,  0x098, r19
	st.q	r0,  0x0a0, r20
	st.q	r0,  0x0a8, r21
	st.q	r0,  0x0b0, r22
	st.q	r0,  0x0b8, r23
	st.q	r0,  0x0c0, r24
	st.q	r0,  0x0c8, r25
	st.q	r0,  0x0d0, r26
	st.q	r0,  0x0d8, r27
	st.q	r0,  0x0e0, r28
	st.q	r0,  0x0e8, r29
	st.q	r0,  0x0f0, r30
	st.q	r0,  0x0f8, r31
	st.q	r0,  0x100, r32
	st.q	r0,  0x108, r33
	st.q	r0,  0x110, r34
	st.q	r0,  0x118, r35
	st.q	r0,  0x120, r36
	st.q	r0,  0x128, r37
	st.q	r0,  0x130, r38
	st.q	r0,  0x138, r39
	st.q	r0,  0x140, r40
	st.q	r0,  0x148, r41
	st.q	r0,  0x150, r42
	st.q	r0,  0x158, r43
	st.q	r0,  0x160, r44
	st.q	r0,  0x168, r45
	st.q	r0,  0x170, r46
	st.q	r0,  0x178, r47
	st.q	r0,  0x180, r48
	st.q	r0,  0x188, r49
	st.q	r0,  0x190, r50
	st.q	r0,  0x198, r51
	st.q	r0,  0x1a0, r52
	st.q	r0,  0x1a8, r53
	st.q	r0,  0x1b0, r54
	st.q	r0,  0x1b8, r55
	st.q	r0,  0x1c0, r56
	st.q	r0,  0x1c8, r57
	st.q	r0,  0x1d0, r58
	st.q	r0,  0x1d8, r59
	st.q	r0,  0x1e0, r60
	st.q	r0,  0x1e8, r61
	st.q	r0,  0x1f0, r62
	st.q	r0,  0x1f8, r63	! bogus, but for consistency's sake...

	/* Target registers: tr0 from the save area, tr1..tr7 live. */
	ld.q	SP, 0x020, r1  ! former tr0
	st.q	r0,  0x200, r1
	gettr	tr1, r1
	st.q	r0,  0x208, r1
	gettr	tr2, r1
	st.q	r0,  0x210, r1
	gettr	tr3, r1
	st.q	r0,  0x218, r1
	gettr	tr4, r1
	st.q	r0,  0x220, r1
	gettr	tr5, r1
	st.q	r0,  0x228, r1
	gettr	tr6, r1
	st.q	r0,  0x230, r1
	gettr	tr7, r1
	st.q	r0,  0x238, r1

	/* Control registers (sr..resvec) into 0x240..0x2a8. */
	getcon	sr,  r1
	getcon	ssr,  r2
	getcon	pssr,  r3
	getcon	spc,  r4
	getcon	pspc,  r5
	getcon	intevt,  r6
	getcon	expevt,  r7
	getcon	pexpevt,  r8
	getcon	tra,  r9
	getcon	tea,  r10
	getcon	kcr0, r11
	getcon	kcr1, r12
	getcon	vbr,  r13
	getcon	resvec,  r14

	st.q	r0,  0x240, r1
	st.q	r0,  0x248, r2
	st.q	r0,  0x250, r3
	st.q	r0,  0x258, r4
	st.q	r0,  0x260, r5
	st.q	r0,  0x268, r6
	st.q	r0,  0x270, r7
	st.q	r0,  0x278, r8
	st.q	r0,  0x280, r9
	st.q	r0,  0x288, r10
	st.q	r0,  0x290, r11
	st.q	r0,  0x298, r12
	st.q	r0,  0x2a0, r13
	st.q	r0,  0x2a8, r14

	/* Hand off to panic_handler(SPC, SSR, EXPEVT) at its physical
	 * address, with SP = the original (pre-panic) stack pointer. */
	getcon	SPC,r2
	getcon	SSR,r3
	getcon	EXPEVT,r4
	/* Prepare to jump to C - physical address */
	movi	panic_handler-CONFIG_PAGE_OFFSET, r1
	ori	r1, 1, r1
	ptabs	r1, tr0
	getcon	DCR, SP
	blink	tr0, ZERO
	nop
	nop
	nop
	nop
1869
1870
1871
1872
1873/*
1874 * --- Signal Handling Section
1875 */
1876
1877/*
1878 * extern long long _sa_default_rt_restorer
1879 * extern long long _sa_default_restorer
1880 *
1881 * or, better,
1882 *
1883 * extern void _sa_default_rt_restorer(void)
1884 * extern void _sa_default_restorer(void)
1885 *
1886 * Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn()
1887 * from user space. Copied into user space by signal management.
1888 * Both must be quad aligned and 2 quad long (4 instructions).
1889 *
1890 */
	.balign 8
	.global sa_default_rt_restorer
sa_default_rt_restorer:
	/* User-mode trampoline copied onto the signal stack: issue the
	 * rt_sigreturn trap.  movi+shori build r9 = 0x10<<16 | nr, the
	 * 0x1yzzzz syscall marker checked by system_call above. */
	movi	0x10, r9
	shori	__NR_rt_sigreturn, r9
	trapa	r9
	nop
1898
	.balign 8
	.global sa_default_restorer
sa_default_restorer:
	/* Same trampoline for the non-RT sigreturn syscall. */
	movi	0x10, r9
	shori	__NR_sigreturn, r9
	trapa	r9
	nop
1906
1907/*
1908 * --- __ex_table Section
1909 */
1910
1911/*
1912 * User Access Exception Table.
1913 */
	.section	__ex_table, "a"

	.global asm_uaccess_start	/* Just a marker */
asm_uaccess_start:

	/* Each entry pairs the address of a potentially-faulting uaccess
	 * instruction with the fixup address execution resumes at if it
	 * does fault (cf. the ___*_exit labels above, which return the
	 * preloaded error code). */
#ifdef CONFIG_MMU
	.long ___copy_user1, ___copy_user_exit
	.long ___copy_user2, ___copy_user_exit
	.long ___clear_user1, ___clear_user_exit
#endif
	.long ___get_user_asm_b1, ___get_user_asm_b_exit
	.long ___get_user_asm_w1, ___get_user_asm_w_exit
	.long ___get_user_asm_l1, ___get_user_asm_l_exit
	.long ___get_user_asm_q1, ___get_user_asm_q_exit
	.long ___put_user_asm_b1, ___put_user_asm_b_exit
	.long ___put_user_asm_w1, ___put_user_asm_w_exit
	.long ___put_user_asm_l1, ___put_user_asm_l_exit
	.long ___put_user_asm_q1, ___put_user_asm_q_exit

	.global asm_uaccess_end		/* Just a marker */
asm_uaccess_end:
1935
1936
1937
1938
1939/*
1940 * --- .init.text Section
1941 */
1942
1943 __INIT
1944
1945/*
1946 * void trap_init (void)
1947 *
1948 */
	.global	trap_init
trap_init:
	/* Install the exception vector bases (VBR virtual, RESVEC
	 * physical/MMU-off), sanity-check the handler block size, then
	 * unblock exceptions. */
	addi	SP, -24, SP			/* Room to save r28/r29/r30 */
	st.q	SP, 0, r28
	st.q	SP, 8, r29
	st.q	SP, 16, r30

	/* Set VBR and RESVEC */
	movi	LVBR_block, r19
	andi	r19, -4, r19			/* reset MMUOFF + reserved */
	/* For RESVEC exceptions we force the MMU off, which means we need the
	   physical address. */
	movi	LRESVEC_block-CONFIG_PAGE_OFFSET, r20
	andi	r20, -4, r20			/* reset reserved */
	ori	r20, 1, r20			/* set MMUOFF */
	putcon	r19, VBR
	putcon	r20, RESVEC

	/* Sanity check */
	movi	LVBR_block_end, r21
	andi	r21, -4, r21
	movi	BLOCK_SIZE, r29			/* r29 = expected size */
	or	r19, ZERO, r30
	add	r19, r29, r19

	/*
	 * Ugly, but better loop forever now than crash afterwards.
	 * We should print a message, but if we touch LVBR or
	 * LRESVEC blocks we should not be surprised if we get stuck
	 * in trap_init().
	 */
	pta	trap_init_loop, tr1
	gettr	tr1, r28			/* r28 = trap_init_loop */
	sub	r21, r30, r30			/* r30 = actual size */

	/*
	 * VBR/RESVEC handlers overlap by being bigger than
	 * allowed. Very bad. Just loop forever.
	 * (r28) panic/loop address
	 * (r29) expected size
	 * (r30) actual size
	 */
trap_init_loop:
	bne	r19, r21, tr1

	/* Now that exception vectors are set up reset SR.BL */
	getcon 	SR, r22
	movi	SR_UNBLOCK_EXC, r23
	and	r22, r23, r22
	putcon	r22, SR

	/* NOTE(review): r28-r30 are stored on entry but never reloaded
	 * before the stack is popped here -- confirm callers do not rely
	 * on these callee-saved registers, or a restore is missing. */
	addi	SP, 24, SP
	ptabs	LINK, tr0
	blink	tr0, ZERO
2003
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * arch/sh/kernel/cpu/sh5/entry.S
4 *
5 * Copyright (C) 2000, 2001 Paolo Alberelli
6 * Copyright (C) 2004 - 2008 Paul Mundt
7 * Copyright (C) 2003, 2004 Richard Curnow
8 */
9#include <linux/errno.h>
10#include <linux/init.h>
11#include <linux/sys.h>
12#include <cpu/registers.h>
13#include <asm/processor.h>
14#include <asm/unistd.h>
15#include <asm/thread_info.h>
16#include <asm/asm-offsets.h>
17
18/*
19 * SR fields.
20 */
21#define SR_ASID_MASK 0x00ff0000
22#define SR_FD_MASK 0x00008000
23#define SR_SS 0x08000000
24#define SR_BL 0x10000000
25#define SR_MD 0x40000000
26
27/*
28 * Event code.
29 */
30#define EVENT_INTERRUPT 0
31#define EVENT_FAULT_TLB 1
32#define EVENT_FAULT_NOT_TLB 2
33#define EVENT_DEBUG 3
34
35/* EXPEVT values */
36#define RESET_CAUSE 0x20
37#define DEBUGSS_CAUSE 0x980
38
39/*
40 * Frame layout. Quad index.
41 */
42#define FRAME_T(x) FRAME_TBASE+(x*8)
43#define FRAME_R(x) FRAME_RBASE+(x*8)
44#define FRAME_S(x) FRAME_SBASE+(x*8)
45#define FSPC 0
46#define FSSR 1
47#define FSYSCALL_ID 2
48
49/* Arrange the save frame to be a multiple of 32 bytes long */
50#define FRAME_SBASE 0
51#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
52#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
53#define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
54#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
55
56#define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
57#define FP_FRAME_BASE 0
58
59#define SAVED_R2 0*8
60#define SAVED_R3 1*8
61#define SAVED_R4 2*8
62#define SAVED_R5 3*8
63#define SAVED_R18 4*8
64#define SAVED_R6 5*8
65#define SAVED_TR0 6*8
66
67/* These are the registers saved in the TLB path that aren't saved in the first
68 level of the normal one. */
69#define TLB_SAVED_R25 7*8
70#define TLB_SAVED_TR1 8*8
71#define TLB_SAVED_TR2 9*8
72#define TLB_SAVED_TR3 10*8
73#define TLB_SAVED_TR4 11*8
/* Save R0/R1: the PT-migrating compiler currently dishonours -ffixed-r0 and
   -ffixed-r1, causing breakage otherwise. */
76#define TLB_SAVED_R0 12*8
77#define TLB_SAVED_R1 13*8
78
/* Disable interrupts: set the interrupt-mask bits (SR & 0xf0) to all-ones.
 * Presumably SR.IMASK -- confirm against the SH-5 SR layout.  Clobbers r6. */
#define CLI() \
	getcon	SR, r6; \
	ori	r6, 0xf0, r6; \
	putcon	r6, SR;

/* Enable interrupts: clear the interrupt-mask bits (SR & 0xf0).  Clobbers r6. */
#define STI() \
	getcon	SR, r6; \
	andi	r6, ~0xf0, r6; \
	putcon	r6, SR;

/* Under CONFIG_PREEMPT, exception exit must run with interrupts masked. */
#ifdef CONFIG_PREEMPT
#  define preempt_stop()	CLI()
#else
#  define preempt_stop()
#  define resume_kernel		restore_all
#endif
95
	.section	.data, "aw"

#define FAST_TLBMISS_STACK_CACHELINES 4
#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)

/* Register back-up area for all exceptions */
	.balign	32
	/* Allow for 16 quadwords to be pushed by fast tlbmiss handling
	 * register saves etc. */
	.fill	FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
/* This is 32 byte aligned by construction */
/* Register back-up area for all exceptions */
reg_save_area:
	/* 14 quadwords: slots SAVED_R2..SAVED_TR0 plus the TLB_SAVED_*
	 * slots, as laid out by the offsets #defined at the top of the
	 * file (TLB_SAVED_R1 = 13*8 is the last). */
	.quad	0
	.quad	0
	.quad	0
	.quad	0

	.quad	0
	.quad	0
	.quad	0
	.quad	0

	.quad	0
	.quad	0
	.quad	0
	.quad	0

	.quad	0
	.quad	0

/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
 * reentrancy. Note this area may be accessed via physical address.
 * Align so this fits a whole single cache line, for ease of purging.
 */
	.balign 32,0,32
resvec_save_area:
	/* Slots used by reset_or_panic: +0 r0, +8 r1, +32 tr0. */
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.balign 32,0,32
139
/* Jump table of 3rd level handlers */
/* One 32-bit entry per 0x20 of event code; the hex comments give the
 * EXPEVT/INTEVT value each slot corresponds to. */
trap_jtable:
	.long	do_exception_error		/* 0x000 */
	.long	do_exception_error		/* 0x020 */
#ifdef CONFIG_MMU
	.long	tlb_miss_load				/* 0x040 */
	.long	tlb_miss_store				/* 0x060 */
#else
	.long	do_exception_error
	.long	do_exception_error
#endif
	! ARTIFICIAL pseudo-EXPEVT setting
	.long	do_debug_interrupt		/* 0x080 */
#ifdef CONFIG_MMU
	.long	tlb_miss_load				/* 0x0A0 */
	.long	tlb_miss_store				/* 0x0C0 */
#else
	.long	do_exception_error
	.long	do_exception_error
#endif
	.long	do_address_error_load	/* 0x0E0 */
	.long	do_address_error_store	/* 0x100 */
#ifdef CONFIG_SH_FPU
	.long	do_fpu_error		/* 0x120 */
#else
	.long	do_exception_error		/* 0x120 */
#endif
	.long	do_exception_error	/* 0x140 */
	.long	system_call				/* 0x160 */
	.long	do_reserved_inst		/* 0x180 */
	.long	do_illegal_slot_inst	/* 0x1A0 */
	.long	do_exception_error		/* 0x1C0 - NMI */
	.long	do_exception_error		/* 0x1E0 */
	.rept 15
		.long do_IRQ		/* 0x200 - 0x3C0 */
	.endr
	.long	do_exception_error	/* 0x3E0 */
	.rept 32
		.long do_IRQ		/* 0x400 - 0x7E0 */
	.endr
	.long	fpu_error_or_IRQA	/* 0x800 */
	.long	fpu_error_or_IRQB	/* 0x820 */
	.long	do_IRQ			/* 0x840 */
	.long	do_IRQ			/* 0x860 */
	.rept 6
		.long do_exception_error	/* 0x880 - 0x920 */
	.endr
	.long	breakpoint_trap_handler	/* 0x940 */
	.long	do_exception_error	/* 0x960 */
	.long	do_single_step		/* 0x980 */

	.rept 3
		.long do_exception_error	/* 0x9A0 - 0x9E0 */
	.endr
	.long	do_IRQ			/* 0xA00 */
	.long	do_IRQ			/* 0xA20 */
#ifdef CONFIG_MMU
	.long	itlb_miss_or_IRQ	/* 0xA40 */
#else
	.long	do_IRQ
#endif
	.long	do_IRQ			/* 0xA60 */
	.long	do_IRQ			/* 0xA80 */
#ifdef CONFIG_MMU
	.long	itlb_miss_or_IRQ	/* 0xAA0 */
#else
	.long	do_IRQ
#endif
	.long	do_exception_error	/* 0xAC0 */
	.long	do_address_error_exec	/* 0xAE0 */
	.rept 8
		.long do_exception_error	/* 0xB00 - 0xBE0 */
	.endr
	.rept 18
		.long do_IRQ		/* 0xC00 - 0xE20 */
	.endr
216
217 .section .text64, "ax"
218
219/*
220 * --- Exception/Interrupt/Event Handling Section
221 */
222
223/*
224 * VBR and RESVEC blocks.
225 *
226 * First level handler for VBR-based exceptions.
227 *
228 * To avoid waste of space, align to the maximum text block size.
229 * This is assumed to be at most 128 bytes or 32 instructions.
230 * DO NOT EXCEED 32 instructions on the first level handlers !
231 *
232 * Also note that RESVEC is contained within the VBR block
233 * where the room left (1KB - TEXT_SIZE) allows placing
234 * the RESVEC block (at most 512B + TEXT_SIZE).
235 *
236 * So first (and only) level handler for RESVEC-based exceptions.
237 *
238 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
239 * and interrupt) we are a lot tight with register space until
240 * saving onto the stack frame, which is done in handle_exception().
241 *
242 */
243
244#define TEXT_SIZE 128
245#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
246
	.balign TEXT_SIZE
LVBR_block:
	.space 256, 0 /* Power-on class handler, */
		      /* not required here       */
not_a_tlb_miss:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	/* Save original stack pointer into KCR1 */
	putcon	SP, KCR1

	/* Save other original registers into reg_save_area */
	movi  reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3

	/* Set args for Non-debug, Not a TLB miss class handler:
	 * r2 = event code, r3 = second-level return address, r4 = event
	 * class, r5 = reg save area; SP is restored before the jump. */
	getcon	EXPEVT, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3
	movi	EVENT_FAULT_NOT_TLB, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP
	pta	handle_exception, tr0
	blink	tr0, ZERO
276
277 .balign 256
278 ! VBR+0x200
279 nop
280 .balign 256
281 ! VBR+0x300
282 nop
283 .balign 256
284 /*
285 * Instead of the natural .balign 1024 place RESVEC here
286 * respecting the final 1KB alignment.
287 */
288 .balign TEXT_SIZE
289 /*
290 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
291 * block making sure the final alignment is correct.
292 */
293#ifdef CONFIG_MMU
tlb_miss:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	putcon	SP, KCR1
	movi	reg_save_area, SP
	/* SP is guaranteed 32-byte aligned. */
	st.q	SP, TLB_SAVED_R0 , r0
	st.q	SP, TLB_SAVED_R1 , r1
	st.q	SP, SAVED_R2 , r2
	st.q	SP, SAVED_R3 , r3
	st.q	SP, SAVED_R4 , r4
	st.q	SP, SAVED_R5 , r5
	st.q	SP, SAVED_R6 , r6
	st.q	SP, SAVED_R18, r18

	/* Save R25 for safety; as/ld may want to use it to achieve the call to
	 * the code in mm/tlbmiss.c */
	st.q	SP, TLB_SAVED_R25, r25
	gettr	tr0, r2
	gettr	tr1, r3
	gettr	tr2, r4
	gettr	tr3, r5
	gettr	tr4, r18
	st.q	SP, SAVED_TR0 , r2
	st.q	SP, TLB_SAVED_TR1 , r3
	st.q	SP, TLB_SAVED_TR2 , r4
	st.q	SP, TLB_SAVED_TR3 , r5
	st.q	SP, TLB_SAVED_TR4 , r18

	/* Fast-path attempt: call do_fast_page_fault with
	 * r2 = SSR.MD, r3 = EXPEVT, r4 = TEA. */
	pt	do_fast_page_fault, tr0
	getcon	SSR, r2
	getcon	EXPEVT, r3
	getcon	TEA, r4
	shlri	r2, 30, r2
	andi	r2, 1, r2	/* r2 = SSR.MD */
	blink	tr0, LINK

	pt	fixup_to_invoke_general_handler, tr1

	/* If the fast path handler fixed the fault, just drop through quickly
	   to the restore code right away to return to the excepting context.
	 */
	bnei/u	r2, 0, tr1	/* nonzero return = not handled: go slow */

fast_tlb_miss_restore:
	/* Fault fixed up: restore everything the fast path saved and
	 * resume the excepting context directly with an RTE. */
	ld.q	SP, SAVED_TR0, r2
	ld.q	SP, TLB_SAVED_TR1, r3
	ld.q	SP, TLB_SAVED_TR2, r4

	ld.q	SP, TLB_SAVED_TR3, r5
	ld.q	SP, TLB_SAVED_TR4, r18

	ptabs	r2, tr0
	ptabs	r3, tr1
	ptabs	r4, tr2
	ptabs	r5, tr3
	ptabs	r18, tr4

	ld.q	SP, TLB_SAVED_R0, r0
	ld.q	SP, TLB_SAVED_R1, r1
	ld.q	SP, SAVED_R2, r2
	ld.q	SP, SAVED_R3, r3
	ld.q	SP, SAVED_R4, r4
	ld.q	SP, SAVED_R5, r5
	ld.q	SP, SAVED_R6, r6
	ld.q	SP, SAVED_R18, r18
	ld.q	SP, TLB_SAVED_R25, r25

	getcon	KCR1, SP
	rte
	nop /* for safety, in case the code is run on sh5-101 cut1.x */

fixup_to_invoke_general_handler:

	/* OK, new method. Restore stuff that's not expected to get saved into
	   the 'first-level' reg save area, then just fall through to setting
	   up the registers and calling the second-level handler. */

	/* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
	   r25,tr1-4 and save r6 to get into the right state. */

	ld.q	SP, TLB_SAVED_TR1, r3
	ld.q	SP, TLB_SAVED_TR2, r4
	ld.q	SP, TLB_SAVED_TR3, r5
	ld.q	SP, TLB_SAVED_TR4, r18
	ld.q	SP, TLB_SAVED_R25, r25

	ld.q	SP, TLB_SAVED_R0, r0
	ld.q	SP, TLB_SAVED_R1, r1

	ptabs/u	r3, tr1
	ptabs/u	r4, tr2
	ptabs/u	r5, tr3
	ptabs/u	r18, tr4

	/* Set args for Non-debug, TLB miss class handler */
	getcon	EXPEVT, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3
	movi	EVENT_FAULT_TLB, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP
	pta	handle_exception, tr0
	blink	tr0, ZERO
397#else /* CONFIG_MMU */
398 .balign 256
399#endif
400
401/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
402   DOES END UP AT VBR+0x600 */
403	nop
404	nop
405	nop
406	nop
407	nop
408	nop
409
410	.balign 256
411	/* VBR + 0x600 */
412
413interrupt:
	/* First-level handler for external interrupts. Saves the minimal
	 * register set (r2-r6, r18, tr0) into the static reg_save_area and
	 * transfers to the common second-level handler with the interrupt
	 * event code from INTEVT in r2. Original SP is kept in KCR1. */
414	synco	/* TAKum03020 (but probably a good idea anyway.) */
415	/* Save original stack pointer into KCR1 */
416	putcon	SP, KCR1
417
418	/* Save other original registers into reg_save_area */
419	movi	reg_save_area, SP
420	st.q	SP, SAVED_R2, r2
421	st.q	SP, SAVED_R3, r3
422	st.q	SP, SAVED_R4, r4
423	st.q	SP, SAVED_R5, r5
424	st.q	SP, SAVED_R6, r6
425	st.q	SP, SAVED_R18, r18
426	gettr	tr0, r3
427	st.q	SP, SAVED_TR0, r3
428
429	/* Set args for interrupt class handler */
430	getcon	INTEVT, r2
431	movi	ret_from_irq, r3
432	ori	r3, 1, r3	/* bit0 set = SHmedia target address */
433	movi	EVENT_INTERRUPT, r4
434	or	SP, ZERO, r5
435	getcon	KCR1, SP
436	pta	handle_exception, tr0
437	blink	tr0, ZERO
438	.balign	TEXT_SIZE /* let's waste the bare minimum */
439
440LVBR_block_end:	/* Marker. Used for total checking */
441
442	.balign 256
443LRESVEC_block:
444	/* Panic handler. Called with MMU off. Possible causes/actions:
445	 * - Reset:		Jump to program start.
446	 * - Single Step:	Turn off Single Step & return.
447	 * - Others:		Call panic handler, passing PC as arg.
448	 *			(this may need to be extended...)
449	 */
450reset_or_panic:
	/* Runs in real mode (MMU off): all addresses used here must have
	 * CONFIG_PAGE_OFFSET subtracted to form physical addresses. */
451	synco	/* TAKum03020 (but probably a good idea anyway.) */
452	putcon	SP, DCR
453	/* First save r0-1 and tr0, as we need to use these */
454	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
455	st.q	SP, 0, r0
456	st.q	SP, 8, r1
457	gettr	tr0, r0
458	st.q	SP, 32, r0
459
460	/* Check cause */
461	getcon	EXPEVT, r0
462	movi	RESET_CAUSE, r1
463	sub	r1, r0, r1	/* r1=0 if reset */
464	movi	_stext-CONFIG_PAGE_OFFSET, r0
465	ori	r0, 1, r0
466	ptabs	r0, tr0
467	beqi	r1, 0, tr0	/* Jump to start address if reset */
468
469	getcon	EXPEVT, r0
470	movi	DEBUGSS_CAUSE, r1
471	sub	r1, r0, r1	/* r1=0 if single step */
472	pta	single_step_panic, tr0
473	beqi	r1, 0, tr0	/* jump if single step */
474
475	/* Now jump to where we save the registers. */
476	movi	panic_stash_regs-CONFIG_PAGE_OFFSET, r1
477	ptabs	r1, tr0
478	blink	tr0, r63
479
480single_step_panic:
481	/* We are in a handler with Single Step set. We need to resume the
482	 * handler, by turning on MMU & turning off Single Step. */
483	getcon	SSR, r0
484	movi	SR_MMU, r1
485	or	r0, r1, r0	/* SSR.MMU on, so rte resumes in virtual mode */
486	movi	~SR_SS, r1
487	and	r0, r1, r0	/* SSR.SS off, so single step is disabled */
488	putcon	r0, SSR
489	/* Restore EXPEVT, as the rte won't do this */
490	getcon	PEXPEVT, r0
491	putcon	r0, EXPEVT
492	/* Restore regs */
493	ld.q	SP, 32, r0
494	ptabs	r0, tr0
495	ld.q	SP, 0, r0
496	ld.q	SP, 8, r1
497	getcon	DCR, SP	/* original SP was parked in DCR by reset_or_panic */
498	synco
499	rte
500
501
502	.balign	256
503debug_exception:
504	synco	/* TAKum03020 (but probably a good idea anyway.) */
505	/*
506	 * Single step/software_break_point first level handler.
507	 * Called with MMU off, so the first thing we do is enable it
508	 * by doing an rte with appropriate SSR.
509	 */
510	putcon	SP, DCR
511	/* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
512	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
513
514	/* With the MMU off, we are bypassing the cache, so purge any
515	 * data that will be made stale by the following stores.
516	 */
517	ocbp	SP, 0
518	synco
519
520	st.q	SP, 0, r0
521	st.q	SP, 8, r1
522	getcon	SPC, r0
523	st.q	SP, 16, r0
524	getcon	SSR, r0
525	st.q	SP, 24, r0
526
527	/* Enable MMU, block exceptions, set priv mode, disable single step */
528	movi	SR_MMU | SR_BL | SR_MD, r1
529	or	r0, r1, r0
530	movi	~SR_SS, r1
531	and	r0, r1, r0
532	putcon	r0, SSR
533	/* Force control to debug_exception_2 when rte is executed */
534	movi	debug_exception_2, r0
535	ori	r0, 1, r0	/* force SHmedia, just in case */
536	putcon	r0, SPC
537	getcon	DCR, SP
538	synco
539	rte
540debug_exception_2:
	/* Now running with the MMU on again (virtual addresses). */
541	/* Restore saved regs */
542	putcon	SP, KCR1
543	movi	resvec_save_area, SP
544	ld.q	SP, 24, r0
545	putcon	r0, SSR
546	ld.q	SP, 16, r0
547	putcon	r0, SPC
548	ld.q	SP, 0, r0
549	ld.q	SP, 8, r1
550
551	/* Save other original registers into reg_save_area */
552	movi	reg_save_area, SP
553	st.q	SP, SAVED_R2, r2
554	st.q	SP, SAVED_R3, r3
555	st.q	SP, SAVED_R4, r4
556	st.q	SP, SAVED_R5, r5
557	st.q	SP, SAVED_R6, r6
558	st.q	SP, SAVED_R18, r18
559	gettr	tr0, r3
560	st.q	SP, SAVED_TR0, r3
561
562	/* Set args for debug class handler */
563	getcon	EXPEVT, r2
564	movi	ret_from_exception, r3
565	ori	r3, 1, r3	/* bit0 set = SHmedia target address */
566	movi	EVENT_DEBUG, r4
567	or	SP, ZERO, r5
568	getcon	KCR1, SP
569	pta	handle_exception, tr0
570	blink	tr0, ZERO
571
572	.balign	256
573debug_interrupt:
	/* Debug-interrupt first-level handler. Entered in real mode; uses an
	 * rte (with SSR = SR | MD | MMU) both to return to privileged virtual
	 * mode and to vector straight into handle_exception. */
574	/* !!! WE COME HERE IN REAL MODE !!! */
575	/* Hook-up debug interrupt to allow various debugging options to be
576	 * hooked into its handler. */
577	/* Save original stack pointer into KCR1 */
578	synco
579	putcon	SP, KCR1
580	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
581	ocbp	SP, 0	/* purge stale cache lines; stores below bypass the cache */
582	ocbp	SP, 32
583	synco
584
585	/* Save other original registers into reg_save_area thru real addresses */
586	st.q	SP, SAVED_R2, r2
587	st.q	SP, SAVED_R3, r3
588	st.q	SP, SAVED_R4, r4
589	st.q	SP, SAVED_R5, r5
590	st.q	SP, SAVED_R6, r6
591	st.q	SP, SAVED_R18, r18
592	gettr	tr0, r3
593	st.q	SP, SAVED_TR0, r3
594
595	/* move (spc,ssr)->(pspc,pssr).  The rte will shift
596	   them back again, so that they look like the originals
597	   as far as the real handler code is concerned. */
598	getcon	spc, r6
599	putcon	r6, pspc
600	getcon	ssr, r6
601	putcon	r6, pssr
602
603	! construct useful SR for handle_exception
604	movi	3, r6
605	shlli	r6, 30, r6	! r6 = MD | MMU (top two SR bits)
606	getcon	sr, r18
607	or	r18, r6, r6
608	putcon	r6, ssr
609
610	! SSR is now the current SR with the MD and MMU bits set
611	! i.e. the rte will switch back to priv mode and put
612	! the mmu back on
613
614	! construct spc
615	movi	handle_exception, r18
616	ori	r18, 1, r18	! for safety (do we need this?)
617	putcon	r18, spc
618
619	/* Set args for Non-debug, Not a TLB miss class handler */
620
621	! EXPEVT==0x80 is unused, so 'steal' this value to put the
622	! debug interrupt handler in the vectoring table
623	movi	0x80, r2
624	movi	ret_from_exception, r3
625	ori	r3, 1, r3
626	movi	EVENT_FAULT_NOT_TLB, r4
627
628	or	SP, ZERO, r5
629	movi	CONFIG_PAGE_OFFSET, r6
630	add	r6, r5, r5	! r5 = virtual address of reg_save_area
631	getcon	KCR1, SP
632
633	synco	! for safety
634	rte	! -> handle_exception, switch back to priv mode again
635
636LRESVEC_block_end:	/* Marker. Unused. */
637
638	.balign	TEXT_SIZE
639
640/*
641 * Second level handler for VBR-based exceptions. Pre-handler.
642 * In common to all stack-frame sensitive handlers.
643 *
644 * Inputs:
645 *	(KCR0) Current [current task union]
646 *	(KCR1) Original SP
647 *	(r2) INTEVT/EXPEVT
648 *	(r3) appropriate return address
649 *	(r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
650 *	(r5) Pointer to reg_save_area
651 *	(SP) Original SP
652 *
653 * Available registers:
654 *	(r6)
655 *	(r18)
656 *	(tr0)
657 *
658 */
659handle_exception:
660	/* Common 2nd level handler. */
661
662	/* First thing we need an appropriate stack pointer */
663	getcon	SSR, r6
664	shlri	r6, 30, r6
665	andi	r6, 1, r6	/* r6 = SSR.MD: 1 if we faulted in kernel mode */
666	pta	stack_ok, tr0
667	bne	r6, ZERO, tr0	/* Original stack pointer is fine */
668
669	/* Set stack pointer for user fault */
670	getcon	KCR0, SP
671	movi	THREAD_SIZE, r6		/* Point to the end */
672	add	SP, r6, SP
673
674stack_ok:
675
676/* DEBUG : check for underflow/overflow of the kernel stack */
677	pta	no_underflow, tr0
678	getcon  KCR0, r6
679	movi	1024, r18
680	add	r6, r18, r6
681	bge	SP, r6, tr0	! ? below 1k from bottom of stack : danger zone
682
683/* Just panic to cause a crash. */
684bad_sp:
685	ld.b	r63, 0, r6	/* deliberate fault: load from address 0 */
686	nop
687
688no_underflow:
689	pta	bad_sp, tr0
690	getcon	kcr0, r6
691	movi	THREAD_SIZE, r18
692	add	r18, r6, r6
693	bgt	SP, r6, tr0	! sp above the stack
694
695	/* Make some room for the BASIC frame. */
696	movi	-(FRAME_SIZE), r6
697	add	SP, r6, SP
698
699/* Could do this with no stalling if we had another spare register, but the
700   code below will be OK. */
701	ld.q	r5, SAVED_R2, r6
702	ld.q	r5, SAVED_R3, r18
703	st.q	SP, FRAME_R(2), r6
704	ld.q	r5, SAVED_R4, r6
705	st.q	SP, FRAME_R(3), r18
706	ld.q	r5, SAVED_R5, r18
707	st.q	SP, FRAME_R(4), r6
708	ld.q	r5, SAVED_R6, r6
709	st.q	SP, FRAME_R(5), r18
710	ld.q	r5, SAVED_R18, r18
711	st.q	SP, FRAME_R(6), r6
712	ld.q	r5, SAVED_TR0, r6
713	st.q	SP, FRAME_R(18), r18
714	st.q	SP, FRAME_T(0), r6
715
716	/* Keep old SP around */
717	getcon	KCR1, r6
718
719	/* Save the rest of the general purpose registers */
720	st.q	SP, FRAME_R(0), r0
721	st.q	SP, FRAME_R(1), r1
722	st.q	SP, FRAME_R(7), r7
723	st.q	SP, FRAME_R(8), r8
724	st.q	SP, FRAME_R(9), r9
725	st.q	SP, FRAME_R(10), r10
726	st.q	SP, FRAME_R(11), r11
727	st.q	SP, FRAME_R(12), r12
728	st.q	SP, FRAME_R(13), r13
729	st.q	SP, FRAME_R(14), r14
730
731	/* SP is somewhere else */
732	st.q	SP, FRAME_R(15), r6
733
734	st.q	SP, FRAME_R(16), r16
735	st.q	SP, FRAME_R(17), r17
736	/* r18 is saved earlier. */
737	st.q	SP, FRAME_R(19), r19
738	st.q	SP, FRAME_R(20), r20
739	st.q	SP, FRAME_R(21), r21
740	st.q	SP, FRAME_R(22), r22
741	st.q	SP, FRAME_R(23), r23
742	st.q	SP, FRAME_R(24), r24
743	st.q	SP, FRAME_R(25), r25
744	st.q	SP, FRAME_R(26), r26
745	st.q	SP, FRAME_R(27), r27
746	st.q	SP, FRAME_R(28), r28
747	st.q	SP, FRAME_R(29), r29
748	st.q	SP, FRAME_R(30), r30
749	st.q	SP, FRAME_R(31), r31
750	st.q	SP, FRAME_R(32), r32
751	st.q	SP, FRAME_R(33), r33
752	st.q	SP, FRAME_R(34), r34
753	st.q	SP, FRAME_R(35), r35
754	st.q	SP, FRAME_R(36), r36
755	st.q	SP, FRAME_R(37), r37
756	st.q	SP, FRAME_R(38), r38
757	st.q	SP, FRAME_R(39), r39
758	st.q	SP, FRAME_R(40), r40
759	st.q	SP, FRAME_R(41), r41
760	st.q	SP, FRAME_R(42), r42
761	st.q	SP, FRAME_R(43), r43
762	st.q	SP, FRAME_R(44), r44
763	st.q	SP, FRAME_R(45), r45
764	st.q	SP, FRAME_R(46), r46
765	st.q	SP, FRAME_R(47), r47
766	st.q	SP, FRAME_R(48), r48
767	st.q	SP, FRAME_R(49), r49
768	st.q	SP, FRAME_R(50), r50
769	st.q	SP, FRAME_R(51), r51
770	st.q	SP, FRAME_R(52), r52
771	st.q	SP, FRAME_R(53), r53
772	st.q	SP, FRAME_R(54), r54
773	st.q	SP, FRAME_R(55), r55
774	st.q	SP, FRAME_R(56), r56
775	st.q	SP, FRAME_R(57), r57
776	st.q	SP, FRAME_R(58), r58
777	st.q	SP, FRAME_R(59), r59
778	st.q	SP, FRAME_R(60), r60
779	st.q	SP, FRAME_R(61), r61
780	st.q	SP, FRAME_R(62), r62
781
782	/*
783	 * Save the S* registers.
784	 */
785	getcon	SSR, r61
786	st.q	SP, FRAME_S(FSSR), r61
787	getcon	SPC, r62
788	st.q	SP, FRAME_S(FSPC), r62
789	movi	-1, r62			/* Reset syscall_nr */
790	st.q	SP, FRAME_S(FSYSCALL_ID), r62
791
792	/* Save the rest of the target registers */
793	gettr	tr1, r6
794	st.q	SP, FRAME_T(1), r6
795	gettr	tr2, r6
796	st.q	SP, FRAME_T(2), r6
797	gettr	tr3, r6
798	st.q	SP, FRAME_T(3), r6
799	gettr	tr4, r6
800	st.q	SP, FRAME_T(4), r6
801	gettr	tr5, r6
802	st.q	SP, FRAME_T(5), r6
803	gettr	tr6, r6
804	st.q	SP, FRAME_T(6), r6
805	gettr	tr7, r6
806	st.q	SP, FRAME_T(7), r6
807
808	! setup FP so that unwinder can wind back through nested kernel mode
809	! exceptions
810	add	SP, ZERO, r14
811
812	/* For syscall and debug race condition, get TRA now */
813	getcon	TRA, r5
814
815	/* We are in a safe position to turn SR.BL off, but set IMASK=0xf
816	 * Also set FD, to catch FPU usage in the kernel.
817	 *
818	 * benedict.gaster@superh.com 29/07/2002
819	 *
820	 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
821	 * same time change BL from 1->0, as any pending interrupt of a level
822	 * higher than the previous value of IMASK will leak through and be
823	 * taken unexpectedly.
824	 *
825	 * To avoid this we raise the IMASK and then issue another PUTCON to
826	 * enable interrupts.
827	 */
828	getcon	SR, r6
829	movi	SR_IMASK | SR_FD, r7
830	or	r6, r7, r6
831	putcon	r6, SR
832	movi	SR_UNBLOCK_EXC, r7
833	and	r6, r7, r6
834	putcon	r6, SR
835
836
837	/* Now call the appropriate 3rd level handler */
838	or	r3, ZERO, LINK	/* LINK = return address supplied by 1st level */
839	movi	trap_jtable, r3
	/* Event codes are multiples of 0x20; >>3 turns the code into a byte
	 * offset into a table of 4-byte entries (ldx.l below loads 32 bits),
	 * then a further >>2 leaves r2 = code/0x20, the plain trap number. */
840	shlri	r2, 3, r2
841	ldx.l	r2, r3, r3
842	shlri	r2, 2, r2
843	ptabs	r3, tr0
844	or	SP, ZERO, r3	/* r3 = struct pt_regs * for the 3rd level */
845	blink	tr0, ZERO
846
847/*
848 * Second level handler for VBR-based exceptions. Post-handlers.
849 *
850 * Post-handlers for interrupts (ret_from_irq), exceptions
851 * (ret_from_exception) and common reentrance doors (restore_all
852 * to get back to the original context, ret_from_syscall loop to
853 * check kernel exiting).
854 *
855 * ret_with_reschedule and work_notifysig are inner labels of
856 * the ret_from_syscall loop.
857 *
858 * In common to all stack-frame sensitive handlers.
859 *
860 * Inputs:
861 *	(SP)   struct pt_regs *, original register's frame pointer (basic)
862 *
863 */
864	.global ret_from_irq
865ret_from_irq:
	/* Extract SSR.MD (bit 30): nonzero means the IRQ interrupted kernel
	 * mode, so skip user-return work and head to resume_kernel. */
866	ld.q	SP, FRAME_S(FSSR), r6
867	shlri	r6, 30, r6
868	andi	r6, 1, r6
869	pta	resume_kernel, tr0
870	bne	r6, ZERO, tr0	/* no further checks */
871	STI()
872	pta	ret_with_reschedule, tr0
873	blink	tr0, ZERO	/* Do not check softirqs */
874
875	.global ret_from_exception
876ret_from_exception:
	/* Return path for all non-interrupt exceptions; same SSR.MD test
	 * as ret_from_irq to decide user-return vs. kernel-return work. */
877	preempt_stop()
878
879	ld.q	SP, FRAME_S(FSSR), r6
880	shlri	r6, 30, r6
881	andi	r6, 1, r6
882	pta	resume_kernel, tr0
883	bne	r6, ZERO, tr0	/* no further checks */
884
885	/* Check softirqs */
886
887#ifdef CONFIG_PREEMPT
888	pta	ret_from_syscall, tr0
889	blink	tr0, ZERO
890
resume_kernel:
891resume_kernel:
892	CLI()
893
894	pta	restore_all, tr0
895
	/* NOTE(review): the branch senses below look inverted relative to
	 * the usual kernel-preemption logic (this takes restore_all when
	 * preempt_count == 0, and again when TIF_NEED_RESCHED is SET) —
	 * verify against upstream/other arches before relying on CONFIG_PREEMPT
	 * here. Left untouched in this review. */
896	getcon	KCR0, r6
897	ld.l	r6, TI_PRE_COUNT, r7
898	beq/u	r7, ZERO, tr0
899
900need_resched:
901	ld.l	r6, TI_FLAGS, r7
902	movi	(1 << TIF_NEED_RESCHED), r8
903	and	r8, r7, r8
904	bne	r8, ZERO, tr0
905
906	getcon	SR, r7
907	andi	r7, 0xf0, r7	/* IMASK field: nonzero = interrupts masked */
908	bne	r7, ZERO, tr0
909
910	movi	preempt_schedule_irq, r7
911	ori	r7, 1, r7
912	ptabs	r7, tr1
913	blink	tr1, LINK
914
915	pta	need_resched, tr1
916	blink	tr1, ZERO
917#endif
918
919	.global ret_from_syscall
920ret_from_syscall:
921
922ret_with_reschedule:
	/* User-return work loop: reschedule if needed, then handle pending
	 * signals / notify-resume work, finally drop into restore_all. */
923	getcon	KCR0, r6		! r6 contains current_thread_info
924	ld.l	r6, TI_FLAGS, r7	! r7 contains current_thread_info->flags
925
926	movi	_TIF_NEED_RESCHED, r8
927	and	r8, r7, r8
928	pta	work_resched, tr0
929	bne	r8, ZERO, tr0
930
931	pta	restore_all, tr1	! tr1 stays live across do_notify_resume
932
933	movi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8
934	and	r8, r7, r8
935	pta	work_notifysig, tr0
936	bne	r8, ZERO, tr0
937
938	blink	tr1, ZERO
939
940work_resched:
941	pta	ret_from_syscall, tr0
942	gettr	tr0, LINK
943	movi	schedule, r6
944	ptabs	r6, tr0
945	blink	tr0, ZERO		/* Call schedule(), return on top */
946
947work_notifysig:
948	gettr	tr1, LINK	/* return straight to restore_all afterwards */
949
950	movi	do_notify_resume, r6
951	ptabs	r6, tr0
952	or	SP, ZERO, r2
953	or	r7, ZERO, r3
954	blink	tr0, LINK	    /* Call do_notify_resume(regs, current_thread_info->flags), return here */
955
956restore_all:
	/* Unwind the BASIC frame built by handle_exception and rte back to
	 * the saved (SPC, SSR) context. r59-r62 are used as scratch for the
	 * SR/SSR fixup and are therefore the last GPRs to be reloaded. */
957	/* Do prefetches */
958
959	ld.q	SP, FRAME_T(0), r6
960	ld.q	SP, FRAME_T(1), r7
961	ld.q	SP, FRAME_T(2), r8
962	ld.q	SP, FRAME_T(3), r9
963	ptabs	r6, tr0
964	ptabs	r7, tr1
965	ptabs	r8, tr2
966	ptabs	r9, tr3
967	ld.q	SP, FRAME_T(4), r6
968	ld.q	SP, FRAME_T(5), r7
969	ld.q	SP, FRAME_T(6), r8
970	ld.q	SP, FRAME_T(7), r9
971	ptabs	r6, tr4
972	ptabs	r7, tr5
973	ptabs	r8, tr6
974	ptabs	r9, tr7
975
976	ld.q	SP, FRAME_R(0), r0
977	ld.q	SP, FRAME_R(1), r1
978	ld.q	SP, FRAME_R(2), r2
979	ld.q	SP, FRAME_R(3), r3
980	ld.q	SP, FRAME_R(4), r4
981	ld.q	SP, FRAME_R(5), r5
982	ld.q	SP, FRAME_R(6), r6
983	ld.q	SP, FRAME_R(7), r7
984	ld.q	SP, FRAME_R(8), r8
985	ld.q	SP, FRAME_R(9), r9
986	ld.q	SP, FRAME_R(10), r10
987	ld.q	SP, FRAME_R(11), r11
988	ld.q	SP, FRAME_R(12), r12
989	ld.q	SP, FRAME_R(13), r13
990	ld.q	SP, FRAME_R(14), r14
991
992	ld.q	SP, FRAME_R(16), r16
993	ld.q	SP, FRAME_R(17), r17
994	ld.q	SP, FRAME_R(18), r18
995	ld.q	SP, FRAME_R(19), r19
996	ld.q	SP, FRAME_R(20), r20
997	ld.q	SP, FRAME_R(21), r21
998	ld.q	SP, FRAME_R(22), r22
999	ld.q	SP, FRAME_R(23), r23
1000	ld.q	SP, FRAME_R(24), r24
1001	ld.q	SP, FRAME_R(25), r25
1002	ld.q	SP, FRAME_R(26), r26
1003	ld.q	SP, FRAME_R(27), r27
1004	ld.q	SP, FRAME_R(28), r28
1005	ld.q	SP, FRAME_R(29), r29
1006	ld.q	SP, FRAME_R(30), r30
1007	ld.q	SP, FRAME_R(31), r31
1008	ld.q	SP, FRAME_R(32), r32
1009	ld.q	SP, FRAME_R(33), r33
1010	ld.q	SP, FRAME_R(34), r34
1011	ld.q	SP, FRAME_R(35), r35
1012	ld.q	SP, FRAME_R(36), r36
1013	ld.q	SP, FRAME_R(37), r37
1014	ld.q	SP, FRAME_R(38), r38
1015	ld.q	SP, FRAME_R(39), r39
1016	ld.q	SP, FRAME_R(40), r40
1017	ld.q	SP, FRAME_R(41), r41
1018	ld.q	SP, FRAME_R(42), r42
1019	ld.q	SP, FRAME_R(43), r43
1020	ld.q	SP, FRAME_R(44), r44
1021	ld.q	SP, FRAME_R(45), r45
1022	ld.q	SP, FRAME_R(46), r46
1023	ld.q	SP, FRAME_R(47), r47
1024	ld.q	SP, FRAME_R(48), r48
1025	ld.q	SP, FRAME_R(49), r49
1026	ld.q	SP, FRAME_R(50), r50
1027	ld.q	SP, FRAME_R(51), r51
1028	ld.q	SP, FRAME_R(52), r52
1029	ld.q	SP, FRAME_R(53), r53
1030	ld.q	SP, FRAME_R(54), r54
1031	ld.q	SP, FRAME_R(55), r55
1032	ld.q	SP, FRAME_R(56), r56
1033	ld.q	SP, FRAME_R(57), r57
1034	ld.q	SP, FRAME_R(58), r58
1035
1036	getcon	SR, r59
1037	movi	SR_BLOCK_EXC, r60
1038	or	r59, r60, r59
1039	putcon	r59, SR /* SR.BL = 1, keep nesting out */
1040	ld.q	SP, FRAME_S(FSSR), r61
1041	ld.q	SP, FRAME_S(FSPC), r62
1042	movi	SR_ASID_MASK, r60
1043	and	r59, r60, r59
1044	andc	r61, r60, r61 /* Clear out older ASID */
1045	or	r59, r61, r61 /* Retain current ASID */
1046	putcon	r61, SSR
1047	putcon	r62, SPC
1048
1049	/* Ignore FSYSCALL_ID */
1050
1051	ld.q	SP, FRAME_R(59), r59
1052	ld.q	SP, FRAME_R(60), r60
1053	ld.q	SP, FRAME_R(61), r61
1054	ld.q	SP, FRAME_R(62), r62
1055
1056	/* Last touch */
1057	ld.q	SP, FRAME_R(15), SP
1058	rte
1059	nop
1060
1061/*
1062 * Third level handlers for VBR-based exceptions. Adapting args to
1063 * and/or deflecting to fourth level handlers.
1064 *
1065 * Fourth level handlers interface.
1066 * Most are C-coded handlers directly pointed by the trap_jtable.
1067 * (Third = Fourth level)
1068 * Inputs:
1069 *	(r2)   fault/interrupt code, entry number (e.g. NMI = 14,
1070 *	       IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
1071 *	(r3)   struct pt_regs *, original register's frame pointer
1072 *	(r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
1073 *	(r5)   TRA control register (for syscall/debug benefit only)
1074 *	(LINK) return address
1075 *	(SP)   = r3
1076 *
1077 * Kernel TLB fault handlers will get a slightly different interface.
1078 * (r2)   struct pt_regs *, original register's frame pointer
1079 * (r3)   page fault error code (see asm/thread_info.h)
1080 * (r4)   Effective Address of fault
1081 * (LINK) return address
1082 * (SP)   = r2
1083 *
1084 * fpu_error_or_IRQ? is a helper to deflect to the right cause.
1085 *
1086 */
1087#ifdef CONFIG_MMU
1088tlb_miss_load:
	/* Read-miss: fault code 0, fault address from TEA. */
1089	or	SP, ZERO, r2
1090	or	ZERO, ZERO, r3	/* Read */
1091	getcon	TEA, r4
1092	pta	call_do_page_fault, tr0
1093	beq	ZERO, ZERO, tr0
1094
1095tlb_miss_store:
	/* Write-miss: same as above but with the write fault code. */
1096	or	SP, ZERO, r2
1097	movi	FAULT_CODE_WRITE, r3		/* Write */
1098	getcon	TEA, r4
1099	pta	call_do_page_fault, tr0
1100	beq	ZERO, ZERO, tr0
1101
1102itlb_miss_or_IRQ:
	/* Shared vector: dispatch to do_IRQ for interrupts, otherwise treat
	 * as an instruction-TLB miss. */
1103	pta	its_IRQ, tr0
1104	beqi/u	r4, EVENT_INTERRUPT, tr0
1105
1106	/* ITLB miss */
1107	or	SP, ZERO, r2
1108	movi	FAULT_CODE_ITLB, r3
1109	getcon	TEA, r4
1110	/* Fall through */
1111
1112call_do_page_fault:
1113	movi	do_page_fault, r6
1114        ptabs	r6, tr0
1115        blink	tr0, ZERO
1116#endif /* CONFIG_MMU */
1117
1118fpu_error_or_IRQA:
	/* Shared vector: interrupts go to do_IRQ, anything else is treated
	 * as an FPU trap (or a plain error without CONFIG_SH_FPU).
	 * Note: A and B variants are intentionally identical bodies wired
	 * to two different vector-table slots. */
1119	pta	its_IRQ, tr0
1120	beqi/l	r4, EVENT_INTERRUPT, tr0
1121#ifdef CONFIG_SH_FPU
1122	movi	fpu_state_restore_trap_handler, r6
1123#else
1124	movi	do_exception_error, r6
1125#endif
1126	ptabs	r6, tr0
1127	blink	tr0, ZERO
1128
1129fpu_error_or_IRQB:
1130	pta	its_IRQ, tr0
1131	beqi/l	r4, EVENT_INTERRUPT, tr0
1132#ifdef CONFIG_SH_FPU
1133	movi	fpu_state_restore_trap_handler, r6
1134#else
1135	movi	do_exception_error, r6
1136#endif
1137	ptabs	r6, tr0
1138	blink	tr0, ZERO
1139
1140its_IRQ:
1141	movi	do_IRQ, r6
1142	ptabs	r6, tr0
1143	blink	tr0, ZERO
1144
1145/*
1146 * system_call/unknown_trap third level handler:
1147 *
1148 * Inputs:
1149 *	(r2)   fault/interrupt code, entry number (TRAP = 11)
1150 *	(r3)   struct pt_regs *, original register's frame pointer
1151 *	(r4)   Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
1152 *	(r5)   TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
1153 *	(SP)   = r3
1154 *	(LINK) return address: ret_from_exception
1155 *	(*r3)  Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
1156 *
1157 * Outputs:
1158 *	(*r3)  Syscall reply (Saved r2)
1159 *	(LINK) In case of syscall only it can be scrapped.
1160 *	       Common second level post handler will be ret_from_syscall.
1161 *	       Common (non-trace) exit point to that is syscall_ret (saving
1162 *	       result to r2). Common bad exit point is syscall_bad (returning
1163 *	       ENOSYS then saved to r2).
1164 *
1165 */
1166
1167unknown_trap:
	/* Report an unrecognised trapa; after do_unknown_trapa returns (its
	 * result in r2), fall into the common syscall return path. */
1168	/* Unknown Trap or User Trace */
1169	movi	do_unknown_trapa, r6
1170	ptabs	r6, tr0
1171	ld.q	r3, FRAME_R(9), r2	/* r2 = #arg << 16 | syscall # */
1172	andi	r2, 0x1ff, r2		/* r2 = syscall # */
1173	blink	tr0, LINK
1174
1175	pta	syscall_ret, tr0
1176	blink	tr0, ZERO
1177
1178	/* New syscall implementation*/
1179system_call:
	/* Validate TRA: bits 20+ must equal 1 (0x1yzzzz) for a syscall,
	 * otherwise treat the trapa as unknown. */
1180	pta	unknown_trap, tr0
1181	or	r5, ZERO, r4	/* TRA (=r5) -> r4 */
1182	shlri	r4, 20, r4
1183	bnei	r4, 1, tr0	/* unknown_trap if not 0x1yzzzz */
1184
1185	/* It's a system call */
1186	st.q	r3, FRAME_S(FSYSCALL_ID), r5 	/* ID (0x1yzzzz) -> stack */
1187	andi	r5, 0x1ff, r5			/* syscall # -> r5 */
1188
1189	STI()
1190
1191	pta	syscall_allowed, tr0
1192	movi	NR_syscalls - 1, r4	/* Last valid */
1193	bgeu/l	r4, r5, tr0	/* unsigned compare: 0 <= nr < NR_syscalls */
1194
1195syscall_bad:
1196	/* Return ENOSYS ! */
1197	movi	-(ENOSYS), r2		/* Fall-through */
1198
1199	.global syscall_ret
1200syscall_ret:
	/* Store the return value into saved r2 and step SPC past the trapa
	 * (exceptions are pre-execution events on SH-5). */
1201	st.q	SP, FRAME_R(9), r2	/* Expecting SP back to BASIC frame */
1202	ld.q	SP, FRAME_S(FSPC), r2
1203	addi	r2, 4, r2		/* Move PC, being pre-execution event */
1204	st.q	SP, FRAME_S(FSPC), r2
1205	pta	ret_from_syscall, tr0
1206	blink	tr0, ZERO
1207
1208
1209/* A different return path for ret_from_fork, because we now need
1210 * to call schedule_tail with the later kernels. Because prev is
1211 * loaded into r2 by switch_to() means we can just call it straight  away
1212 */
1213
1214.global	ret_from_fork
1215ret_from_fork:
	/* schedule_tail(prev) — prev already in r2 courtesy of switch_to(). */
1216
1217	movi	schedule_tail,r5
1218	ori	r5, 1, r5
1219	ptabs	r5, tr0
1220	blink	tr0, LINK
1221
	/* Then take the normal syscall-exit path to user space. */
1222	ld.q	SP, FRAME_S(FSPC), r2
1223	addi	r2, 4, r2		/* Move PC, being pre-execution event */
1224	st.q	SP, FRAME_S(FSPC), r2
1225	pta	ret_from_syscall, tr0
1226	blink	tr0, ZERO
1227
1228.global	ret_from_kernel_thread
1229ret_from_kernel_thread:
	/* Like ret_from_fork, but additionally invoke the thread function
	 * (saved r3) with its argument (saved r2) before heading out via
	 * the syscall return path. */
1230
1231	movi	schedule_tail,r5
1232	ori	r5, 1, r5
1233	ptabs	r5, tr0
1234	blink	tr0, LINK
1235
1236	ld.q	SP, FRAME_R(2), r2	/* argument for the thread fn */
1237	ld.q	SP, FRAME_R(3), r3	/* thread fn entry point */
1238	ptabs	r3, tr0
1239	blink	tr0, LINK
1240
1241	ld.q	SP, FRAME_S(FSPC), r2
1242	addi	r2, 4, r2		/* Move PC, being pre-execution event */
1243	st.q	SP, FRAME_S(FSPC), r2
1244	pta	ret_from_syscall, tr0
1245	blink	tr0, ZERO
1246
1247syscall_allowed:
	/* Valid syscall number in r5. Pick the exit continuation (traced or
	 * not) into LINK, then dispatch through sys_call_table. */
1248	/* Use LINK to deflect the exit point, default is syscall_ret */
1249	pta	syscall_ret, tr0
1250	gettr	tr0, LINK
1251	pta	syscall_notrace, tr0
1252
1253	getcon	KCR0, r2
1254	ld.l	r2, TI_FLAGS, r4
1255	movi	_TIF_WORK_SYSCALL_MASK, r6
1256	and	r6, r4, r6
1257	beq/l	r6, ZERO, tr0	/* no tracing work -> straight dispatch */
1258
1259	/* Trace it by calling syscall_trace before and after */
1260	movi	do_syscall_trace_enter, r4
1261	or	SP, ZERO, r2
1262	ptabs	r4, tr0
1263	blink	tr0, LINK
1264
1265	/* Save the retval */
1266	st.q	SP, FRAME_R(2), r2
1267
1268	/* Reload syscall number as r5 is trashed by do_syscall_trace_enter */
1269	ld.q	SP, FRAME_S(FSYSCALL_ID), r5
1270	andi	r5, 0x1ff, r5
1271
1272	pta	syscall_ret_trace, tr0
1273	gettr	tr0, LINK
1274
1275syscall_notrace:
1276	/* Now point to the appropriate 4th level syscall handler */
1277	movi	sys_call_table, r4
1278	shlli	r5, 2, r5	/* 4-byte table entries */
1279	ldx.l	r4, r5, r5
1280	ptabs	r5, tr0
1281
1282	/* Prepare original args */
1283	ld.q	SP, FRAME_R(2), r2
1284	ld.q	SP, FRAME_R(3), r3
1285	ld.q	SP, FRAME_R(4), r4
1286	ld.q	SP, FRAME_R(5), r5
1287	ld.q	SP, FRAME_R(6), r6
1288	ld.q	SP, FRAME_R(7), r7
1289
1290	/* And now the trick for those syscalls requiring regs * ! */
1291	or	SP, ZERO, r8
1292
1293	/* Call it */
1294	blink	tr0, ZERO	/* LINK is already properly set */
1295
1296syscall_ret_trace:
	/* Traced-syscall exit: record the return value, let the tracer see
	 * it, then advance SPC past the trapa and rejoin ret_from_syscall. */
1297	/* We get back here only if under trace */
1298	st.q	SP, FRAME_R(9), r2	/* Save return value */
1299
1300	movi	do_syscall_trace_leave, LINK
1301	or	SP, ZERO, r2
1302	ptabs	LINK, tr0
1303	blink	tr0, LINK
1304
1305	/* This needs to be done after any syscall tracing */
1306	ld.q	SP, FRAME_S(FSPC), r2
1307	addi	r2, 4, r2	/* Move PC, being pre-execution event */
1308	st.q	SP, FRAME_S(FSPC), r2
1309
1310	pta	ret_from_syscall, tr0
1311	blink	tr0, ZERO	/* Resume normal return sequence */
1312
1313/*
1314 * --- Switch to running under a particular ASID and return the previous ASID value
1315 * --- The caller is assumed to have done a cli before calling this.
1316 *
1317 * Input r2 : new ASID
1318 * Output r2 : old ASID
1319 */
1320
1321	.global switch_and_save_asid
1322switch_and_save_asid:
	/* SR.ASID can only take effect via an rte: build the new SR in SSR,
	 * point SPC at the local continuation and rte to it. */
1323	getcon	sr, r0
1324	movi	255, r4
1325	shlli 	r4, 16, r4	/* r4 = mask to select ASID */
1326	and	r0, r4, r3	/* r3 = shifted old ASID */
1327	andi	r2, 255, r2	/* mask down new ASID */
1328	shlli	r2, 16, r2	/* align new ASID against SR.ASID */
1329	andc	r0, r4, r0	/* efface old ASID from SR */
1330	or	r0, r2, r0	/* insert the new ASID */
1331	putcon	r0, ssr
1332	movi	1f, r0
1333	putcon	r0, spc
1334	rte
1335	nop
13361:
1337	ptabs	LINK, tr0
1338	shlri	r3, 16, r2	/* r2 = old ASID */
1339	blink 	tr0, r63
1340
1341	.global	route_to_panic_handler
1342route_to_panic_handler:
	/* Clear SR.MMU (bit 31) via an rte so we land in real mode at the
	 * physical address of panic_handler. Never returns. */
1343	/* Switch to real mode, goto panic_handler, don't return.  Useful for
1344	   last-chance debugging, e.g. if no output wants to go to the console.
1345	   */
1346
1347	movi	panic_handler - CONFIG_PAGE_OFFSET, r1
1348	ptabs	r1, tr0
1349	pta	1f, tr1
1350	gettr	tr1, r0
1351	putcon	r0, spc
1352	getcon	sr, r0
1353	movi	1, r1
1354	shlli	r1, 31, r1	/* r1 = SR.MMU bit */
1355	andc	r0, r1, r0
1356	putcon	r0, ssr
1357	rte
1358	nop
13591:	/* Now in real mode */
1360	blink	tr0, r63
1361	nop
1362
1363	.global peek_real_address_q
1364peek_real_address_q:
1365	/* Two args:
1366	   r2 : real mode address to peek
1367	   r2(out) : result quadword
1368
1369	   This is provided as a cheapskate way of manipulating device
1370	   registers for debugging (to avoid the need to ioremap the debug
1371	   module, and to avoid the need to ioremap the watchpoint
1372	   controller in a way that identity maps sufficient bits to avoid the
1373	   SH5-101 cut2 silicon defect).
1374
1375	   This code is not performance critical
1376	*/
1377
1378	add.l	r2, r63, r2	/* sign extend address */
1379	getcon	sr, r0		/* r0 = saved original SR */
1380	movi	1, r1
1381	shlli	r1, 28, r1
1382	or	r0, r1, r1	/* r0 with block bit set */
1383	putcon	r1, sr		/* now in critical section */
1384	movi	1, r36
1385	shlli	r36, 31, r36
1386	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */
1387
1388	putcon	r1, ssr
1389	movi	.peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
1390	movi	1f, r37		/* virtual mode return addr */
1391	putcon	r36, spc
1392
1393	synco
1394	rte
1395	nop
1396
1397.peek0:	/* come here in real mode, don't touch caches!!
1398           still in critical section (sr.bl==1) */
1399	putcon	r0, ssr
1400	putcon	r37, spc
1401	/* Here's the actual peek.  If the address is bad, all bets are now off
1402	 * what will happen (handlers invoked in real-mode = bad news) */
1403	ld.q	r2, 0, r2
1404	synco
1405	rte	/* Back to virtual mode */
1406	nop
1407
14081:
1409	ptabs	LINK, tr0
1410	blink	tr0, r63
1411
1412	.global poke_real_address_q
1413poke_real_address_q:
1414	/* Two args:
1415	   r2 : real mode address to poke
1416	   r3 : quadword value to write.
1417
1418	   This is provided as a cheapskate way of manipulating device
1419	   registers for debugging (to avoid the need to ioremap the debug
1420	   module, and to avoid the need to ioremap the watchpoint
1421	   controller in a way that identity maps sufficient bits to avoid the
1422	   SH5-101 cut2 silicon defect).
1423
1424	   This code is not performance critical
1425	*/
1426
1427	add.l	r2, r63, r2	/* sign extend address */
1428	getcon	sr, r0		/* r0 = saved original SR */
1429	movi	1, r1
1430	shlli	r1, 28, r1
1431	or	r0, r1, r1	/* r0 with block bit set */
1432	putcon	r1, sr		/* now in critical section */
1433	movi	1, r36
1434	shlli	r36, 31, r36
1435	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */
1436
1437	putcon	r1, ssr
1438	movi	.poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
1439	movi	1f, r37		/* virtual mode return addr */
1440	putcon	r36, spc
1441
1442	synco
1443	rte
1444	nop
1445
1446.poke0:	/* come here in real mode, don't touch caches!!
1447           still in critical section (sr.bl==1) */
1448	putcon	r0, ssr
1449	putcon	r37, spc
1450	/* Here's the actual poke.  If the address is bad, all bets are now off
1451	 * what will happen (handlers invoked in real-mode = bad news) */
1452	st.q	r2, 0, r3
1453	synco
1454	rte	/* Back to virtual mode */
1455	nop
1456
14571:
1458	ptabs	LINK, tr0
1459	blink	tr0, r63
1460
1461#ifdef CONFIG_MMU
1462/*
1463 * --- User Access Handling Section
1464 */
1465
1466/*
1467 * User Access support. It all moved to non inlined Assembler
1468 * functions in here.
1469 *
1470 * __kernel_size_t __copy_user(void *__to, const void *__from,
1471 *			       __kernel_size_t __n)
1472 *
1473 * Inputs:
1474 * (r2)  target address
1475 * (r3)  source address
1476 * (r4)  size in bytes
1477 *
1478 * Outputs:
1479 * (*r2) target data
1480 * (r2)  non-copied bytes
1481 *
1482 * If a fault occurs on the user pointer, bail out early and return the
1483 * number of bytes not copied in r2.
1484 * Strategy : for large blocks, call a real memcpy function which can
1485 * move >1 byte at a time using unaligned ld/st instructions, and can
1486 * manipulate the cache using prefetch + alloco to improve the speed
1487 * further.  If a fault occurs in that function, just revert to the
1488 * byte-by-byte approach used for small blocks; this is rare so the
1489 * performance hit for that case does not matter.
1490 *
1491 * For small blocks it's not worth the overhead of setting up and calling
1492 * the memcpy routine; do the copy a byte at a time.
1493 *
1494 */
1495	.global	__copy_user
1496__copy_user:
	/* Blocks of fewer than 16 bytes skip straight to the byte loop;
	 * larger blocks go via copy_user_memcpy with a fixup frame saved. */
1497	pta	__copy_user_byte_by_byte, tr1
1498	movi	16, r0 ! this value is a best guess, should tune it by benchmarking
1499	bge/u	r0, r4, tr1
1500	pta	copy_user_memcpy, tr0
1501	addi	SP, -32, SP
1502	/* Save arguments in case we have to fix-up unhandled page fault */
1503	st.q	SP, 0, r2
1504	st.q	SP, 8, r3
1505	st.q	SP, 16, r4
1506	st.q	SP, 24, r35 ! r35 is callee-save
1507	/* Save LINK in a register to reduce RTS time later (otherwise
1508	   ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
1509	ori	LINK, 0, r35
1510	blink	tr0, LINK
1511
1512	/* Copy completed normally if we get back here */
1513	ptabs	r35, tr0
1514	ld.q	SP, 24, r35
1515	/* don't restore r2-r4, pointless */
1516	/* set result=r2 to zero as the copy must have succeeded. */
1517	or	r63, r63, r2
1518	addi	SP, 32, SP
1519	blink	tr0, r63 ! RTS
1520
1521	.global __copy_user_fixup
1522__copy_user_fixup:
	/* Entered from the exception fixup table when copy_user_memcpy
	 * faulted: restore the saved arguments and retry byte-by-byte. */
1523	/* Restore stack frame */
1524	ori	r35, 0, LINK
1525	ld.q	SP, 24, r35
1526	ld.q	SP, 16, r4
1527	ld.q	SP, 8, r3
1528	ld.q	SP, 0, r2
1529	addi	SP, 32, SP
1530	/* Fall through to original code, in the 'same' state we entered with */
1531
1532/* The slow byte-by-byte method is used if the fast copy traps due to a bad
1533   user address.  In that rare case, the speed drop can be tolerated. */
1534__copy_user_byte_by_byte:
1535	pta	___copy_user_exit, tr1
1536	pta	___copy_user1, tr0
1537	beq/u	r4, r63, tr1	/* early exit for zero length copy */
1538	sub	r2, r3, r0	/* r0 = dst - src, so dst = src + r0 below */
1539	addi	r0, -1, r0
1540
1541___copy_user1:
1542	ld.b	r3, 0, r5	/* Fault address 1 */
1543
1544	/* Could rewrite this to use just 1 add, but the second comes 'free'
1545	   due to load latency */
1546	addi	r3, 1, r3
1547	addi	r4, -1, r4	/* No real fixup required */
1548___copy_user2:
1549	stx.b	r3, r0, r5	/* Fault address 2 */
1550	bne     r4, ZERO, tr0
1551
1552___copy_user_exit:
1553	or	r4, ZERO, r2	/* return the count of bytes NOT copied */
1554	ptabs	LINK, tr0
1555	blink	tr0, ZERO
1556
1557/*
1558 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
1559 *
1560 * Inputs:
1561 * (r2)  target address
1562 * (r3)  size in bytes
1563 *
1564 * Outputs:
1565 * (*r2) zero-ed target data
1566 * (r2)  non-zero-ed bytes
1567 */
1568	.global	__clear_user
1569__clear_user:
	/* Byte-at-a-time zeroing; a fault on the store leaves the remaining
	 * count in r3, which becomes the return value. */
1570	pta	___clear_user_exit, tr1
1571	pta	___clear_user1, tr0
1572	beq/u	r3, r63, tr1	/* early exit for zero length */
1573
1574___clear_user1:
1575	st.b	r2, 0, ZERO	/* Fault address */
1576	addi	r2, 1, r2
1577	addi	r3, -1, r3	/* No real fixup required */
1578	bne	r3, ZERO, tr0
1579
1580___clear_user_exit:
1581	or	r3, ZERO, r2	/* return the count of bytes NOT cleared */
1582	ptabs	LINK, tr0
1583	blink	tr0, ZERO
1584
1585#endif /* CONFIG_MMU */
1586
1587/*
1588 * extern long __get_user_asm_?(void *val, long addr)
1589 *
1590 * Inputs:
1591 * (r2) dest address
1592 * (r3) source address (in User Space)
1593 *
1594 * Ouputs:
1595 * (r2) -EFAULT (faulting)
1596 * 0 (not faulting)
1597 */
1598 .global __get_user_asm_b
1599__get_user_asm_b:
1600 or r2, ZERO, r4
1601 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1602
1603___get_user_asm_b1:
1604 ld.b r3, 0, r5 /* r5 = data */
1605 st.b r4, 0, r5
1606 or ZERO, ZERO, r2
1607
1608___get_user_asm_b_exit:
1609 ptabs LINK, tr0
1610 blink tr0, ZERO
1611
1612
	.global	__get_user_asm_w
__get_user_asm_w:
	/* 16-bit __get_user: r2 = kernel dest, r3 = user source.  Returns 0
	   in r2, or -EFAULT if the user load (listed in __ex_table) faults. */
	or	r2, ZERO, r4	! r4 = kernel destination pointer
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___get_user_asm_w1:
	ld.w	r3, 0, r5	/* r5 = data (may fault) */
	st.w	r4, 0, r5	! store word to kernel destination
	or	ZERO, ZERO, r2	! success: clear error code

___get_user_asm_w_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO	! return
1626
1627
	.global	__get_user_asm_l
__get_user_asm_l:
	/* 32-bit __get_user: r2 = kernel dest, r3 = user source.  Returns 0
	   in r2, or -EFAULT if the user load (listed in __ex_table) faults. */
	or	r2, ZERO, r4	! r4 = kernel destination pointer
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___get_user_asm_l1:
	ld.l	r3, 0, r5	/* r5 = data (may fault) */
	st.l	r4, 0, r5	! store longword to kernel destination
	or	ZERO, ZERO, r2	! success: clear error code

___get_user_asm_l_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO	! return
1641
1642
	.global	__get_user_asm_q
__get_user_asm_q:
	/* 64-bit __get_user: r2 = kernel dest, r3 = user source.  Returns 0
	   in r2, or -EFAULT if the user load (listed in __ex_table) faults. */
	or	r2, ZERO, r4	! r4 = kernel destination pointer
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___get_user_asm_q1:
	ld.q	r3, 0, r5	/* r5 = data (may fault) */
	st.q	r4, 0, r5	! store quadword to kernel destination
	or	ZERO, ZERO, r2	! success: clear error code

___get_user_asm_q_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO	! return
1656
1657/*
1658 * extern long __put_user_asm_?(void *pval, long addr)
1659 *
1660 * Inputs:
1661 * (r2) kernel pointer to value
1662 * (r3) dest address (in User Space)
1663 *
1664 * Ouputs:
1665 * (r2) -EFAULT (faulting)
1666 * 0 (not faulting)
1667 */
1668 .global __put_user_asm_b
1669__put_user_asm_b:
1670 ld.b r2, 0, r4 /* r4 = data */
1671 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1672
1673___put_user_asm_b1:
1674 st.b r3, 0, r4
1675 or ZERO, ZERO, r2
1676
1677___put_user_asm_b_exit:
1678 ptabs LINK, tr0
1679 blink tr0, ZERO
1680
1681
	.global	__put_user_asm_w
__put_user_asm_w:
	/* 16-bit __put_user: r2 = kernel pointer to value, r3 = user dest.
	   Returns 0 in r2, or -EFAULT if the user store (in __ex_table)
	   faults. */
	ld.w	r2, 0, r4	/* r4 = data (kernel-side load) */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_w1:
	st.w	r3, 0, r4	! store word to user destination (may fault)
	or	ZERO, ZERO, r2	! success: clear error code

___put_user_asm_w_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO	! return
1694
1695
	.global	__put_user_asm_l
__put_user_asm_l:
	/* 32-bit __put_user: r2 = kernel pointer to value, r3 = user dest.
	   Returns 0 in r2, or -EFAULT if the user store (in __ex_table)
	   faults. */
	ld.l	r2, 0, r4	/* r4 = data (kernel-side load) */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_l1:
	st.l	r3, 0, r4	! store longword to user destination (may fault)
	or	ZERO, ZERO, r2	! success: clear error code

___put_user_asm_l_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO	! return
1708
1709
	.global	__put_user_asm_q
__put_user_asm_q:
	/* 64-bit __put_user: r2 = kernel pointer to value, r3 = user dest.
	   Returns 0 in r2, or -EFAULT if the user store (in __ex_table)
	   faults. */
	ld.q	r2, 0, r4	/* r4 = data (kernel-side load) */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_q1:
	st.q	r3, 0, r4	! store quadword to user destination (may fault)
	or	ZERO, ZERO, r2	! success: clear error code

___put_user_asm_q_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO	! return
1722
panic_stash_regs:
	/* The idea is : when we get an unhandled panic, we dump the registers
	   to a known memory location, then just sit in a tight loop.
	   This allows the human to look at the memory region through the GDB
	   session (assuming the debug module's SHwy initiator isn't locked up
	   or anything), to hopefully analyze the cause of the panic. */

	/* On entry, former r15 (SP) is in DCR
	   former r0 is at resvec_saved_area + 0
	   former r1 is at resvec_saved_area + 8
	   former tr0 is at resvec_saved_area + 32
	   DCR is the only register whose value is lost altogether.
	*/

	/* Dump area layout: r0..r63 at offsets 0x000-0x1f8 (8 bytes each),
	   tr0..tr7 at 0x200-0x238, control registers at 0x240-0x2a8. */
	movi	0xffffffff80000000, r0 ! phy of dump area
	ld.q	SP, 0x000, r1	! former r0
	st.q	r0,  0x000, r1
	ld.q	SP, 0x008, r1	! former r1
	st.q	r0,  0x008, r1
	st.q	r0,  0x010, r2
	st.q	r0,  0x018, r3
	st.q	r0,  0x020, r4
	st.q	r0,  0x028, r5
	st.q	r0,  0x030, r6
	st.q	r0,  0x038, r7
	st.q	r0,  0x040, r8
	st.q	r0,  0x048, r9
	st.q	r0,  0x050, r10
	st.q	r0,  0x058, r11
	st.q	r0,  0x060, r12
	st.q	r0,  0x068, r13
	st.q	r0,  0x070, r14
	getcon	dcr, r14	! r14 already dumped; reuse it to fetch former SP
	st.q	r0,  0x078, r14	! r15 slot = former SP (was stashed in DCR)
	st.q	r0,  0x080, r16
	st.q	r0,  0x088, r17
	st.q	r0,  0x090, r18
	st.q	r0,  0x098, r19
	st.q	r0,  0x0a0, r20
	st.q	r0,  0x0a8, r21
	st.q	r0,  0x0b0, r22
	st.q	r0,  0x0b8, r23
	st.q	r0,  0x0c0, r24
	st.q	r0,  0x0c8, r25
	st.q	r0,  0x0d0, r26
	st.q	r0,  0x0d8, r27
	st.q	r0,  0x0e0, r28
	st.q	r0,  0x0e8, r29
	st.q	r0,  0x0f0, r30
	st.q	r0,  0x0f8, r31
	st.q	r0,  0x100, r32
	st.q	r0,  0x108, r33
	st.q	r0,  0x110, r34
	st.q	r0,  0x118, r35
	st.q	r0,  0x120, r36
	st.q	r0,  0x128, r37
	st.q	r0,  0x130, r38
	st.q	r0,  0x138, r39
	st.q	r0,  0x140, r40
	st.q	r0,  0x148, r41
	st.q	r0,  0x150, r42
	st.q	r0,  0x158, r43
	st.q	r0,  0x160, r44
	st.q	r0,  0x168, r45
	st.q	r0,  0x170, r46
	st.q	r0,  0x178, r47
	st.q	r0,  0x180, r48
	st.q	r0,  0x188, r49
	st.q	r0,  0x190, r50
	st.q	r0,  0x198, r51
	st.q	r0,  0x1a0, r52
	st.q	r0,  0x1a8, r53
	st.q	r0,  0x1b0, r54
	st.q	r0,  0x1b8, r55
	st.q	r0,  0x1c0, r56
	st.q	r0,  0x1c8, r57
	st.q	r0,  0x1d0, r58
	st.q	r0,  0x1d8, r59
	st.q	r0,  0x1e0, r60
	st.q	r0,  0x1e8, r61
	st.q	r0,  0x1f0, r62
	st.q	r0,  0x1f8, r63	! bogus, but for consistency's sake...

	/* Target registers: tr0 was saved at resvec_saved_area + 0x20,
	   tr1..tr7 are still live and read directly. */
	ld.q	SP, 0x020, r1	! former tr0
	st.q	r0,  0x200, r1
	gettr	tr1, r1
	st.q	r0,  0x208, r1
	gettr	tr2, r1
	st.q	r0,  0x210, r1
	gettr	tr3, r1
	st.q	r0,  0x218, r1
	gettr	tr4, r1
	st.q	r0,  0x220, r1
	gettr	tr5, r1
	st.q	r0,  0x228, r1
	gettr	tr6, r1
	st.q	r0,  0x230, r1
	gettr	tr7, r1
	st.q	r0,  0x238, r1

	/* Control registers, dumped in this order at 0x240-0x2a8. */
	getcon	sr, r1
	getcon	ssr, r2
	getcon	pssr, r3
	getcon	spc, r4
	getcon	pspc, r5
	getcon	intevt, r6
	getcon	expevt, r7
	getcon	pexpevt, r8
	getcon	tra, r9
	getcon	tea, r10
	getcon	kcr0, r11
	getcon	kcr1, r12
	getcon	vbr, r13
	getcon	resvec, r14

	st.q	r0,  0x240, r1
	st.q	r0,  0x248, r2
	st.q	r0,  0x250, r3
	st.q	r0,  0x258, r4
	st.q	r0,  0x260, r5
	st.q	r0,  0x268, r6
	st.q	r0,  0x270, r7
	st.q	r0,  0x278, r8
	st.q	r0,  0x280, r9
	st.q	r0,  0x288, r10
	st.q	r0,  0x290, r11
	st.q	r0,  0x298, r12
	st.q	r0,  0x2a0, r13
	st.q	r0,  0x2a8, r14

	/* Set up r2/r3/r4 — presumably the arguments for the C routine
	   panic_handler(spc, ssr, expevt); verify against its prototype. */
	getcon	SPC,r2
	getcon	SSR,r3
	getcon	EXPEVT,r4
	/* Prepare to jump to C - physical address */
	movi	panic_handler-CONFIG_PAGE_OFFSET, r1
	ori	r1, 1, r1	! NOTE(review): bit 0 set on the target — looks
				! like the MMUOFF/ISA marker used elsewhere; confirm
	ptabs	r1, tr0
	getcon	DCR, SP	! recover the original stack pointer from DCR
	blink	tr0, ZERO	! tail-jump to panic_handler; never returns
	nop
	nop
	nop
	nop
1866
1867
1868
1869
1870/*
1871 * --- Signal Handling Section
1872 */
1873
1874/*
1875 * extern long long _sa_default_rt_restorer
1876 * extern long long _sa_default_restorer
1877 *
1878 * or, better,
1879 *
1880 * extern void _sa_default_rt_restorer(void)
1881 * extern void _sa_default_restorer(void)
1882 *
 * Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
1884 * from user space. Copied into user space by signal management.
1885 * Both must be quad aligned and 2 quad long (4 instructions).
1886 *
1887 */
	.balign 8
	.global sa_default_rt_restorer
sa_default_rt_restorer:
	/* User-space trampoline for rt signal return, copied into the user
	   stack by signal management.  Must remain quad-aligned and exactly
	   2 quads (4 instructions).  Builds the trapa argument in r9:
	   r9 = (0x10 << 16) | __NR_rt_sigreturn (shori shifts left 16, then
	   ORs the immediate). */
	movi	0x10, r9
	shori	__NR_rt_sigreturn, r9
	trapa	r9
	nop
1895
	.balign 8
	.global sa_default_restorer
sa_default_restorer:
	/* User-space trampoline for non-rt signal return, copied into the
	   user stack by signal management.  Must remain quad-aligned and
	   exactly 2 quads (4 instructions).  r9 = (0x10 << 16) |
	   __NR_sigreturn, then trap into the kernel. */
	movi	0x10, r9
	shori	__NR_sigreturn, r9
	trapa	r9
	nop
1903
1904/*
1905 * --- __ex_table Section
1906 */
1907
1908/*
1909 * User Access Exception Table.
1910 */
1911 .section __ex_table, "a"
1912
1913 .global asm_uaccess_start /* Just a marker */
1914asm_uaccess_start:
1915
1916#ifdef CONFIG_MMU
1917 .long ___copy_user1, ___copy_user_exit
1918 .long ___copy_user2, ___copy_user_exit
1919 .long ___clear_user1, ___clear_user_exit
1920#endif
1921 .long ___get_user_asm_b1, ___get_user_asm_b_exit
1922 .long ___get_user_asm_w1, ___get_user_asm_w_exit
1923 .long ___get_user_asm_l1, ___get_user_asm_l_exit
1924 .long ___get_user_asm_q1, ___get_user_asm_q_exit
1925 .long ___put_user_asm_b1, ___put_user_asm_b_exit
1926 .long ___put_user_asm_w1, ___put_user_asm_w_exit
1927 .long ___put_user_asm_l1, ___put_user_asm_l_exit
1928 .long ___put_user_asm_q1, ___put_user_asm_q_exit
1929
1930 .global asm_uaccess_end /* Just a marker */
1931asm_uaccess_end:
1932
1933
1934
1935
1936/*
1937 * --- .init.text Section
1938 */
1939
1940 __INIT
1941
1942/*
1943 * void trap_init (void)
1944 *
1945 */
1946 .global trap_init
1947trap_init:
1948 addi SP, -24, SP /* Room to save r28/r29/r30 */
1949 st.q SP, 0, r28
1950 st.q SP, 8, r29
1951 st.q SP, 16, r30
1952
1953 /* Set VBR and RESVEC */
1954 movi LVBR_block, r19
1955 andi r19, -4, r19 /* reset MMUOFF + reserved */
1956 /* For RESVEC exceptions we force the MMU off, which means we need the
1957 physical address. */
1958 movi LRESVEC_block-CONFIG_PAGE_OFFSET, r20
1959 andi r20, -4, r20 /* reset reserved */
1960 ori r20, 1, r20 /* set MMUOFF */
1961 putcon r19, VBR
1962 putcon r20, RESVEC
1963
1964 /* Sanity check */
1965 movi LVBR_block_end, r21
1966 andi r21, -4, r21
1967 movi BLOCK_SIZE, r29 /* r29 = expected size */
1968 or r19, ZERO, r30
1969 add r19, r29, r19
1970
1971 /*
1972 * Ugly, but better loop forever now than crash afterwards.
1973 * We should print a message, but if we touch LVBR or
1974 * LRESVEC blocks we should not be surprised if we get stuck
1975 * in trap_init().
1976 */
1977 pta trap_init_loop, tr1
1978 gettr tr1, r28 /* r28 = trap_init_loop */
1979 sub r21, r30, r30 /* r30 = actual size */
1980
1981 /*
1982 * VBR/RESVEC handlers overlap by being bigger than
1983 * allowed. Very bad. Just loop forever.
1984 * (r28) panic/loop address
1985 * (r29) expected size
1986 * (r30) actual size
1987 */
1988trap_init_loop:
1989 bne r19, r21, tr1
1990
1991 /* Now that exception vectors are set up reset SR.BL */
1992 getcon SR, r22
1993 movi SR_UNBLOCK_EXC, r23
1994 and r22, r23, r22
1995 putcon r22, SR
1996
1997 addi SP, 24, SP
1998 ptabs LINK, tr0
1999 blink tr0, ZERO
2000