/* MicroBlaze no-MMU kernel entry code: interrupt, syscall, debug-trap and context-switch paths. */
1/*
2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2007-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/linkage.h>
12#include <asm/thread_info.h>
13#include <linux/errno.h>
14#include <asm/entry.h>
15#include <asm/asm-offsets.h>
16#include <asm/registers.h>
17#include <asm/unistd.h>
18#include <asm/percpu.h>
19#include <asm/signal.h>
20
21#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
/*
 * disable_irq / enable_irq / clear_bip
 *
 * Clear/set MSR_IE (interrupt enable) and clear MSR_BIP (break in
 * progress).  When the CPU implements the optional msrclr/msrset
 * instructions the MSR bit is flipped in a single instruction and no
 * general register is touched (r0 discards the old value).
 */
 22	.macro disable_irq
 23	msrclr r0, MSR_IE
 24	.endm
 25
 26	.macro enable_irq
 27	msrset r0, MSR_IE
 28	.endm
 29
 30	.macro clear_bip
 31	msrclr r0, MSR_BIP
 32	.endm
 33#else
/*
 * Fallback for cores without msrclr/msrset: read-modify-write MSR
 * through r11.  NOTE: these variants clobber r11, so callers must not
 * hold a live value there.
 */
 34	.macro disable_irq
 35	mfs r11, rmsr
 36	andi r11, r11, ~MSR_IE
 37	mts rmsr, r11
 38	.endm
 39
 40	.macro enable_irq
 41	mfs r11, rmsr
 42	ori r11, r11, MSR_IE
 43	mts rmsr, r11
 44	.endm
 45
 46	.macro clear_bip
 47	mfs r11, rmsr
 48	andi r11, r11, ~MSR_BIP
 49	mts rmsr, r11
 50	.endm
 51#endif
52
/*
 * _interrupt - hardware interrupt entry.
 *
 * Saves the interrupted context as a struct pt_regs on the kernel
 * stack.  The per-CPU KM flag distinguishes the interrupted mode:
 * nonzero (kernel) -> stay on the current stack, just make room for
 * pt_regs; zero (user) -> switch to the task's kernel stack derived
 * from CURRENT_SAVE->thread_info.  r11 is used as scratch throughout
 * and is preserved via the per-CPU R11_SAVE slot.  Finishes by calling
 * do_IRQ with r5 = pt_regs and r15 pointing at ret_from_intr.
 */
53ENTRY(_interrupt)
54	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
55	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
56	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
57	beqid	r11, 1f
58	nop
59	brid	2f				/* jump over */
60	addik	r1, r1, (-PT_SIZE)	/* room for pt_regs (delay slot) */
611:	/* switch to kernel stack */
62	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
63	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
64	/* calculate kernel stack pointer */
65	addik	r1, r1, THREAD_SIZE - PT_SIZE
662:
67	swi	r11, r1, PT_MODE		/* store the mode */
68	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
69	swi	r2, r1, PT_R2
70	swi	r3, r1, PT_R3
71	swi	r4, r1, PT_R4
72	swi	r5, r1, PT_R5
73	swi	r6, r1, PT_R6
74	swi	r7, r1, PT_R7
75	swi	r8, r1, PT_R8
76	swi	r9, r1, PT_R9
77	swi	r10, r1, PT_R10
78	swi	r11, r1, PT_R11
79	swi	r12, r1, PT_R12
80	swi	r13, r1, PT_R13
/* r14 is the interrupt return address: saved both as R14 and as the PC to resume at */
81	swi	r14, r1, PT_R14
82	swi	r14, r1, PT_PC
83	swi	r15, r1, PT_R15
84	swi	r16, r1, PT_R16
85	swi	r17, r1, PT_R17
86	swi	r18, r1, PT_R18
87	swi	r19, r1, PT_R19
88	swi	r20, r1, PT_R20
89	swi	r21, r1, PT_R21
90	swi	r22, r1, PT_R22
91	swi	r23, r1, PT_R23
92	swi	r24, r1, PT_R24
93	swi	r25, r1, PT_R25
94	swi	r26, r1, PT_R26
95	swi	r27, r1, PT_R27
96	swi	r28, r1, PT_R28
97	swi	r29, r1, PT_R29
98	swi	r30, r1, PT_R30
99	swi	r31, r1, PT_R31
100	/* special purpose registers */
101	mfs	r11, rmsr
102	swi	r11, r1, PT_MSR
103	mfs	r11, rear
104	swi	r11, r1, PT_EAR
105	mfs	r11, resr
106	swi	r11, r1, PT_ESR
107	mfs	r11, rfsr
108	swi	r11, r1, PT_FSR
109	/* reload original stack pointer and save it */
110	lwi	r11, r0, PER_CPU(ENTRY_SP)
111	swi	r11, r1, PT_R1
112	/* update mode indicator we are in kernel mode */
113	addik	r11, r0, 1
114	swi	r11, r0, PER_CPU(KM)
115	/* restore r31 */
116	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
117	/* prepare the link register, the argument and jump */
/* -8 because callees return with rtsd r15, 8; do_IRQ lands back at ret_from_intr */
118	addik	r15, r0, ret_from_intr - 8
119	addk	r6, r0, r15
120	braid	do_IRQ
121	add	r5, r0, r1	/* r5 = pt_regs (delay slot) */
122
/*
 * Interrupt return path.
 *
 * If the interrupt hit kernel mode (PT_MODE != 0) restore the context
 * immediately.  Otherwise loop at label 3, re-reading TI_FLAGS each
 * pass: reschedule while _TIF_NEED_RESCHED is set, then run
 * do_notify_resume for pending signal/notify work, until no work
 * remains.  restore_context reloads the special purpose registers and
 * the full GPR set from pt_regs and returns with rtid (which resumes
 * at the saved PC with interrupts re-enabled).
 */
123ret_from_intr:
124	lwi	r11, r1, PT_MODE
/* nonzero mode = returning to kernel: skip the reschedule/signal loop */
125	bneid	r11, no_intr_resched
126
1273:
128	lwi	r6, r31, TS_THREAD_INFO	/* get thread info */
129	lwi	r19, r6, TI_FLAGS	/* get flags in thread info */
130	/* do an extra work if any bits are set */
131
132	andi	r11, r19, _TIF_NEED_RESCHED
133	beqi	r11, 1f
134	bralid	r15, schedule
135	nop
136	bri	3b	/* re-check flags after scheduling */
/* r5 = pt_regs (delay slot of beqid); r6 = 0 (delay slot of bralid) --
 * presumably the "in_syscall" argument of do_notify_resume; verify against its C signature */
1371:	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
138	beqid	r11, no_intr_resched
139	addk	r5, r1, r0
140	bralid	r15, do_notify_resume
141	addk	r6, r0, r0
142	bri	3b	/* re-check flags after notify work */
143
144no_intr_resched:
145	/* Disable interrupts, we are now committed to the state restore */
146	disable_irq
147
148	/* save mode indicator */
149	lwi	r11, r1, PT_MODE
150	swi	r11, r0, PER_CPU(KM)
151
152	/* save r31 */
153	swi	r31, r0, PER_CPU(CURRENT_SAVE)
154restore_context:
155	/* special purpose registers */
156	lwi	r11, r1, PT_FSR
157	mts	rfsr, r11
158	lwi	r11, r1, PT_ESR
159	mts	resr, r11
160	lwi	r11, r1, PT_EAR
161	mts	rear, r11
162	lwi	r11, r1, PT_MSR
163	mts	rmsr, r11
164
165	lwi	r31, r1, PT_R31
166	lwi	r30, r1, PT_R30
167	lwi	r29, r1, PT_R29
168	lwi	r28, r1, PT_R28
169	lwi	r27, r1, PT_R27
170	lwi	r26, r1, PT_R26
171	lwi	r25, r1, PT_R25
172	lwi	r24, r1, PT_R24
173	lwi	r23, r1, PT_R23
174	lwi	r22, r1, PT_R22
175	lwi	r21, r1, PT_R21
176	lwi	r20, r1, PT_R20
177	lwi	r19, r1, PT_R19
178	lwi	r18, r1, PT_R18
179	lwi	r17, r1, PT_R17
180	lwi	r16, r1, PT_R16
181	lwi	r15, r1, PT_R15
182	lwi	r14, r1, PT_PC	/* resume address goes back into r14 for rtid */
183	lwi	r13, r1, PT_R13
184	lwi	r12, r1, PT_R12
185	lwi	r11, r1, PT_R11
186	lwi	r10, r1, PT_R10
187	lwi	r9, r1, PT_R9
188	lwi	r8, r1, PT_R8
189	lwi	r7, r1, PT_R7
190	lwi	r6, r1, PT_R6
191	lwi	r5, r1, PT_R5
192	lwi	r4, r1, PT_R4
193	lwi	r3, r1, PT_R3
194	lwi	r2, r1, PT_R2
195	lwi	r1, r1, PT_R1	/* finally switch back to the saved stack */
196	rtid	r14, 0
197	nop
198
/* Reset vector target: branch to absolute address 0 (restart from the reset vector). */
199ENTRY(_reset)
200	brai	0;
201
/*
 * _user_exception - system call entry.
 *
 * Builds pt_regs exactly like _interrupt (stack switch decided by the
 * per-CPU KM flag), except that the saved PC is r14 + 4 so the syscall
 * returns to the instruction after the trap.  The syscall number
 * arrives in r12; it is range-checked against __NR_syscalls, scaled by
 * 4 with two adds (the barrel shifter is optional on MicroBlaze), and
 * dispatched through sys_call_table with r15 set so the handler
 * returns into ret_to_user.  r30 = 1 flags that a syscall restart is
 * allowed.  Out-of-range numbers return -ENOSYS in r3.
 */
202ENTRY(_user_exception)
203	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
204	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
205	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
206	beqid	r11, 1f				/* Already in kernel mode? */
207	nop
208	brid	2f				/* jump over */
209	addik	r1, r1, (-PT_SIZE)	/* Room for pt_regs (delay slot) */
2101:	/* Switch to kernel stack */
211	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
212	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
213	/* calculate kernel stack pointer */
214	addik	r1, r1, THREAD_SIZE - PT_SIZE
2152:
216	swi	r11, r1, PT_MODE		/* store the mode */
217	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
218	/* save them on stack */
219	swi	r2, r1, PT_R2
220	swi	r3, r1, PT_R3	/* r3: _always_ in clobber list; see unistd.h */
221	swi	r4, r1, PT_R4	/* r4: _always_ in clobber list; see unistd.h */
222	swi	r5, r1, PT_R5
223	swi	r6, r1, PT_R6
224	swi	r7, r1, PT_R7
225	swi	r8, r1, PT_R8
226	swi	r9, r1, PT_R9
227	swi	r10, r1, PT_R10
228	swi	r11, r1, PT_R11
229	/* r12: _always_ in clobber list; see unistd.h */
230	swi	r12, r1, PT_R12
231	swi	r13, r1, PT_R13
232	/* r14: _always_ in clobber list; see unistd.h */
233	swi	r14, r1, PT_R14
234	/* but we want to return to the next inst. */
235	addik	r14, r14, 0x4
236	swi	r14, r1, PT_PC		/* increment by 4 and store in pc */
237	swi	r15, r1, PT_R15
238	swi	r16, r1, PT_R16
239	swi	r17, r1, PT_R17
240	swi	r18, r1, PT_R18
241	swi	r19, r1, PT_R19
242	swi	r20, r1, PT_R20
243	swi	r21, r1, PT_R21
244	swi	r22, r1, PT_R22
245	swi	r23, r1, PT_R23
246	swi	r24, r1, PT_R24
247	swi	r25, r1, PT_R25
248	swi	r26, r1, PT_R26
249	swi	r27, r1, PT_R27
250	swi	r28, r1, PT_R28
251	swi	r29, r1, PT_R29
252	swi	r30, r1, PT_R30
253	swi	r31, r1, PT_R31
254
255	disable_irq
256	nop		/* make sure IE bit is in effect */
257	clear_bip	/* once IE is in effect it is safe to clear BIP */
258	nop
259
260	/* special purpose registers */
261	mfs	r11, rmsr
262	swi	r11, r1, PT_MSR
263	mfs	r11, rear
264	swi	r11, r1, PT_EAR
265	mfs	r11, resr
266	swi	r11, r1, PT_ESR
267	mfs	r11, rfsr
268	swi	r11, r1, PT_FSR
269	/* reload original stack pointer and save it */
270	lwi	r11, r0, PER_CPU(ENTRY_SP)
271	swi	r11, r1, PT_R1
272	/* update mode indicator we are in kernel mode */
273	addik	r11, r0, 1
274	swi	r11, r0, PER_CPU(KM)
275	/* restore r31 */
276	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
277	/* re-enable interrupts now we are in kernel mode */
278	enable_irq
279
280	/* See if the system call number is valid. */
281	addi	r11, r12, -__NR_syscalls
282	bgei	r11, 1f		/* return to user if not valid */
283	/* Figure out which function to use for this system call. */
284	/* Note Microblaze barrel shift is optional, so don't rely on it */
285	add	r12, r12, r12		/* convert num -> ptr */
286	addik	r30, r0, 1		/* restarts allowed */
287	add	r12, r12, r12		/* num * 4 = byte offset into table */
288	lwi	r12, r12, sys_call_table /* Get function pointer */
/* -8 because handlers return with rtsd r15, 8 */
289	addik	r15, r0, ret_to_user-8	/* set return address */
290	bra	r12			/* Make the system call. */
291	bri	0			/* won't reach here */
2921:
293	brid	ret_to_user		/* jump to syscall epilogue */
294	addi	r3, r0, -ENOSYS		/* set errno in delay slot */
295
296/*
297 * Debug traps are like a system call, but entered via brki r14, 0x60
298 * All we need to do is send the SIGTRAP signal to current, ptrace and
299 * do_notify_resume will handle the rest
300 */
/*
 * Debug-trap context save: builds pt_regs, delivers
 * send_sig(SIGTRAP, current, 0), then joins the syscall epilogue.
 * The saved PC is r14 unchanged, so we resume at the trapped
 * instruction.
 *
 * NOTE(review): unlike _interrupt/_user_exception this entry switches
 * to the task's kernel stack (via CURRENT_SAVE) unconditionally,
 * before KM is even read -- confirm a debug trap can never arrive in
 * kernel mode, otherwise the live kernel stack would be abandoned.
 */
301ENTRY(_debug_exception)
302	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
303	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
304	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
305	addik	r1, r1, THREAD_SIZE - PT_SIZE	/* get the kernel stack */
306	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
307	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
308//save_context:
309	swi	r11, r1, PT_MODE	/* store the mode */
310	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
311	/* save them on stack */
312	swi	r2, r1, PT_R2
313	swi	r3, r1, PT_R3	/* r3: _always_ in clobber list; see unistd.h */
314	swi	r4, r1, PT_R4	/* r4: _always_ in clobber list; see unistd.h */
315	swi	r5, r1, PT_R5
316	swi	r6, r1, PT_R6
317	swi	r7, r1, PT_R7
318	swi	r8, r1, PT_R8
319	swi	r9, r1, PT_R9
320	swi	r10, r1, PT_R10
321	swi	r11, r1, PT_R11
322	/* r12: _always_ in clobber list; see unistd.h */
323	swi	r12, r1, PT_R12
324	swi	r13, r1, PT_R13
325	/* r14: _always_ in clobber list; see unistd.h */
326	swi	r14, r1, PT_R14
327	swi	r14, r1, PT_PC /* Will return to interrupted instruction */
328	swi	r15, r1, PT_R15
329	swi	r16, r1, PT_R16
330	swi	r17, r1, PT_R17
331	swi	r18, r1, PT_R18
332	swi	r19, r1, PT_R19
333	swi	r20, r1, PT_R20
334	swi	r21, r1, PT_R21
335	swi	r22, r1, PT_R22
336	swi	r23, r1, PT_R23
337	swi	r24, r1, PT_R24
338	swi	r25, r1, PT_R25
339	swi	r26, r1, PT_R26
340	swi	r27, r1, PT_R27
341	swi	r28, r1, PT_R28
342	swi	r29, r1, PT_R29
343	swi	r30, r1, PT_R30
344	swi	r31, r1, PT_R31
345
346	disable_irq
347	nop		/* make sure IE bit is in effect */
348	clear_bip	/* once IE is in effect it is safe to clear BIP */
349	nop
350
351	/* special purpose registers */
352	mfs	r11, rmsr
353	swi	r11, r1, PT_MSR
354	mfs	r11, rear
355	swi	r11, r1, PT_EAR
356	mfs	r11, resr
357	swi	r11, r1, PT_ESR
358	mfs	r11, rfsr
359	swi	r11, r1, PT_FSR
360	/* reload original stack pointer and save it */
361	lwi	r11, r0, PER_CPU(ENTRY_SP)
362	swi	r11, r1, PT_R1
363	/* update mode indicator we are in kernel mode */
364	addik	r11, r0, 1
365	swi	r11, r0, PER_CPU(KM)
366	/* restore r31 */
367	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
368	/* re-enable interrupts now we are in kernel mode */
369	enable_irq
370
/* send_sig(SIGTRAP, current, 0) */
371	addi	r5, r0, SIGTRAP		/* sending the trap signal */
372	add	r6, r0, r31		/* to current */
373	bralid	r15, send_sig
374	add	r7, r0, r0		/* 3rd param zero */
375
376	addik	r30, r0, 1		/* restarts allowed ??? */
377	/* Restore r3/r4 to work around how ret_to_user works */
378	lwi	r3, r1, PT_R3
379	lwi	r4, r1, PT_R4
380	bri	ret_to_user
381
/* Break vector target: bri 0 branches to itself, i.e. hang here (break not handled). */
382ENTRY(_break)
383	bri	0
384
385/* struct task_struct *_switch_to(struct thread_info *prev,
386				struct thread_info *next); */
/*
 * Context switch.  r5 = prev thread_info, r6 = next thread_info.
 * Returns the outgoing task (taken from r31/current) in r3.  Only the
 * stack pointer, dedicated (r13-r18) and non-volatile (r19-r30)
 * registers plus MSR/EAR/ESR/FSR are stored in prev's cpu_context;
 * the volatile registers were saved on the stack by the caller.
 * current (r31) and the per-CPU CURRENT_SAVE are then switched to
 * next's task, next's cpu_context is reloaded, and we return on
 * next's stack via rtsd r15, 8.  r11/r12 are used as scratch.
 */
387ENTRY(_switch_to)
388	/* prepare return value */
389	addk	r3, r0, r31
390
391	/* save registers in cpu_context */
392	/* use r11 and r12, volatile registers, as temp register */
393	addik	r11, r5, TI_CPU_CONTEXT
394	swi	r1, r11, CC_R1
395	swi	r2, r11, CC_R2
396	/* skip volatile registers.
397	 * they are saved on stack when we jumped to _switch_to() */
398	/* dedicated registers */
399	swi	r13, r11, CC_R13
400	swi	r14, r11, CC_R14
401	swi	r15, r11, CC_R15
402	swi	r16, r11, CC_R16
403	swi	r17, r11, CC_R17
404	swi	r18, r11, CC_R18
405	/* save non-volatile registers */
406	swi	r19, r11, CC_R19
407	swi	r20, r11, CC_R20
408	swi	r21, r11, CC_R21
409	swi	r22, r11, CC_R22
410	swi	r23, r11, CC_R23
411	swi	r24, r11, CC_R24
412	swi	r25, r11, CC_R25
413	swi	r26, r11, CC_R26
414	swi	r27, r11, CC_R27
415	swi	r28, r11, CC_R28
416	swi	r29, r11, CC_R29
417	swi	r30, r11, CC_R30
418	/* special purpose registers */
419	mfs	r12, rmsr
420	swi	r12, r11, CC_MSR
421	mfs	r12, rear
422	swi	r12, r11, CC_EAR
423	mfs	r12, resr
424	swi	r12, r11, CC_ESR
425	mfs	r12, rfsr
426	swi	r12, r11, CC_FSR
427
428	/* update r31, the current */
429	lwi	r31, r6, TI_TASK
430	swi	r31, r0, PER_CPU(CURRENT_SAVE)
431
432	/* get new process' cpu context and restore */
433	addik	r11, r6, TI_CPU_CONTEXT
434
435	/* special purpose registers */
436	lwi	r12, r11, CC_FSR
437	mts	rfsr, r12
438	lwi	r12, r11, CC_ESR
439	mts	resr, r12
440	lwi	r12, r11, CC_EAR
441	mts	rear, r12
442	lwi	r12, r11, CC_MSR
443	mts	rmsr, r12
444	/* non-volatile registers */
445	lwi	r30, r11, CC_R30
446	lwi	r29, r11, CC_R29
447	lwi	r28, r11, CC_R28
448	lwi	r27, r11, CC_R27
449	lwi	r26, r11, CC_R26
450	lwi	r25, r11, CC_R25
451	lwi	r24, r11, CC_R24
452	lwi	r23, r11, CC_R23
453	lwi	r22, r11, CC_R22
454	lwi	r21, r11, CC_R21
455	lwi	r20, r11, CC_R20
456	lwi	r19, r11, CC_R19
457	/* dedicated registers */
458	lwi	r18, r11, CC_R18
459	lwi	r17, r11, CC_R17
460	lwi	r16, r11, CC_R16
461	lwi	r15, r11, CC_R15	/* next's link register: we return on next's behalf */
462	lwi	r14, r11, CC_R14
463	lwi	r13, r11, CC_R13
464	/* skip volatile registers */
465	lwi	r2, r11, CC_R2
466	lwi	r1, r11, CC_R1		/* switch to next's stack */
467
468	rtsd	r15, 8
469	nop
470
/*
 * ret_from_fork - first return of a newly forked child.
 * r3 (set up by copy_thread/_switch_to) is passed to schedule_tail();
 * the child then returns 0 to user space through ret_to_user.
 */
471ENTRY(ret_from_fork)
472	addk	r5, r0, r3	/* arg for schedule_tail */
473	brlid	r15, schedule_tail
474	nop
475	swi	r31, r1, PT_R31		/* save r31 in user context. */
476			/* will soon be restored to r31 in ret_to_user */
477	addk	r3, r0, r0	/* child's syscall return value = 0 */
478	brid	ret_to_user
479	nop
480
/*
 * ret_from_kernel_thread - first schedule of a kernel thread.
 * Runs schedule_tail(prev), then calls the thread function held in
 * r20 with its argument from r19; if the function ever returns we
 * fall through to ret_to_user with r3 = 0.
 */
481ENTRY(ret_from_kernel_thread)
482	brlid	r15, schedule_tail
483	addk	r5, r0, r3	/* prev task (delay slot) */
484	brald	r15, r20	/* call the thread function */
485	addk	r5, r0, r19	/* its argument (delay slot) */
486	brid	ret_to_user
487	addk	r3, r0, r0
488
/*
 * work_pending - handle TIF work before returning to user space.
 * Entered from ret_to_user with r19 = TI_FLAGS and interrupts off.
 * Kernel-mode returns (PT_MODE != 0) branch straight to the restore
 * code at label 2 inside ret_to_user.  Otherwise: enable IRQs,
 * reschedule while needed, run do_notify_resume for signal/notify
 * bits, then re-read the flags with IRQs off and loop until clear.
 */
489work_pending:
490	lwi	r11, r1, PT_MODE
491	bneid	r11, 2f		/* kernel mode: restore immediately */
4923:
493	enable_irq
494	andi	r11, r19, _TIF_NEED_RESCHED
495	beqi	r11, 1f
496	bralid	r15, schedule
497	nop
498	bri	4f
/* r5 = r30 (delay-slot) -- presumably the syscall-restart state arg of
 * do_notify_resume; r6 = 1 likewise looks like "in_syscall" -- verify
 * against the C prototype */
4991:	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
500	beqi	r11, no_work_pending
501	addk	r5, r30, r0
502	bralid	r15, do_notify_resume
503	addik	r6, r0, 1
504	addk	r30, r0, r0	/* no restarts from now on */
5054:
506	disable_irq
507	lwi	r6, r31, TS_THREAD_INFO	/* get thread info */
508	lwi	r19, r6, TI_FLAGS /* get flags in thread info */
509	bri	3b
510
/*
 * ret_to_user - common syscall/trap epilogue.
 * Stores the return value (r3/r4) into pt_regs, diverts to
 * work_pending while any TIF flags are set, then restores the full
 * context from pt_regs and returns with rtid.  Label 2 is the
 * kernel-mode re-entry point used by work_pending.
 */
511ENTRY(ret_to_user)
512	disable_irq
513
514	swi	r4, r1, PT_R4		/* return val */
515	swi	r3, r1, PT_R3		/* return val */
516
517	lwi	r6, r31, TS_THREAD_INFO /* get thread info */
518	lwi	r19, r6, TI_FLAGS /* get flags in thread info */
519	bnei	r19, work_pending /* do an extra work if any bits are set */
520no_work_pending:
521	disable_irq
522
5232:
524	/* save r31 */
525	swi	r31, r0, PER_CPU(CURRENT_SAVE)
526	/* save mode indicator */
527	lwi	r18, r1, PT_MODE
528	swi	r18, r0, PER_CPU(KM)
529//restore_context:
530	/* special purpose registers */
531	lwi	r18, r1, PT_FSR
532	mts	rfsr, r18
533	lwi	r18, r1, PT_ESR
534	mts	resr, r18
535	lwi	r18, r1, PT_EAR
536	mts	rear, r18
537	lwi	r18, r1, PT_MSR
538	mts	rmsr, r18
539
540	lwi	r31, r1, PT_R31
541	lwi	r30, r1, PT_R30
542	lwi	r29, r1, PT_R29
543	lwi	r28, r1, PT_R28
544	lwi	r27, r1, PT_R27
545	lwi	r26, r1, PT_R26
546	lwi	r25, r1, PT_R25
547	lwi	r24, r1, PT_R24
548	lwi	r23, r1, PT_R23
549	lwi	r22, r1, PT_R22
550	lwi	r21, r1, PT_R21
551	lwi	r20, r1, PT_R20
552	lwi	r19, r1, PT_R19
553	lwi	r18, r1, PT_R18
554	lwi	r17, r1, PT_R17
555	lwi	r16, r1, PT_R16
556	lwi	r15, r1, PT_R15
557	lwi	r14, r1, PT_PC	/* resume address for rtid */
558	lwi	r13, r1, PT_R13
559	lwi	r12, r1, PT_R12
560	lwi	r11, r1, PT_R11
561	lwi	r10, r1, PT_R10
562	lwi	r9, r1, PT_R9
563	lwi	r8, r1, PT_R8
564	lwi	r7, r1, PT_R7
565	lwi	r6, r1, PT_R6
566	lwi	r5, r1, PT_R5
567	lwi	r4, r1, PT_R4		/* return val */
568	lwi	r3, r1, PT_R3		/* return val */
569	lwi	r2, r1, PT_R2
570	lwi	r1, r1, PT_R1	/* switch back to the saved stack */
571
572	rtid	r14, 0
573	nop
574
/*
 * sigreturn must never be restarted: clear the restart flag in r30,
 * then tail-call sys_rt_sigreturn with r5 = pt_regs (delay slot).
 */
575sys_rt_sigreturn_wrapper:
576	addk	r30, r0, r0		/* no restarts for this one */
577	brid	sys_rt_sigreturn
578	addk	r5, r1, r0
579
580	/* Interrupt vector table */
581	.section	.init.ivt, "ax"
582	.org	0x0
583	brai	_reset
584	brai	_user_exception
585	brai	_interrupt
586	brai	_break
587	brai	_hw_exception_handler
588	.org	0x60	/* debug trap vector (entered via brki r14, 0x60) */
589	brai	_debug_exception
590
/* The syscall table itself lives in .rodata; entries are 4-byte function pointers. */
591.section .rodata,"a"
592#include "syscall_table.S"
593
594syscall_table_size=(.-sys_call_table)
595
/* NUL-terminated names shown by the stack unwinder for each trap range below. */
596type_SYSCALL:
597	.ascii "SYSCALL\0"
598type_IRQ:
599	.ascii "IRQ\0"
600type_IRQ_PREEMPT:
601	.ascii "IRQ (PREEMPTED)\0"
602type_SYSCALL_PREEMPT:
603	.ascii " SYSCALL (PREEMPTED)\0"
604
605	/*
606	 * Trap decoding for stack unwinder
607	 * Tuples are (start addr, end addr, string)
608	 * If return address lies on [start addr, end addr],
609	 * unwinder displays 'string'
610	 */
611
612	.align 4
613.global microblaze_trap_handlers
614microblaze_trap_handlers:
615	/* Exact matches come first */
616	.word ret_to_user  ; .word ret_to_user    ; .word type_SYSCALL
617	.word ret_from_intr; .word ret_from_intr  ; .word type_IRQ
618	/* Fuzzy matches go here */
619	.word ret_from_intr; .word no_intr_resched; .word type_IRQ_PREEMPT
620	.word work_pending ; .word no_work_pending; .word type_SYSCALL_PREEMPT
621	/* End of table */
622	.word 0            ; .word 0              ; .word 0
/*
 * NOTE(review): everything from here to the end of the file is a
 * second, OLDER revision of this same source (it still calls
 * do_signal and carries sys_vfork/sys_clone/sys_execve wrappers,
 * where the copy above uses do_notify_resume and
 * ret_from_kernel_thread).  The two copies define duplicate symbols
 * and cannot be assembled together -- this looks like a scrape /
 * concatenation artifact.  Keep only one revision before building.
 */
1/*
2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2007-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/linkage.h>
12#include <asm/thread_info.h>
13#include <linux/errno.h>
14#include <asm/entry.h>
15#include <asm/asm-offsets.h>
16#include <asm/registers.h>
17#include <asm/unistd.h>
18#include <asm/percpu.h>
19#include <asm/signal.h>
20
21#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
22 .macro disable_irq
23 msrclr r0, MSR_IE
24 .endm
25
26 .macro enable_irq
27 msrset r0, MSR_IE
28 .endm
29
30 .macro clear_bip
31 msrclr r0, MSR_BIP
32 .endm
33#else
34 .macro disable_irq
35 mfs r11, rmsr
36 andi r11, r11, ~MSR_IE
37 mts rmsr, r11
38 .endm
39
40 .macro enable_irq
41 mfs r11, rmsr
42 ori r11, r11, MSR_IE
43 mts rmsr, r11
44 .endm
45
46 .macro clear_bip
47 mfs r11, rmsr
48 andi r11, r11, ~MSR_BIP
49 mts rmsr, r11
50 .endm
51#endif
52
53ENTRY(_interrupt)
54 swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
55 swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
56 lwi r11, r0, PER_CPU(KM) /* load mode indicator */
57 beqid r11, 1f
58 nop
59 brid 2f /* jump over */
60 addik r1, r1, (-PT_SIZE) /* room for pt_regs (delay slot) */
611: /* switch to kernel stack */
62 lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
63 lwi r1, r1, TS_THREAD_INFO /* get the thread info */
64 /* calculate kernel stack pointer */
65 addik r1, r1, THREAD_SIZE - PT_SIZE
662:
67 swi r11, r1, PT_MODE /* store the mode */
68 lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
69 swi r2, r1, PT_R2
70 swi r3, r1, PT_R3
71 swi r4, r1, PT_R4
72 swi r5, r1, PT_R5
73 swi r6, r1, PT_R6
74 swi r7, r1, PT_R7
75 swi r8, r1, PT_R8
76 swi r9, r1, PT_R9
77 swi r10, r1, PT_R10
78 swi r11, r1, PT_R11
79 swi r12, r1, PT_R12
80 swi r13, r1, PT_R13
81 swi r14, r1, PT_R14
82 swi r14, r1, PT_PC
83 swi r15, r1, PT_R15
84 swi r16, r1, PT_R16
85 swi r17, r1, PT_R17
86 swi r18, r1, PT_R18
87 swi r19, r1, PT_R19
88 swi r20, r1, PT_R20
89 swi r21, r1, PT_R21
90 swi r22, r1, PT_R22
91 swi r23, r1, PT_R23
92 swi r24, r1, PT_R24
93 swi r25, r1, PT_R25
94 swi r26, r1, PT_R26
95 swi r27, r1, PT_R27
96 swi r28, r1, PT_R28
97 swi r29, r1, PT_R29
98 swi r30, r1, PT_R30
99 swi r31, r1, PT_R31
100 /* special purpose registers */
101 mfs r11, rmsr
102 swi r11, r1, PT_MSR
103 mfs r11, rear
104 swi r11, r1, PT_EAR
105 mfs r11, resr
106 swi r11, r1, PT_ESR
107 mfs r11, rfsr
108 swi r11, r1, PT_FSR
109 /* reload original stack pointer and save it */
110 lwi r11, r0, PER_CPU(ENTRY_SP)
111 swi r11, r1, PT_R1
112 /* update mode indicator we are in kernel mode */
113 addik r11, r0, 1
114 swi r11, r0, PER_CPU(KM)
115 /* restore r31 */
116 lwi r31, r0, PER_CPU(CURRENT_SAVE)
117 /* prepare the link register, the argument and jump */
118 addik r15, r0, ret_from_intr - 8
119 addk r6, r0, r15
120 braid do_IRQ
121 add r5, r0, r1
122
123ret_from_intr:
124 lwi r11, r1, PT_MODE
125 bneid r11, no_intr_resched
126
127 lwi r6, r31, TS_THREAD_INFO /* get thread info */
128 lwi r19, r6, TI_FLAGS /* get flags in thread info */
129 /* do an extra work if any bits are set */
130
131 andi r11, r19, _TIF_NEED_RESCHED
132 beqi r11, 1f
133 bralid r15, schedule
134 nop
1351: andi r11, r19, _TIF_SIGPENDING
136 beqid r11, no_intr_resched
137 addk r5, r1, r0
138 addk r7, r0, r0
139 bralid r15, do_signal
140 addk r6, r0, r0
141
142no_intr_resched:
143 /* Disable interrupts, we are now committed to the state restore */
144 disable_irq
145
146 /* save mode indicator */
147 lwi r11, r1, PT_MODE
148 swi r11, r0, PER_CPU(KM)
149
150 /* save r31 */
151 swi r31, r0, PER_CPU(CURRENT_SAVE)
152restore_context:
153 /* special purpose registers */
154 lwi r11, r1, PT_FSR
155 mts rfsr, r11
156 lwi r11, r1, PT_ESR
157 mts resr, r11
158 lwi r11, r1, PT_EAR
159 mts rear, r11
160 lwi r11, r1, PT_MSR
161 mts rmsr, r11
162
163 lwi r31, r1, PT_R31
164 lwi r30, r1, PT_R30
165 lwi r29, r1, PT_R29
166 lwi r28, r1, PT_R28
167 lwi r27, r1, PT_R27
168 lwi r26, r1, PT_R26
169 lwi r25, r1, PT_R25
170 lwi r24, r1, PT_R24
171 lwi r23, r1, PT_R23
172 lwi r22, r1, PT_R22
173 lwi r21, r1, PT_R21
174 lwi r20, r1, PT_R20
175 lwi r19, r1, PT_R19
176 lwi r18, r1, PT_R18
177 lwi r17, r1, PT_R17
178 lwi r16, r1, PT_R16
179 lwi r15, r1, PT_R15
180 lwi r14, r1, PT_PC
181 lwi r13, r1, PT_R13
182 lwi r12, r1, PT_R12
183 lwi r11, r1, PT_R11
184 lwi r10, r1, PT_R10
185 lwi r9, r1, PT_R9
186 lwi r8, r1, PT_R8
187 lwi r7, r1, PT_R7
188 lwi r6, r1, PT_R6
189 lwi r5, r1, PT_R5
190 lwi r4, r1, PT_R4
191 lwi r3, r1, PT_R3
192 lwi r2, r1, PT_R2
193 lwi r1, r1, PT_R1
194 rtid r14, 0
195 nop
196
197ENTRY(_reset)
198 brai 0;
199
200ENTRY(_user_exception)
201 swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
202 swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
203 lwi r11, r0, PER_CPU(KM) /* load mode indicator */
204 beqid r11, 1f /* Already in kernel mode? */
205 nop
206 brid 2f /* jump over */
207 addik r1, r1, (-PT_SIZE) /* Room for pt_regs (delay slot) */
2081: /* Switch to kernel stack */
209 lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
210 lwi r1, r1, TS_THREAD_INFO /* get the thread info */
211 /* calculate kernel stack pointer */
212 addik r1, r1, THREAD_SIZE - PT_SIZE
2132:
214 swi r11, r1, PT_MODE /* store the mode */
215 lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
216 /* save them on stack */
217 swi r2, r1, PT_R2
218 swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
219 swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
220 swi r5, r1, PT_R5
221 swi r6, r1, PT_R6
222 swi r7, r1, PT_R7
223 swi r8, r1, PT_R8
224 swi r9, r1, PT_R9
225 swi r10, r1, PT_R10
226 swi r11, r1, PT_R11
227 /* r12: _always_ in clobber list; see unistd.h */
228 swi r12, r1, PT_R12
229 swi r13, r1, PT_R13
230 /* r14: _always_ in clobber list; see unistd.h */
231 swi r14, r1, PT_R14
232 /* but we want to return to the next inst. */
233 addik r14, r14, 0x4
234 swi r14, r1, PT_PC /* increment by 4 and store in pc */
235 swi r15, r1, PT_R15
236 swi r16, r1, PT_R16
237 swi r17, r1, PT_R17
238 swi r18, r1, PT_R18
239 swi r19, r1, PT_R19
240 swi r20, r1, PT_R20
241 swi r21, r1, PT_R21
242 swi r22, r1, PT_R22
243 swi r23, r1, PT_R23
244 swi r24, r1, PT_R24
245 swi r25, r1, PT_R25
246 swi r26, r1, PT_R26
247 swi r27, r1, PT_R27
248 swi r28, r1, PT_R28
249 swi r29, r1, PT_R29
250 swi r30, r1, PT_R30
251 swi r31, r1, PT_R31
252
253 disable_irq
254 nop /* make sure IE bit is in effect */
255 clear_bip /* once IE is in effect it is safe to clear BIP */
256 nop
257
258 /* special purpose registers */
259 mfs r11, rmsr
260 swi r11, r1, PT_MSR
261 mfs r11, rear
262 swi r11, r1, PT_EAR
263 mfs r11, resr
264 swi r11, r1, PT_ESR
265 mfs r11, rfsr
266 swi r11, r1, PT_FSR
267 /* reload original stack pointer and save it */
268 lwi r11, r0, PER_CPU(ENTRY_SP)
269 swi r11, r1, PT_R1
270 /* update mode indicator we are in kernel mode */
271 addik r11, r0, 1
272 swi r11, r0, PER_CPU(KM)
273 /* restore r31 */
274 lwi r31, r0, PER_CPU(CURRENT_SAVE)
275 /* re-enable interrupts now we are in kernel mode */
276 enable_irq
277
278 /* See if the system call number is valid. */
279 addi r11, r12, -__NR_syscalls
280 bgei r11, 1f /* return to user if not valid */
281 /* Figure out which function to use for this system call. */
282 /* Note Microblaze barrel shift is optional, so don't rely on it */
283 add r12, r12, r12 /* convert num -> ptr */
284 add r12, r12, r12
285 lwi r12, r12, sys_call_table /* Get function pointer */
286 addik r15, r0, ret_to_user-8 /* set return address */
287 bra r12 /* Make the system call. */
288 bri 0 /* won't reach here */
2891:
290 brid ret_to_user /* jump to syscall epilogue */
291 addi r3, r0, -ENOSYS /* set errno in delay slot */
292
293/*
294 * Debug traps are like a system call, but entered via brki r14, 0x60
295 * All we need to do is send the SIGTRAP signal to current, ptrace and do_signal
296 * will handle the rest
297 */
298ENTRY(_debug_exception)
299 swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
300 lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
301 lwi r1, r1, TS_THREAD_INFO /* get the thread info */
302 addik r1, r1, THREAD_SIZE - PT_SIZE /* get the kernel stack */
303 swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
304 lwi r11, r0, PER_CPU(KM) /* load mode indicator */
305//save_context:
306 swi r11, r1, PT_MODE /* store the mode */
307 lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
308 /* save them on stack */
309 swi r2, r1, PT_R2
310 swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
311 swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
312 swi r5, r1, PT_R5
313 swi r6, r1, PT_R6
314 swi r7, r1, PT_R7
315 swi r8, r1, PT_R8
316 swi r9, r1, PT_R9
317 swi r10, r1, PT_R10
318 swi r11, r1, PT_R11
319 /* r12: _always_ in clobber list; see unistd.h */
320 swi r12, r1, PT_R12
321 swi r13, r1, PT_R13
322 /* r14: _always_ in clobber list; see unistd.h */
323 swi r14, r1, PT_R14
324 swi r14, r1, PT_PC /* Will return to interrupted instruction */
325 swi r15, r1, PT_R15
326 swi r16, r1, PT_R16
327 swi r17, r1, PT_R17
328 swi r18, r1, PT_R18
329 swi r19, r1, PT_R19
330 swi r20, r1, PT_R20
331 swi r21, r1, PT_R21
332 swi r22, r1, PT_R22
333 swi r23, r1, PT_R23
334 swi r24, r1, PT_R24
335 swi r25, r1, PT_R25
336 swi r26, r1, PT_R26
337 swi r27, r1, PT_R27
338 swi r28, r1, PT_R28
339 swi r29, r1, PT_R29
340 swi r30, r1, PT_R30
341 swi r31, r1, PT_R31
342
343 disable_irq
344 nop /* make sure IE bit is in effect */
345 clear_bip /* once IE is in effect it is safe to clear BIP */
346 nop
347
348 /* special purpose registers */
349 mfs r11, rmsr
350 swi r11, r1, PT_MSR
351 mfs r11, rear
352 swi r11, r1, PT_EAR
353 mfs r11, resr
354 swi r11, r1, PT_ESR
355 mfs r11, rfsr
356 swi r11, r1, PT_FSR
357 /* reload original stack pointer and save it */
358 lwi r11, r0, PER_CPU(ENTRY_SP)
359 swi r11, r1, PT_R1
360 /* update mode indicator we are in kernel mode */
361 addik r11, r0, 1
362 swi r11, r0, PER_CPU(KM)
363 /* restore r31 */
364 lwi r31, r0, PER_CPU(CURRENT_SAVE)
365 /* re-enable interrupts now we are in kernel mode */
366 enable_irq
367
368 addi r5, r0, SIGTRAP /* sending the trap signal */
369 add r6, r0, r31 /* to current */
370 bralid r15, send_sig
371 add r7, r0, r0 /* 3rd param zero */
372
373 /* Restore r3/r4 to work around how ret_to_user works */
374 lwi r3, r1, PT_R3
375 lwi r4, r1, PT_R4
376 bri ret_to_user
377
378ENTRY(_break)
379 bri 0
380
381/* struct task_struct *_switch_to(struct thread_info *prev,
382 struct thread_info *next); */
383ENTRY(_switch_to)
384 /* prepare return value */
385 addk r3, r0, r31
386
387 /* save registers in cpu_context */
388 /* use r11 and r12, volatile registers, as temp register */
389 addik r11, r5, TI_CPU_CONTEXT
390 swi r1, r11, CC_R1
391 swi r2, r11, CC_R2
392 /* skip volatile registers.
393 * they are saved on stack when we jumped to _switch_to() */
394 /* dedicated registers */
395 swi r13, r11, CC_R13
396 swi r14, r11, CC_R14
397 swi r15, r11, CC_R15
398 swi r16, r11, CC_R16
399 swi r17, r11, CC_R17
400 swi r18, r11, CC_R18
401 /* save non-volatile registers */
402 swi r19, r11, CC_R19
403 swi r20, r11, CC_R20
404 swi r21, r11, CC_R21
405 swi r22, r11, CC_R22
406 swi r23, r11, CC_R23
407 swi r24, r11, CC_R24
408 swi r25, r11, CC_R25
409 swi r26, r11, CC_R26
410 swi r27, r11, CC_R27
411 swi r28, r11, CC_R28
412 swi r29, r11, CC_R29
413 swi r30, r11, CC_R30
414 /* special purpose registers */
415 mfs r12, rmsr
416 swi r12, r11, CC_MSR
417 mfs r12, rear
418 swi r12, r11, CC_EAR
419 mfs r12, resr
420 swi r12, r11, CC_ESR
421 mfs r12, rfsr
422 swi r12, r11, CC_FSR
423
424 /* update r31, the current */
425 lwi r31, r6, TI_TASK
426 swi r31, r0, PER_CPU(CURRENT_SAVE)
427
428 /* get new process' cpu context and restore */
429 addik r11, r6, TI_CPU_CONTEXT
430
431 /* special purpose registers */
432 lwi r12, r11, CC_FSR
433 mts rfsr, r12
434 lwi r12, r11, CC_ESR
435 mts resr, r12
436 lwi r12, r11, CC_EAR
437 mts rear, r12
438 lwi r12, r11, CC_MSR
439 mts rmsr, r12
440 /* non-volatile registers */
441 lwi r30, r11, CC_R30
442 lwi r29, r11, CC_R29
443 lwi r28, r11, CC_R28
444 lwi r27, r11, CC_R27
445 lwi r26, r11, CC_R26
446 lwi r25, r11, CC_R25
447 lwi r24, r11, CC_R24
448 lwi r23, r11, CC_R23
449 lwi r22, r11, CC_R22
450 lwi r21, r11, CC_R21
451 lwi r20, r11, CC_R20
452 lwi r19, r11, CC_R19
453 /* dedicated registers */
454 lwi r18, r11, CC_R18
455 lwi r17, r11, CC_R17
456 lwi r16, r11, CC_R16
457 lwi r15, r11, CC_R15
458 lwi r14, r11, CC_R14
459 lwi r13, r11, CC_R13
460 /* skip volatile registers */
461 lwi r2, r11, CC_R2
462 lwi r1, r11, CC_R1
463
464 rtsd r15, 8
465 nop
466
467ENTRY(ret_from_fork)
468 addk r5, r0, r3
469 addk r6, r0, r1
470 brlid r15, schedule_tail
471 nop
472 swi r31, r1, PT_R31 /* save r31 in user context. */
473 /* will soon be restored to r31 in ret_to_user */
474 addk r3, r0, r0
475 brid ret_to_user
476 nop
477
478work_pending:
479 enable_irq
480
481 andi r11, r19, _TIF_NEED_RESCHED
482 beqi r11, 1f
483 bralid r15, schedule
484 nop
4851: andi r11, r19, _TIF_SIGPENDING
486 beqi r11, no_work_pending
487 addk r5, r1, r0
488 addik r7, r0, 1
489 bralid r15, do_signal
490 addk r6, r0, r0
491 bri no_work_pending
492
493ENTRY(ret_to_user)
494 disable_irq
495
496 swi r4, r1, PT_R4 /* return val */
497 swi r3, r1, PT_R3 /* return val */
498
499 lwi r6, r31, TS_THREAD_INFO /* get thread info */
500 lwi r19, r6, TI_FLAGS /* get flags in thread info */
501 bnei r19, work_pending /* do an extra work if any bits are set */
502no_work_pending:
503 disable_irq
504
505 /* save r31 */
506 swi r31, r0, PER_CPU(CURRENT_SAVE)
507 /* save mode indicator */
508 lwi r18, r1, PT_MODE
509 swi r18, r0, PER_CPU(KM)
510//restore_context:
511 /* special purpose registers */
512 lwi r18, r1, PT_FSR
513 mts rfsr, r18
514 lwi r18, r1, PT_ESR
515 mts resr, r18
516 lwi r18, r1, PT_EAR
517 mts rear, r18
518 lwi r18, r1, PT_MSR
519 mts rmsr, r18
520
521 lwi r31, r1, PT_R31
522 lwi r30, r1, PT_R30
523 lwi r29, r1, PT_R29
524 lwi r28, r1, PT_R28
525 lwi r27, r1, PT_R27
526 lwi r26, r1, PT_R26
527 lwi r25, r1, PT_R25
528 lwi r24, r1, PT_R24
529 lwi r23, r1, PT_R23
530 lwi r22, r1, PT_R22
531 lwi r21, r1, PT_R21
532 lwi r20, r1, PT_R20
533 lwi r19, r1, PT_R19
534 lwi r18, r1, PT_R18
535 lwi r17, r1, PT_R17
536 lwi r16, r1, PT_R16
537 lwi r15, r1, PT_R15
538 lwi r14, r1, PT_PC
539 lwi r13, r1, PT_R13
540 lwi r12, r1, PT_R12
541 lwi r11, r1, PT_R11
542 lwi r10, r1, PT_R10
543 lwi r9, r1, PT_R9
544 lwi r8, r1, PT_R8
545 lwi r7, r1, PT_R7
546 lwi r6, r1, PT_R6
547 lwi r5, r1, PT_R5
548 lwi r4, r1, PT_R4 /* return val */
549 lwi r3, r1, PT_R3 /* return val */
550 lwi r2, r1, PT_R2
551 lwi r1, r1, PT_R1
552
553 rtid r14, 0
554 nop
555
556sys_vfork:
557 brid microblaze_vfork
558 addk r5, r1, r0
559
560sys_clone:
561 brid microblaze_clone
562 addk r7, r1, r0
563
564sys_execve:
565 brid microblaze_execve
566 addk r8, r1, r0
567
568sys_rt_sigreturn_wrapper:
569 brid sys_rt_sigreturn
570 addk r5, r1, r0
571
572sys_rt_sigsuspend_wrapper:
573 brid sys_rt_sigsuspend
574 addk r7, r1, r0
575
576 /* Interrupt vector table */
577 .section .init.ivt, "ax"
578 .org 0x0
579 brai _reset
580 brai _user_exception
581 brai _interrupt
582 brai _break
583 brai _hw_exception_handler
584 .org 0x60
585 brai _debug_exception
586
587.section .rodata,"a"
588#include "syscall_table.S"
589
590syscall_table_size=(.-sys_call_table)
591
592type_SYSCALL:
593 .ascii "SYSCALL\0"
594type_IRQ:
595 .ascii "IRQ\0"
596type_IRQ_PREEMPT:
597 .ascii "IRQ (PREEMPTED)\0"
598type_SYSCALL_PREEMPT:
599 .ascii " SYSCALL (PREEMPTED)\0"
600
601 /*
602 * Trap decoding for stack unwinder
603 * Tuples are (start addr, end addr, string)
604 * If return address lies on [start addr, end addr],
605 * unwinder displays 'string'
606 */
607
608 .align 4
609.global microblaze_trap_handlers
610microblaze_trap_handlers:
611 /* Exact matches come first */
612 .word ret_to_user ; .word ret_to_user ; .word type_SYSCALL
613 .word ret_from_intr; .word ret_from_intr ; .word type_IRQ
614 /* Fuzzy matches go here */
615 .word ret_from_intr; .word no_intr_resched; .word type_IRQ_PREEMPT
616 .word work_pending ; .word no_work_pending; .word type_SYSCALL_PREEMPT
617 /* End of table */
618 .word 0 ; .word 0 ; .word 0