1/*
2 * linux/arch/arm/kernel/entry-armv.S
3 *
4 * Copyright (C) 1996,1997,1998 Russell King.
5 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
6 * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * Low-level vector interface routines
13 *
14 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
15 * that causes it to save wrong values... Be aware!
16 */
17
18#include <asm/assembler.h>
19#include <asm/memory.h>
20#include <asm/glue-df.h>
21#include <asm/glue-pf.h>
22#include <asm/vfpmacros.h>
23#ifndef CONFIG_MULTI_IRQ_HANDLER
24#include <mach/entry-macro.S>
25#endif
26#include <asm/thread_notify.h>
27#include <asm/unwind.h>
28#include <asm/unistd.h>
29#include <asm/tls.h>
30#include <asm/system_info.h>
31
32#include "entry-header.S"
33#include <asm/entry-macro-multi.S>
34
35/*
36 * Interrupt handling.
37 */
38 .macro irq_handler
39#ifdef CONFIG_MULTI_IRQ_HANDLER
40 ldr r1, =handle_arch_irq
41 mov r0, sp
42 adr lr, BSYM(9997f)
43 ldr pc, [r1]
44#else
45 arch_irq_handler_default
46#endif
479997:
48 .endm
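/*
 * In C terms, the CONFIG_MULTI_IRQ_HANDLER path above amounts to roughly
 * (a sketch for orientation, not a literal translation):
 *
 *	handle_arch_irq(regs);		// regs == current sp (struct pt_regs *)
 *
 * where handle_arch_irq is the function pointer reserved at the bottom of
 * this file and typically filled in by the platform's interrupt
 * controller code.
 */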
49
50 .macro pabt_helper
51 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
52#ifdef MULTI_PABORT
53 ldr ip, .LCprocfns
54 mov lr, pc
55 ldr pc, [ip, #PROCESSOR_PABT_FUNC]
56#else
57 bl CPU_PABORT_HANDLER
58#endif
59 .endm
60
61 .macro dabt_helper
62
63 @
64 @ Call the processor-specific abort handler:
65 @
66 @ r2 - pt_regs
67 @ r4 - aborted context pc
68 @ r5 - aborted context psr
69 @
70 @ The abort handler must return the aborted address in r0, and
71 @ the fault status register in r1. r9 must be preserved.
72 @
73#ifdef MULTI_DABORT
74 ldr ip, .LCprocfns
75 mov lr, pc
76 ldr pc, [ip, #PROCESSOR_DABT_FUNC]
77#else
78 bl CPU_DABORT_HANDLER
79#endif
80 .endm
81
82#ifdef CONFIG_KPROBES
83 .section .kprobes.text,"ax",%progbits
84#else
85 .text
86#endif
87
88/*
89 * Invalid mode handlers
90 */
91 .macro inv_entry, reason
92 sub sp, sp, #S_FRAME_SIZE
93 ARM( stmib sp, {r1 - lr} )
94 THUMB( stmia sp, {r0 - r12} )
95 THUMB( str sp, [sp, #S_SP] )
96 THUMB( str lr, [sp, #S_LR] )
97 mov r1, #\reason
98 .endm
99
100__pabt_invalid:
101 inv_entry BAD_PREFETCH
102 b common_invalid
103ENDPROC(__pabt_invalid)
104
105__dabt_invalid:
106 inv_entry BAD_DATA
107 b common_invalid
108ENDPROC(__dabt_invalid)
109
110__irq_invalid:
111 inv_entry BAD_IRQ
112 b common_invalid
113ENDPROC(__irq_invalid)
114
115__und_invalid:
116 inv_entry BAD_UNDEFINSTR
117
118 @
119 @ XXX fall through to common_invalid
120 @
121
122@
123@ common_invalid - generic code for failed exception (re-entrant version of handlers)
124@
125common_invalid:
126 zero_fp
127
128 ldmia r0, {r4 - r6}
129 add r0, sp, #S_PC @ here for interlock avoidance
130 mov r7, #-1 @ "" "" "" ""
131 str r4, [sp] @ save preserved r0
132 stmia r0, {r5 - r7} @ lr_<exception>,
133 @ cpsr_<exception>, "old_r0"
134
135 mov r0, sp
136 b bad_mode
137ENDPROC(__und_invalid)
138
139/*
140 * SVC mode handlers
141 */
142
143#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
144#define SPFIX(code...) code
145#else
146#define SPFIX(code...)
147#endif
148
149 .macro svc_entry, stack_hole=0
150 UNWIND(.fnstart )
151 UNWIND(.save {r0 - pc} )
152 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
153#ifdef CONFIG_THUMB2_KERNEL
154 SPFIX( str r0, [sp] ) @ temporarily saved
155 SPFIX( mov r0, sp )
156 SPFIX( tst r0, #4 ) @ test original stack alignment
157 SPFIX( ldr r0, [sp] ) @ restored
158#else
159 SPFIX( tst sp, #4 )
160#endif
161 SPFIX( subeq sp, sp, #4 )
162 stmia sp, {r1 - r12}
163
164 ldmia r0, {r3 - r5}
165 add r7, sp, #S_SP - 4 @ here for interlock avoidance
166 mov r6, #-1 @ "" "" "" ""
167 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
168 SPFIX( addeq r2, r2, #4 )
169 str r3, [sp, #-4]! @ save the "real" r0 copied
170 @ from the exception stack
171
172 mov r3, lr
173
174 @
175 @ We are now ready to fill in the remaining blanks on the stack:
176 @
177 @ r2 - sp_svc
178 @ r3 - lr_svc
179 @ r4 - lr_<exception>, already fixed up for correct return/restart
180 @ r5 - spsr_<exception>
181 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
182 @
183 stmia r7, {r2 - r6}
184
185#ifdef CONFIG_TRACE_IRQFLAGS
186 bl trace_hardirqs_off
187#endif
188 .endm
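/*
 * Note on the SPFIX() lines above: on EABI kernels they keep the final
 * SVC stack pointer 8-byte aligned.  If sp is still 8-byte aligned after
 * the frame has been reserved, one extra pad word is dropped so that the
 * later "str r3, [sp, #-4]!" push leaves sp 8-byte aligned, and r2 (the
 * sp_svc value saved into the frame) is adjusted so that it still holds
 * the original, pre-exception stack pointer.
 */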
189
190 .align 5
191__dabt_svc:
192 svc_entry
193 mov r2, sp
194 dabt_helper
195 THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
196 svc_exit r5 @ return from exception
197 UNWIND(.fnend )
198ENDPROC(__dabt_svc)
199
200 .align 5
201__irq_svc:
202 svc_entry
203 irq_handler
204
205#ifdef CONFIG_PREEMPT
206 get_thread_info tsk
207 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
208 ldr r0, [tsk, #TI_FLAGS] @ get flags
209 teq r8, #0 @ if preempt count != 0
210 movne r0, #0 @ force flags to 0
211 tst r0, #_TIF_NEED_RESCHED
212 blne svc_preempt
213#endif
214
215 svc_exit r5, irq = 1 @ return from exception
216 UNWIND(.fnend )
217ENDPROC(__irq_svc)
218
219 .ltorg
220
221#ifdef CONFIG_PREEMPT
222svc_preempt:
223 mov r8, lr
2241: bl preempt_schedule_irq @ irq en/disable is done inside
225	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
226	tst	r0, #_TIF_NEED_RESCHED
227	moveq	pc, r8				@ return if no longer needed
228	b	1b				@ otherwise, go again
229#endif
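/*
 * svc_preempt above is, in rough C (a sketch, assuming the usual
 * thread-flag accessors):
 *
 *	do {
 *		preempt_schedule_irq();	// enables/disables IRQs internally
 *	} while (test_thread_flag(TIF_NEED_RESCHED));
 */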
230
231__und_fault:
232 @ Correct the PC such that it is pointing at the instruction
233 @ which caused the fault. If the faulting instruction was ARM
234	@ the PC will be pointing at the next instruction, and we have to
235 @ subtract 4. Otherwise, it is Thumb, and the PC will be
236 @ pointing at the second half of the Thumb instruction. We
237 @ have to subtract 2.
238 ldr r2, [r0, #S_PC]
239 sub r2, r2, r1
240 str r2, [r0, #S_PC]
241 b do_undefinstr
242ENDPROC(__und_fault)
243
244 .align 5
245__und_svc:
246#ifdef CONFIG_KPROBES
247 @ If a kprobe is about to simulate a "stmdb sp..." instruction,
248 @ it obviously needs free stack space which then will belong to
249 @ the saved context.
250 svc_entry 64
251#else
252 svc_entry
253#endif
254 @
255 @ call emulation code, which returns using r9 if it has emulated
256 @ the instruction, or the more conventional lr if we are to treat
257 @ this as a real undefined instruction
258 @
259 @ r0 - instruction
260 @
261#ifndef CONFIG_THUMB2_KERNEL
262 ldr r0, [r4, #-4]
263#else
264 mov r1, #2
265 ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
266 cmp r0, #0xe800 @ 32-bit instruction if xx >= 0
267 blo __und_svc_fault
268 ldrh r9, [r4] @ bottom 16 bits
269 add r4, r4, #2
270 str r4, [sp, #S_PC]
271 orr r0, r9, r0, lsl #16
272#endif
273 adr r9, BSYM(__und_svc_finish)
274 mov r2, r4
275 bl call_fpe
276
277 mov r1, #4 @ PC correction to apply
278__und_svc_fault:
279 mov r0, sp @ struct pt_regs *regs
280 bl __und_fault
281
282__und_svc_finish:
283 ldr r5, [sp, #S_PSR] @ Get SVC cpsr
284 svc_exit r5 @ return from exception
285 UNWIND(.fnend )
286ENDPROC(__und_svc)
287
288 .align 5
289__pabt_svc:
290 svc_entry
291 mov r2, sp @ regs
292 pabt_helper
293 svc_exit r5 @ return from exception
294 UNWIND(.fnend )
295ENDPROC(__pabt_svc)
296
297 .align 5
298.LCcralign:
299 .word cr_alignment
300#ifdef MULTI_DABORT
301.LCprocfns:
302 .word processor
303#endif
304.LCfp:
305 .word fp_enter
306
307/*
308 * User mode handlers
309 *
310 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE should be too
311 */
312
313#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
314#error "sizeof(struct pt_regs) must be a multiple of 8"
315#endif
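/*
 * For reference, S_FRAME_SIZE is sizeof(struct pt_regs), which on ARM is
 * essentially (see arch/arm/include/asm/ptrace.h for the real definition):
 *
 *	struct pt_regs {
 *		unsigned long uregs[18];	// r0-r15, cpsr, orig_r0
 *	};
 *
 * The S_* offsets used throughout this file index into that array.
 */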
316
317 .macro usr_entry
318 UNWIND(.fnstart )
319 UNWIND(.cantunwind ) @ don't unwind the user space
320 sub sp, sp, #S_FRAME_SIZE
321 ARM( stmib sp, {r1 - r12} )
322 THUMB( stmia sp, {r0 - r12} )
323
324 ldmia r0, {r3 - r5}
325 add r0, sp, #S_PC @ here for interlock avoidance
326 mov r6, #-1 @ "" "" "" ""
327
328 str r3, [sp] @ save the "real" r0 copied
329 @ from the exception stack
330
331 @
332 @ We are now ready to fill in the remaining blanks on the stack:
333 @
334 @ r4 - lr_<exception>, already fixed up for correct return/restart
335 @ r5 - spsr_<exception>
336 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
337 @
338 @ Also, separately save sp_usr and lr_usr
339 @
340 stmia r0, {r4 - r6}
341 ARM( stmdb r0, {sp, lr}^ )
342 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
343
344 @
345 @ Enable the alignment trap while in kernel mode
346 @
347 alignment_trap r0
348
349 @
350 @ Clear FP to mark the first stack frame
351 @
352 zero_fp
353
354#ifdef CONFIG_IRQSOFF_TRACER
355 bl trace_hardirqs_off
356#endif
357 ct_user_exit save = 0
358 .endm
359
360 .macro kuser_cmpxchg_check
361#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
362 !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
363#ifndef CONFIG_MMU
364#warning "NPTL on non MMU needs fixing"
365#else
366 @ Make sure our user space atomic helper is restarted
367 @ if it was interrupted in a critical region. Here we
368 @ perform a quick test inline since it should be false
369 @ 99.9999% of the time. The rest is done out of line.
370 cmp r4, #TASK_SIZE
371 blhs kuser_cmpxchg64_fixup
372#endif
373#endif
374 .endm
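/*
 * The check above relies on the kuser helpers living in the vector page,
 * well above TASK_SIZE: a user-mode fault/interrupt address (r4) at or
 * above TASK_SIZE can only mean one of the helpers was interrupted, so
 * the out-of-line fixup gets a chance to rewind the PC to the start of
 * the critical section.
 */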
375
376 .align 5
377__dabt_usr:
378 usr_entry
379 kuser_cmpxchg_check
380 mov r2, sp
381 dabt_helper
382 b ret_from_exception
383 UNWIND(.fnend )
384ENDPROC(__dabt_usr)
385
386 .align 5
387__irq_usr:
388 usr_entry
389 kuser_cmpxchg_check
390 irq_handler
391 get_thread_info tsk
392 mov why, #0
393 b ret_to_user_from_irq
394 UNWIND(.fnend )
395ENDPROC(__irq_usr)
396
397 .ltorg
398
399 .align 5
400__und_usr:
401 usr_entry
402
403 mov r2, r4
404 mov r3, r5
405
406 @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
407 @ faulting instruction depending on Thumb mode.
408 @ r3 = regs->ARM_cpsr
409 @
410 @ The emulation code returns using r9 if it has emulated the
411 @ instruction, or the more conventional lr if we are to treat
412 @ this as a real undefined instruction
413 @
414 adr r9, BSYM(ret_from_exception)
415
416 tst r3, #PSR_T_BIT @ Thumb mode?
417 bne __und_usr_thumb
418 sub r4, r2, #4 @ ARM instr at LR - 4
4191: ldrt r0, [r4]
420 ARM_BE8(rev r0, r0) @ little endian instruction
421
422 @ r0 = 32-bit ARM instruction which caused the exception
423 @ r2 = PC value for the following instruction (:= regs->ARM_pc)
424 @ r4 = PC value for the faulting instruction
425 @ lr = 32-bit undefined instruction function
426 adr lr, BSYM(__und_usr_fault_32)
427 b call_fpe
428
429__und_usr_thumb:
430 @ Thumb instruction
431 sub r4, r2, #2 @ First half of thumb instr at LR - 2
432#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
433/*
434 * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms
435 * can never be supported in a single kernel, this code is not applicable at
436 * all when __LINUX_ARM_ARCH__ < 6. This allows simplifying assumptions to be
437 * made about .arch directives.
438 */
439#if __LINUX_ARM_ARCH__ < 7
440/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
441#define NEED_CPU_ARCHITECTURE
442 ldr r5, .LCcpu_architecture
443 ldr r5, [r5]
444 cmp r5, #CPU_ARCH_ARMv7
445 blo __und_usr_fault_16 @ 16bit undefined instruction
446/*
447 * The following code won't get run unless the running CPU really is v7, so
448 * coding round the lack of ldrht on older arches is pointless. Temporarily
449 * override the assembler target arch with the minimum required instead:
450 */
451 .arch armv6t2
452#endif
4532: ldrht r5, [r4]
454ARM_BE8(rev16 r5, r5) @ little endian instruction
455 cmp r5, #0xe800 @ 32bit instruction if xx != 0
456 blo __und_usr_fault_16 @ 16bit undefined instruction
4573: ldrht r0, [r2]
458ARM_BE8(rev16 r0, r0) @ little endian instruction
459 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
460 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
461 orr r0, r0, r5, lsl #16
462 adr lr, BSYM(__und_usr_fault_32)
463 @ r0 = the two 16-bit Thumb instructions which caused the exception
464 @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
465 @ r4 = PC value for the first 16-bit Thumb instruction
466 @ lr = 32bit undefined instruction function
467
468#if __LINUX_ARM_ARCH__ < 7
469/* If the target arch was overridden, change it back: */
470#ifdef CONFIG_CPU_32v6K
471 .arch armv6k
472#else
473 .arch armv6
474#endif
475#endif /* __LINUX_ARM_ARCH__ < 7 */
476#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
477 b __und_usr_fault_16
478#endif
479 UNWIND(.fnend)
480ENDPROC(__und_usr)
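/*
 * Reminder on the 0xe800 comparisons above: a Thumb halfword whose top
 * five bits are 0b11101, 0b11110 or 0b11111 (i.e. hw >= 0xe800) is the
 * first half of a 32-bit Thumb-2 encoding; anything below that is a
 * complete 16-bit instruction.
 */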
481
482/*
483 * The out of line fixup for the ldrt instructions above.
484 */
485 .pushsection .fixup, "ax"
486 .align 2
4874: mov pc, r9
488 .popsection
489 .pushsection __ex_table,"a"
490 .long 1b, 4b
491#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
492 .long 2b, 4b
493 .long 3b, 4b
494#endif
495 .popsection
496
497/*
498 * Check whether the instruction is a co-processor instruction.
499 * If yes, we need to call the relevant co-processor handler.
500 *
501 * Note that we don't do a full check here for the co-processor
502 * instructions; all instructions with bit 27 set are well
503 * defined. The only instructions that should fault are the
504 * co-processor instructions. However, we have to watch out
505 * for the ARM6/ARM7 SWI bug.
506 *
507 * NEON is a special case that has to be handled here. Not all
508 * NEON instructions are co-processor instructions, so we have
509 * to make a special case of checking for them. Plus, there's
510 * five groups of them, so we have a table of mask/opcode pairs
511 * to check against, and if any match then we branch off into the
512 * NEON handler code.
513 *
514 * Emulators may wish to make use of the following registers:
515 * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
516 * r2 = PC value to resume execution after successful emulation
517 * r9 = normal "successful" return address
518 * r10 = this thread's thread_info structure
519 * lr = unrecognised instruction return address
520 * IRQs disabled, FIQs enabled.
521 */
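/*
 * A rough C restatement of the decode performed below (a sketch only;
 * the branch table that follows is the authoritative dispatch):
 *
 *	if ((insn & 0x0c000000) != 0x0c000000)
 *		goto not_a_coprocessor_insn;	// i.e. return via lr
 *	cp = (insn >> 8) & 0xf;			// coprocessor number
 *	current_thread_info()->used_cp[cp] = 1;
 *	// ...then jump through the per-coprocessor table (CP#0..CP#15).
 */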
522 @
523 @ Fall-through from Thumb-2 __und_usr
524 @
525#ifdef CONFIG_NEON
526 get_thread_info r10 @ get current thread
527 adr r6, .LCneon_thumb_opcodes
528 b 2f
529#endif
530call_fpe:
531 get_thread_info r10 @ get current thread
532#ifdef CONFIG_NEON
533 adr r6, .LCneon_arm_opcodes
5342: ldr r5, [r6], #4 @ mask value
535 ldr r7, [r6], #4 @ opcode bits matching in mask
536 cmp r5, #0 @ end mask?
537 beq 1f
538 and r8, r0, r5
539 cmp r8, r7 @ NEON instruction?
540 bne 2b
541 mov r7, #1
542 strb r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used
543 strb r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used
544 b do_vfp @ let VFP handler handle this
5451:
546#endif
547 tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
548 tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
549 moveq pc, lr
550 and r8, r0, #0x00000f00 @ mask out CP number
551 THUMB( lsr r8, r8, #8 )
552 mov r7, #1
553 add r6, r10, #TI_USED_CP
554 ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
555 THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
556#ifdef CONFIG_IWMMXT
557 @ Test if we need to give access to iWMMXt coprocessors
558 ldr r5, [r10, #TI_FLAGS]
559 rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only
560 movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1)
561 bcs iwmmxt_task_enable
562#endif
563 ARM( add pc, pc, r8, lsr #6 )
564 THUMB( lsl r8, r8, #2 )
565 THUMB( add pc, r8 )
566 nop
567
568 movw_pc lr @ CP#0
569 W(b) do_fpe @ CP#1 (FPE)
570 W(b) do_fpe @ CP#2 (FPE)
571 movw_pc lr @ CP#3
572#ifdef CONFIG_CRUNCH
573 b crunch_task_enable @ CP#4 (MaverickCrunch)
574 b crunch_task_enable @ CP#5 (MaverickCrunch)
575 b crunch_task_enable @ CP#6 (MaverickCrunch)
576#else
577 movw_pc lr @ CP#4
578 movw_pc lr @ CP#5
579 movw_pc lr @ CP#6
580#endif
581 movw_pc lr @ CP#7
582 movw_pc lr @ CP#8
583 movw_pc lr @ CP#9
584#ifdef CONFIG_VFP
585 W(b) do_vfp @ CP#10 (VFP)
586 W(b) do_vfp @ CP#11 (VFP)
587#else
588 movw_pc lr @ CP#10 (VFP)
589 movw_pc lr @ CP#11 (VFP)
590#endif
591 movw_pc lr @ CP#12
592 movw_pc lr @ CP#13
593 movw_pc lr @ CP#14 (Debug)
594 movw_pc lr @ CP#15 (Control)
595
596#ifdef NEED_CPU_ARCHITECTURE
597 .align 2
598.LCcpu_architecture:
599 .word __cpu_architecture
600#endif
601
602#ifdef CONFIG_NEON
603 .align 6
604
605.LCneon_arm_opcodes:
606 .word 0xfe000000 @ mask
607 .word 0xf2000000 @ opcode
608
609 .word 0xff100000 @ mask
610 .word 0xf4000000 @ opcode
611
612 .word 0x00000000 @ mask
613 .word 0x00000000 @ opcode
614
615.LCneon_thumb_opcodes:
616 .word 0xef000000 @ mask
617 .word 0xef000000 @ opcode
618
619 .word 0xff100000 @ mask
620 .word 0xf9000000 @ opcode
621
622 .word 0x00000000 @ mask
623 .word 0x00000000 @ opcode
624#endif
625
626do_fpe:
627 enable_irq
628 ldr r4, .LCfp
629 add r10, r10, #TI_FPSTATE @ r10 = workspace
630 ldr pc, [r4] @ Call FP module USR entry point
631
632/*
633 * The FP module is called with these registers set:
634 * r0 = instruction
635 * r2 = PC+4
636 * r9 = normal "successful" return address
637 * r10 = FP workspace
638 * lr = unrecognised FP instruction return address
639 */
640
641 .pushsection .data
642ENTRY(fp_enter)
643 .word no_fp
644 .popsection
645
646ENTRY(no_fp)
647 mov pc, lr
648ENDPROC(no_fp)
649
650__und_usr_fault_32:
651 mov r1, #4
652 b 1f
653__und_usr_fault_16:
654 mov r1, #2
6551: enable_irq
656 mov r0, sp
657 adr lr, BSYM(ret_from_exception)
658 b __und_fault
659ENDPROC(__und_usr_fault_32)
660ENDPROC(__und_usr_fault_16)
661
662 .align 5
663__pabt_usr:
664 usr_entry
665 mov r2, sp @ regs
666 pabt_helper
667 UNWIND(.fnend )
668 /* fall through */
669/*
670 * This is the return code to user mode for abort handlers
671 */
672ENTRY(ret_from_exception)
673 UNWIND(.fnstart )
674 UNWIND(.cantunwind )
675 get_thread_info tsk
676 mov why, #0
677 b ret_to_user
678 UNWIND(.fnend )
679ENDPROC(__pabt_usr)
680ENDPROC(ret_from_exception)
681
682/*
683 * Register switch for ARMv3 and ARMv4 processors
684 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
685 * previous and next are guaranteed not to be the same.
686 */
687ENTRY(__switch_to)
688 UNWIND(.fnstart )
689 UNWIND(.cantunwind )
690 add ip, r1, #TI_CPU_SAVE
691 ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack
692 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
693 THUMB( str sp, [ip], #4 )
694 THUMB( str lr, [ip], #4 )
695 ldr r4, [r2, #TI_TP_VALUE]
696 ldr r5, [r2, #TI_TP_VALUE + 4]
697#ifdef CONFIG_CPU_USE_DOMAINS
698 ldr r6, [r2, #TI_CPU_DOMAIN]
699#endif
700 switch_tls r1, r4, r5, r3, r7
701#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
702 ldr r7, [r2, #TI_TASK]
703 ldr r8, =__stack_chk_guard
704 ldr r7, [r7, #TSK_STACK_CANARY]
705#endif
706#ifdef CONFIG_CPU_USE_DOMAINS
707 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
708#endif
709 mov r5, r0
710 add r4, r2, #TI_CPU_SAVE
711 ldr r0, =thread_notify_head
712 mov r1, #THREAD_NOTIFY_SWITCH
713 bl atomic_notifier_call_chain
714#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
715 str r7, [r8]
716#endif
717 THUMB( mov ip, r4 )
718 mov r0, r5
719 ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously
720 THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously
721 THUMB( ldr sp, [ip], #4 )
722 THUMB( ldr pc, [ip] )
723 UNWIND(.fnend )
724ENDPROC(__switch_to)
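/*
 * The registers stored and reloaded in __switch_to correspond to
 * struct cpu_context_save embedded in thread_info (r4-r9, sl, fp, sp, pc):
 * the outgoing thread's callee-saved state is written at TI_CPU_SAVE,
 * the THREAD_NOTIFY_SWITCH notifier chain runs, and the final ldmia
 * resumes the incoming thread by loading its saved pc.
 */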
725
726 __INIT
727
728/*
729 * User helpers.
730 *
731 * Each segment is 32-byte aligned and will be moved to the top of the high
732 * vector page. New segments (if ever needed) must be added in front of
733 * existing ones. This mechanism should be used only for things that are
734 * really small and justified, and not be abused freely.
735 *
736 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
737 */
738 THUMB( .arm )
739
740 .macro usr_ret, reg
741#ifdef CONFIG_ARM_THUMB
742 bx \reg
743#else
744 mov pc, \reg
745#endif
746 .endm
747
748 .macro kuser_pad, sym, size
749 .if (. - \sym) & 3
750 .rept 4 - (. - \sym) & 3
751 .byte 0
752 .endr
753 .endif
754 .rept (\size - (. - \sym)) / 4
755 .word 0xe7fddef1
756 .endr
757 .endm
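/*
 * kuser_pad fills the remainder of a helper's fixed-size slot: zero
 * bytes up to word alignment, then 0xe7fddef1, an encoding from the
 * permanently-undefined instruction space, so that a stray branch into
 * the padding traps instead of executing garbage.
 */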
758
759#ifdef CONFIG_KUSER_HELPERS
760 .align 5
761 .globl __kuser_helper_start
762__kuser_helper_start:
763
764/*
765 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
766 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
767 */
768
769__kuser_cmpxchg64: @ 0xffff0f60
770
771#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
772
773 /*
774 * Poor you. No fast solution possible...
775 * The kernel itself must perform the operation.
776 * A special ghost syscall is used for that (see traps.c).
777 */
778 stmfd sp!, {r7, lr}
779 ldr r7, 1f @ it's 20 bits
780 swi __ARM_NR_cmpxchg64
781 ldmfd sp!, {r7, pc}
7821: .word __ARM_NR_cmpxchg64
783
784#elif defined(CONFIG_CPU_32v6K)
785
786 stmfd sp!, {r4, r5, r6, r7}
787 ldrd r4, r5, [r0] @ load old val
788 ldrd r6, r7, [r1] @ load new val
789 smp_dmb arm
7901: ldrexd r0, r1, [r2] @ load current val
791 eors r3, r0, r4 @ compare with oldval (1)
792 eoreqs r3, r1, r5 @ compare with oldval (2)
793 strexdeq r3, r6, r7, [r2] @ store newval if eq
794 teqeq r3, #1 @ success?
795 beq 1b @ if no then retry
796 smp_dmb arm
797 rsbs r0, r3, #0 @ set returned val and C flag
798 ldmfd sp!, {r4, r5, r6, r7}
799 usr_ret lr
800
801#elif !defined(CONFIG_SMP)
802
803#ifdef CONFIG_MMU
804
805 /*
806 * The only thing that can break atomicity in this cmpxchg64
807 * implementation is either an IRQ or a data abort exception
808 * causing another process/thread to be scheduled in the middle of
809 * the critical sequence. The same strategy as for cmpxchg is used.
810 */
811 stmfd sp!, {r4, r5, r6, lr}
812 ldmia r0, {r4, r5} @ load old val
813 ldmia r1, {r6, lr} @ load new val
8141: ldmia r2, {r0, r1} @ load current val
815 eors r3, r0, r4 @ compare with oldval (1)
816 eoreqs r3, r1, r5 @ compare with oldval (2)
8172: stmeqia r2, {r6, lr} @ store newval if eq
818 rsbs r0, r3, #0 @ set return val and C flag
819 ldmfd sp!, {r4, r5, r6, pc}
820
821 .text
822kuser_cmpxchg64_fixup:
823 @ Called from kuser_cmpxchg_fixup.
824 @ r4 = address of interrupted insn (must be preserved).
825 @ sp = saved regs. r7 and r8 are clobbered.
826 @ 1b = first critical insn, 2b = last critical insn.
827 @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
828 mov r7, #0xffff0fff
829 sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
830 subs r8, r4, r7
831 rsbcss r8, r8, #(2b - 1b)
832 strcs r7, [sp, #S_PC]
833#if __LINUX_ARM_ARCH__ < 6
834 bcc kuser_cmpxchg32_fixup
835#endif
836 mov pc, lr
837 .previous
838
839#else
840#warning "NPTL on non MMU needs fixing"
841 mov r0, #-1
842 adds r0, r0, #0
843 usr_ret lr
844#endif
845
846#else
847#error "incoherent kernel configuration"
848#endif
849
850 kuser_pad __kuser_cmpxchg64, 64
851
852__kuser_memory_barrier: @ 0xffff0fa0
853 smp_dmb arm
854 usr_ret lr
855
856 kuser_pad __kuser_memory_barrier, 32
857
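/*
 * User-space view of the helper below, roughly as described in
 * Documentation/arm/kernel_user_helpers.txt (sketch only):
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 * Returns 0 (with the C flag set) if *ptr held oldval and was updated to
 * newval, non-zero otherwise.
 */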
858__kuser_cmpxchg: @ 0xffff0fc0
859
860#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
861
862 /*
863 * Poor you. No fast solution possible...
864 * The kernel itself must perform the operation.
865 * A special ghost syscall is used for that (see traps.c).
866 */
867 stmfd sp!, {r7, lr}
868 ldr r7, 1f @ it's 20 bits
869 swi __ARM_NR_cmpxchg
870 ldmfd sp!, {r7, pc}
8711: .word __ARM_NR_cmpxchg
872
873#elif __LINUX_ARM_ARCH__ < 6
874
875#ifdef CONFIG_MMU
876
877 /*
878 * The only thing that can break atomicity in this cmpxchg
879 * implementation is either an IRQ or a data abort exception
880 * causing another process/thread to be scheduled in the middle
881 * of the critical sequence. To prevent this, code is added to
882 * the IRQ and data abort exception handlers to set the pc back
883 * to the beginning of the critical section if it is found to be
884 * within that critical section (see kuser_cmpxchg_fixup).
885 */
8861: ldr r3, [r2] @ load current val
887 subs r3, r3, r0 @ compare with oldval
8882: streq r1, [r2] @ store newval if eq
889 rsbs r0, r3, #0 @ set return val and C flag
890 usr_ret lr
891
892 .text
893kuser_cmpxchg32_fixup:
894 @ Called from kuser_cmpxchg_check macro.
895 @ r4 = address of interrupted insn (must be preserved).
896 @ sp = saved regs. r7 and r8 are clobbered.
897 @ 1b = first critical insn, 2b = last critical insn.
898 @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
899 mov r7, #0xffff0fff
900 sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
901 subs r8, r4, r7
902 rsbcss r8, r8, #(2b - 1b)
903 strcs r7, [sp, #S_PC]
904 mov pc, lr
905 .previous
906
907#else
908#warning "NPTL on non MMU needs fixing"
909 mov r0, #-1
910 adds r0, r0, #0
911 usr_ret lr
912#endif
913
914#else
915
916 smp_dmb arm
9171: ldrex r3, [r2]
918 subs r3, r3, r0
919 strexeq r3, r1, [r2]
920 teqeq r3, #1
921 beq 1b
922 rsbs r0, r3, #0
923 /* beware -- each __kuser slot must be 8 instructions max */
924 ALT_SMP(b __kuser_memory_barrier)
925 ALT_UP(usr_ret lr)
926
927#endif
928
929 kuser_pad __kuser_cmpxchg, 32
930
931__kuser_get_tls: @ 0xffff0fe0
932 ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
933 usr_ret lr
934 mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
935 kuser_pad __kuser_get_tls, 16
936 .rep 3
937 .word 0 @ 0xffff0ff0 software TLS value, then
938 .endr @ pad up to __kuser_helper_version
939
940__kuser_helper_version: @ 0xffff0ffc
941 .word ((__kuser_helper_end - __kuser_helper_start) >> 5)
942
943 .globl __kuser_helper_end
944__kuser_helper_end:
945
946#endif
947
948 THUMB( .thumb )
949
950/*
951 * Vector stubs.
952 *
953 * This code is copied to 0xffff1000 so we can use branches in the
954 * vectors, rather than ldr's. Note that this code must not exceed
955 * a page size.
956 *
957 * Common stub entry macro:
958 * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
959 *
960 * SP points to a minimal amount of processor-private memory, the address
961 * of which is copied into r0 for the mode specific abort handler.
962 */
963 .macro vector_stub, name, mode, correction=0
964 .align 5
965
966vector_\name:
967 .if \correction
968 sub lr, lr, #\correction
969 .endif
970
971 @
972 @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
973 @ (parent CPSR)
974 @
975 stmia sp, {r0, lr} @ save r0, lr
976 mrs lr, spsr
977 str lr, [sp, #8] @ save spsr
978
979 @
980 @ Prepare for SVC32 mode. IRQs remain disabled.
981 @
982 mrs r0, cpsr
983 eor r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
984 msr spsr_cxsf, r0
985
986 @
987 @ the branch table must immediately follow this code
988 @
989 and lr, lr, #0x0f
990 THUMB( adr r0, 1f )
991 THUMB( ldr lr, [r0, lr, lsl #2] )
992 mov r0, sp
993 ARM( ldr lr, [pc, lr, lsl #2] )
994 movs pc, lr @ branch to handler in SVC mode
995ENDPROC(vector_\name)
996
997 .align 2
998 @ handler addresses follow this label
9991:
1000 .endm
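/*
 * Each stub uses the low four bits of the saved SPSR (the mode that was
 * interrupted) as an index into the table of handler addresses placed
 * immediately after it; "movs pc, lr" then both branches to the chosen
 * handler and switches into SVC mode via the SPSR prepared above.
 */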
1001
1002 .section .stubs, "ax", %progbits
1003__stubs_start:
1004 @ This must be the first word
1005 .word vector_swi
1006
1007vector_rst:
1008 ARM( swi SYS_ERROR0 )
1009 THUMB( svc #0 )
1010 THUMB( nop )
1011 b vector_und
1012
1013/*
1014 * Interrupt dispatcher
1015 */
1016 vector_stub irq, IRQ_MODE, 4
1017
1018 .long __irq_usr @ 0 (USR_26 / USR_32)
1019 .long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
1020 .long __irq_invalid @ 2 (IRQ_26 / IRQ_32)
1021 .long __irq_svc @ 3 (SVC_26 / SVC_32)
1022 .long __irq_invalid @ 4
1023 .long __irq_invalid @ 5
1024 .long __irq_invalid @ 6
1025 .long __irq_invalid @ 7
1026 .long __irq_invalid @ 8
1027 .long __irq_invalid @ 9
1028 .long __irq_invalid @ a
1029 .long __irq_invalid @ b
1030 .long __irq_invalid @ c
1031 .long __irq_invalid @ d
1032 .long __irq_invalid @ e
1033 .long __irq_invalid @ f
1034
1035/*
1036 * Data abort dispatcher
1037 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
1038 */
1039 vector_stub dabt, ABT_MODE, 8
1040
1041 .long __dabt_usr @ 0 (USR_26 / USR_32)
1042 .long __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
1043 .long __dabt_invalid @ 2 (IRQ_26 / IRQ_32)
1044 .long __dabt_svc @ 3 (SVC_26 / SVC_32)
1045 .long __dabt_invalid @ 4
1046 .long __dabt_invalid @ 5
1047 .long __dabt_invalid @ 6
1048 .long __dabt_invalid @ 7
1049 .long __dabt_invalid @ 8
1050 .long __dabt_invalid @ 9
1051 .long __dabt_invalid @ a
1052 .long __dabt_invalid @ b
1053 .long __dabt_invalid @ c
1054 .long __dabt_invalid @ d
1055 .long __dabt_invalid @ e
1056 .long __dabt_invalid @ f
1057
1058/*
1059 * Prefetch abort dispatcher
1060 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
1061 */
1062 vector_stub pabt, ABT_MODE, 4
1063
1064 .long __pabt_usr @ 0 (USR_26 / USR_32)
1065 .long __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
1066 .long __pabt_invalid @ 2 (IRQ_26 / IRQ_32)
1067 .long __pabt_svc @ 3 (SVC_26 / SVC_32)
1068 .long __pabt_invalid @ 4
1069 .long __pabt_invalid @ 5
1070 .long __pabt_invalid @ 6
1071 .long __pabt_invalid @ 7
1072 .long __pabt_invalid @ 8
1073 .long __pabt_invalid @ 9
1074 .long __pabt_invalid @ a
1075 .long __pabt_invalid @ b
1076 .long __pabt_invalid @ c
1077 .long __pabt_invalid @ d
1078 .long __pabt_invalid @ e
1079 .long __pabt_invalid @ f
1080
1081/*
1082 * Undef instr entry dispatcher
1083 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
1084 */
1085 vector_stub und, UND_MODE
1086
1087 .long __und_usr @ 0 (USR_26 / USR_32)
1088 .long __und_invalid @ 1 (FIQ_26 / FIQ_32)
1089 .long __und_invalid @ 2 (IRQ_26 / IRQ_32)
1090 .long __und_svc @ 3 (SVC_26 / SVC_32)
1091 .long __und_invalid @ 4
1092 .long __und_invalid @ 5
1093 .long __und_invalid @ 6
1094 .long __und_invalid @ 7
1095 .long __und_invalid @ 8
1096 .long __und_invalid @ 9
1097 .long __und_invalid @ a
1098 .long __und_invalid @ b
1099 .long __und_invalid @ c
1100 .long __und_invalid @ d
1101 .long __und_invalid @ e
1102 .long __und_invalid @ f
1103
1104 .align 5
1105
1106/*=============================================================================
1107 * Address exception handler
1108 *-----------------------------------------------------------------------------
1109 * These aren't too critical.
1110 * (they're not supposed to happen, and won't happen in 32-bit data mode).
1111 */
1112
1113vector_addrexcptn:
1114 b vector_addrexcptn
1115
1116/*=============================================================================
1117 * Undefined FIQs
1118 *-----------------------------------------------------------------------------
1119 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
1120 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
1121 * Basically to switch modes, we *HAVE* to clobber one register... brain
1122 * damage alert! I don't think that we can execute any code in here in any
1123 * other mode than FIQ... Ok you can switch to another mode, but you can't
1124 * get out of that mode without clobbering one register.
1125 */
1126vector_fiq:
1127 subs pc, lr, #4
1128
1129 .globl vector_fiq_offset
1130 .equ vector_fiq_offset, vector_fiq
1131
1132 .section .vectors, "ax", %progbits
1133__vectors_start:
1134 W(b) vector_rst
1135 W(b) vector_und
1136 W(ldr) pc, __vectors_start + 0x1000
1137 W(b) vector_pabt
1138 W(b) vector_dabt
1139 W(b) vector_addrexcptn
1140 W(b) vector_irq
1141 W(b) vector_fiq
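@ At runtime the vector page sits at 0xffff0000 and the stubs are copied
@ to 0xffff1000, so the SWI slot's "ldr pc, __vectors_start + 0x1000"
@ fetches the address of vector_swi from the first word of the stubs.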
1142
1143 .data
1144
1145 .globl cr_alignment
1146 .globl cr_no_alignment
1147cr_alignment:
1148 .space 4
1149cr_no_alignment:
1150 .space 4
1151
1152#ifdef CONFIG_MULTI_IRQ_HANDLER
1153 .globl handle_arch_irq
1154handle_arch_irq:
1155 .space 4
1156#endif