1/*
2 * linux/arch/arm/kernel/entry-armv.S
3 *
4 * Copyright (C) 1996,1997,1998 Russell King.
5 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
6 * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * Low-level vector interface routines
13 *
14 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
15 * that causes it to save wrong values... Be aware!
16 */
17
18#include <asm/assembler.h>
19#include <asm/memory.h>
20#include <asm/glue-df.h>
21#include <asm/glue-pf.h>
22#include <asm/vfpmacros.h>
23#ifndef CONFIG_MULTI_IRQ_HANDLER
24#include <mach/entry-macro.S>
25#endif
26#include <asm/thread_notify.h>
27#include <asm/unwind.h>
28#include <asm/unistd.h>
29#include <asm/tls.h>
30#include <asm/system_info.h>
31
32#include "entry-header.S"
33#include <asm/entry-macro-multi.S>
34
35/*
36 * Interrupt handling.
37 */
38 .macro irq_handler
39#ifdef CONFIG_MULTI_IRQ_HANDLER
40 ldr r1, =handle_arch_irq
41 mov r0, sp
42 adr lr, BSYM(9997f)
43 ldr pc, [r1]
44#else
45 arch_irq_handler_default
46#endif
479997:
48 .endm
49
50 .macro pabt_helper
51 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
52#ifdef MULTI_PABORT
53 ldr ip, .LCprocfns
54 mov lr, pc
55 ldr pc, [ip, #PROCESSOR_PABT_FUNC]
56#else
57 bl CPU_PABORT_HANDLER
58#endif
59 .endm
60
61 .macro dabt_helper
62
63 @
64 @ Call the processor-specific abort handler:
65 @
66 @ r2 - pt_regs
67 @ r4 - aborted context pc
68 @ r5 - aborted context psr
69 @
70 @ The abort handler must return the aborted address in r0, and
71 @ the fault status register in r1. r9 must be preserved.
72 @
73#ifdef MULTI_DABORT
74 ldr ip, .LCprocfns
75 mov lr, pc
76 ldr pc, [ip, #PROCESSOR_DABT_FUNC]
77#else
78 bl CPU_DABORT_HANDLER
79#endif
80 .endm
81
82#ifdef CONFIG_KPROBES
83 .section .kprobes.text,"ax",%progbits
84#else
85 .text
86#endif
87
88/*
89 * Invalid mode handlers
90 */
91 .macro inv_entry, reason
92 sub sp, sp, #S_FRAME_SIZE
93 ARM( stmib sp, {r1 - lr} )
94 THUMB( stmia sp, {r0 - r12} )
95 THUMB( str sp, [sp, #S_SP] )
96 THUMB( str lr, [sp, #S_LR] )
97 mov r1, #\reason
98 .endm
99
100__pabt_invalid:
101 inv_entry BAD_PREFETCH
102 b common_invalid
103ENDPROC(__pabt_invalid)
104
105__dabt_invalid:
106 inv_entry BAD_DATA
107 b common_invalid
108ENDPROC(__dabt_invalid)
109
110__irq_invalid:
111 inv_entry BAD_IRQ
112 b common_invalid
113ENDPROC(__irq_invalid)
114
115__und_invalid:
116 inv_entry BAD_UNDEFINSTR
117
118 @
119 @ XXX fall through to common_invalid
120 @
121
122@
123@ common_invalid - generic code for failed exception (re-entrant version of handlers)
124@
125common_invalid:
126 zero_fp
127
128 ldmia r0, {r4 - r6}
129 add r0, sp, #S_PC @ here for interlock avoidance
130 mov r7, #-1 @ "" "" "" ""
131 str r4, [sp] @ save preserved r0
132 stmia r0, {r5 - r7} @ lr_<exception>,
133 @ cpsr_<exception>, "old_r0"
134
135 mov r0, sp
136 b bad_mode
137ENDPROC(__und_invalid)
138
139/*
140 * SVC mode handlers
141 */
142
143#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
144#define SPFIX(code...) code
145#else
146#define SPFIX(code...)
147#endif
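/*
 * A brief informal note: SPFIX() emits its argument only on EABI builds
 * for v5 and later. svc_entry below uses it to keep the kernel stack
 * 8-byte aligned as the EABI requires: if the pre-exception sp was only
 * word aligned, an extra 4-byte pad is opened up, and the sp_svc value
 * saved into pt_regs is adjusted back so it still reflects the original
 * stack pointer.
 */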
148
149 .macro svc_entry, stack_hole=0
150 UNWIND(.fnstart )
151 UNWIND(.save {r0 - pc} )
152 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
153#ifdef CONFIG_THUMB2_KERNEL
154 SPFIX( str r0, [sp] ) @ temporarily saved
155 SPFIX( mov r0, sp )
156 SPFIX( tst r0, #4 ) @ test original stack alignment
157 SPFIX( ldr r0, [sp] ) @ restored
158#else
159 SPFIX( tst sp, #4 )
160#endif
161 SPFIX( subeq sp, sp, #4 )
162 stmia sp, {r1 - r12}
163
164 ldmia r0, {r3 - r5}
165 add r7, sp, #S_SP - 4 @ here for interlock avoidance
166 mov r6, #-1 @ "" "" "" ""
167 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
168 SPFIX( addeq r2, r2, #4 )
169 str r3, [sp, #-4]! @ save the "real" r0 copied
170 @ from the exception stack
171
172 mov r3, lr
173
174 @
175 @ We are now ready to fill in the remaining blanks on the stack:
176 @
177 @ r2 - sp_svc
178 @ r3 - lr_svc
179 @ r4 - lr_<exception>, already fixed up for correct return/restart
180 @ r5 - spsr_<exception>
181 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
182 @
183 stmia r7, {r2 - r6}
184
185#ifdef CONFIG_TRACE_IRQFLAGS
186 bl trace_hardirqs_off
187#endif
188 .endm
189
190 .align 5
191__dabt_svc:
192 svc_entry
193 mov r2, sp
194 dabt_helper
195
196 @
197 @ IRQs off again before pulling preserved data off the stack
198 @
199 disable_irq_notrace
200
201#ifdef CONFIG_TRACE_IRQFLAGS
202 tst r5, #PSR_I_BIT
203 bleq trace_hardirqs_on
204 tst r5, #PSR_I_BIT
205 blne trace_hardirqs_off
206#endif
207 svc_exit r5 @ return from exception
208 UNWIND(.fnend )
209ENDPROC(__dabt_svc)
210
211 .align 5
212__irq_svc:
213 svc_entry
214 irq_handler
215
216#ifdef CONFIG_PREEMPT
217 get_thread_info tsk
218 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
219 ldr r0, [tsk, #TI_FLAGS] @ get flags
220 teq r8, #0 @ if preempt count != 0
221 movne r0, #0 @ force flags to 0
222 tst r0, #_TIF_NEED_RESCHED
223 blne svc_preempt
224#endif
225
226#ifdef CONFIG_TRACE_IRQFLAGS
227 @ The parent context IRQs must have been enabled to get here in
228 @ the first place, so there's no point checking the PSR I bit.
229 bl trace_hardirqs_on
230#endif
231 svc_exit r5 @ return from exception
232 UNWIND(.fnend )
233ENDPROC(__irq_svc)
234
235 .ltorg
236
237#ifdef CONFIG_PREEMPT
238svc_preempt:
239 mov r8, lr
2401: bl preempt_schedule_irq @ irq en/disable is done inside
241 ldr r0, [tsk, #TI_FLAGS] @ get the new task's TI_FLAGS
242 tst r0, #_TIF_NEED_RESCHED
243 moveq pc, r8 @ go again
244 b 1b
245#endif
246
247__und_fault:
248 @ Correct the PC such that it is pointing at the instruction
249 @ which caused the fault. If the faulting instruction was ARM
250 @ the PC will be pointing at the next instruction, and we have to
251 @ subtract 4. Otherwise, it is Thumb, and the PC will be
252 @ pointing at the second half of the Thumb instruction. We
253 @ have to subtract 2.
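 @ For example (illustrative addresses only): if a 32-bit ARM
 @ instruction at 0x8000 faults, regs->ARM_pc holds 0x8004 on entry
 @ and r1 is 4, so the store below rewinds ARM_pc to 0x8000 before
 @ do_undefinstr runs.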
254 ldr r2, [r0, #S_PC]
255 sub r2, r2, r1
256 str r2, [r0, #S_PC]
257 b do_undefinstr
258ENDPROC(__und_fault)
259
260 .align 5
261__und_svc:
262#ifdef CONFIG_KPROBES
263 @ If a kprobe is about to simulate a "stmdb sp..." instruction,
264 @ it obviously needs free stack space which then will belong to
265 @ the saved context.
266 svc_entry 64
267#else
268 svc_entry
269#endif
270 @
271 @ call emulation code, which returns using r9 if it has emulated
272 @ the instruction, or the more conventional lr if we are to treat
273 @ this as a real undefined instruction
274 @
275 @ r0 - instruction
276 @
277#ifndef CONFIG_THUMB2_KERNEL
278 ldr r0, [r4, #-4]
279#else
280 mov r1, #2
281 ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
282 cmp r0, #0xe800 @ 32-bit instruction if xx >= 0
283 blo __und_svc_fault
284 ldrh r9, [r4] @ bottom 16 bits
285 add r4, r4, #2
286 str r4, [sp, #S_PC]
287 orr r0, r9, r0, lsl #16
288#endif
289 adr r9, BSYM(__und_svc_finish)
290 mov r2, r4
291 bl call_fpe
292
293 mov r1, #4 @ PC correction to apply
294__und_svc_fault:
295 mov r0, sp @ struct pt_regs *regs
296 bl __und_fault
297
298 @
299 @ IRQs off again before pulling preserved data off the stack
300 @
301__und_svc_finish:
302 disable_irq_notrace
303
304 @
305 @ restore SPSR and restart the instruction
306 @
307 ldr r5, [sp, #S_PSR] @ Get SVC cpsr
308#ifdef CONFIG_TRACE_IRQFLAGS
309 tst r5, #PSR_I_BIT
310 bleq trace_hardirqs_on
311 tst r5, #PSR_I_BIT
312 blne trace_hardirqs_off
313#endif
314 svc_exit r5 @ return from exception
315 UNWIND(.fnend )
316ENDPROC(__und_svc)
317
318 .align 5
319__pabt_svc:
320 svc_entry
321 mov r2, sp @ regs
322 pabt_helper
323
324 @
325 @ IRQs off again before pulling preserved data off the stack
326 @
327 disable_irq_notrace
328
329#ifdef CONFIG_TRACE_IRQFLAGS
330 tst r5, #PSR_I_BIT
331 bleq trace_hardirqs_on
332 tst r5, #PSR_I_BIT
333 blne trace_hardirqs_off
334#endif
335 svc_exit r5 @ return from exception
336 UNWIND(.fnend )
337ENDPROC(__pabt_svc)
338
339 .align 5
340.LCcralign:
341 .word cr_alignment
342#ifdef MULTI_DABORT
343.LCprocfns:
344 .word processor
345#endif
346.LCfp:
347 .word fp_enter
348
349/*
350 * User mode handlers
351 *
352 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
353 */
354
355#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
356#error "sizeof(struct pt_regs) must be a multiple of 8"
357#endif
358
359 .macro usr_entry
360 UNWIND(.fnstart )
361 UNWIND(.cantunwind ) @ don't unwind the user space
362 sub sp, sp, #S_FRAME_SIZE
363 ARM( stmib sp, {r1 - r12} )
364 THUMB( stmia sp, {r0 - r12} )
365
366 ldmia r0, {r3 - r5}
367 add r0, sp, #S_PC @ here for interlock avoidance
368 mov r6, #-1 @ "" "" "" ""
369
370 str r3, [sp] @ save the "real" r0 copied
371 @ from the exception stack
372
373 @
374 @ We are now ready to fill in the remaining blanks on the stack:
375 @
376 @ r4 - lr_<exception>, already fixed up for correct return/restart
377 @ r5 - spsr_<exception>
378 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
379 @
380 @ Also, separately save sp_usr and lr_usr
381 @
382 stmia r0, {r4 - r6}
383 ARM( stmdb r0, {sp, lr}^ )
384 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
385
386 @
387 @ Enable the alignment trap while in kernel mode
388 @
389 alignment_trap r0
390
391 @
392 @ Clear FP to mark the first stack frame
393 @
394 zero_fp
395
396#ifdef CONFIG_IRQSOFF_TRACER
397 bl trace_hardirqs_off
398#endif
399 .endm
400
401 .macro kuser_cmpxchg_check
402#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
403#ifndef CONFIG_MMU
404#warning "NPTL on non MMU needs fixing"
405#else
406 @ Make sure our user space atomic helper is restarted
407 @ if it was interrupted in a critical region. Here we
408 @ perform a quick test inline since it should be false
409 @ 99.9999% of the time. The rest is done out of line.
410 cmp r4, #TASK_SIZE
411 blhs kuser_cmpxchg64_fixup
412#endif
413#endif
414 .endm
415
416 .align 5
417__dabt_usr:
418 usr_entry
419 kuser_cmpxchg_check
420 mov r2, sp
421 dabt_helper
422 b ret_from_exception
423 UNWIND(.fnend )
424ENDPROC(__dabt_usr)
425
426 .align 5
427__irq_usr:
428 usr_entry
429 kuser_cmpxchg_check
430 irq_handler
431 get_thread_info tsk
432 mov why, #0
433 b ret_to_user_from_irq
434 UNWIND(.fnend )
435ENDPROC(__irq_usr)
436
437 .ltorg
438
439 .align 5
440__und_usr:
441 usr_entry
442
443 mov r2, r4
444 mov r3, r5
445
446 @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
447 @ faulting instruction depending on Thumb mode.
448 @ r3 = regs->ARM_cpsr
449 @
450 @ The emulation code returns using r9 if it has emulated the
451 @ instruction, or the more conventional lr if we are to treat
452 @ this as a real undefined instruction
453 @
454 adr r9, BSYM(ret_from_exception)
455
456 tst r3, #PSR_T_BIT @ Thumb mode?
457 bne __und_usr_thumb
458 sub r4, r2, #4 @ ARM instr at LR - 4
4591: ldrt r0, [r4]
460#ifdef CONFIG_CPU_ENDIAN_BE8
461 rev r0, r0 @ little endian instruction
462#endif
463 @ r0 = 32-bit ARM instruction which caused the exception
464 @ r2 = PC value for the following instruction (:= regs->ARM_pc)
465 @ r4 = PC value for the faulting instruction
466 @ lr = 32-bit undefined instruction function
467 adr lr, BSYM(__und_usr_fault_32)
468 b call_fpe
469
470__und_usr_thumb:
471 @ Thumb instruction
472 sub r4, r2, #2 @ First half of thumb instr at LR - 2
473#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
474/*
475 * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms
476 * can never be supported in a single kernel, this code is not applicable at
477 * all when __LINUX_ARM_ARCH__ < 6. This allows simplifying assumptions to be
478 * made about .arch directives.
479 */
480#if __LINUX_ARM_ARCH__ < 7
481/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
482#define NEED_CPU_ARCHITECTURE
483 ldr r5, .LCcpu_architecture
484 ldr r5, [r5]
485 cmp r5, #CPU_ARCH_ARMv7
486 blo __und_usr_fault_16 @ 16bit undefined instruction
487/*
488 * The following code won't get run unless the running CPU really is v7, so
489 * coding round the lack of ldrht on older arches is pointless. Temporarily
490 * override the assembler target arch with the minimum required instead:
491 */
492 .arch armv6t2
493#endif
4942: ldrht r5, [r4]
495 cmp r5, #0xe800 @ 32bit instruction if xx != 0
496 blo __und_usr_fault_16 @ 16bit undefined instruction
4973: ldrht r0, [r2]
498 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
499 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
500 orr r0, r0, r5, lsl #16
501 adr lr, BSYM(__und_usr_fault_32)
502 @ r0 = the two 16-bit Thumb instructions which caused the exception
503 @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
504 @ r4 = PC value for the first 16-bit Thumb instruction
505 @ lr = 32bit undefined instruction function
506
507#if __LINUX_ARM_ARCH__ < 7
508/* If the target arch was overridden, change it back: */
509#ifdef CONFIG_CPU_32v6K
510 .arch armv6k
511#else
512 .arch armv6
513#endif
514#endif /* __LINUX_ARM_ARCH__ < 7 */
515#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
516 b __und_usr_fault_16
517#endif
518 UNWIND(.fnend)
519ENDPROC(__und_usr)
520
521/*
522 * The out of line fixup for the ldrt instructions above.
523 */
524 .pushsection .fixup, "ax"
525 .align 2
5264: mov pc, r9
527 .popsection
528 .pushsection __ex_table,"a"
529 .long 1b, 4b
530#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
531 .long 2b, 4b
532 .long 3b, 4b
533#endif
534 .popsection
535
536/*
537 * Check whether the instruction is a co-processor instruction.
538 * If yes, we need to call the relevant co-processor handler.
539 *
540 * Note that we don't do a full check here for the co-processor
541 * instructions; all instructions with bit 27 set are well
542 * defined. The only instructions that should fault are the
543 * co-processor instructions. However, we have to watch out
544 * for the ARM6/ARM7 SWI bug.
545 *
546 * NEON is a special case that has to be handled here. Not all
547 * NEON instructions are co-processor instructions, so we have
548 * to make a special case of checking for them. Plus, there are
549 * five groups of them, so we have a table of mask/opcode pairs
550 * to check against, and if any match then we branch off into the
551 * NEON handler code.
552 *
553 * Emulators may wish to make use of the following registers:
554 * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
555 * r2 = PC value to resume execution after successful emulation
556 * r9 = normal "successful" return address
557 * r10 = this thread's thread_info structure
558 * lr = unrecognised instruction return address
559 * IRQs disabled, FIQs enabled.
560 */
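/*
 * Worked example (illustrative only): a VFP access such as
 * "vmrs r0, fpscr" encodes as 0xeef10a10. Bit 27 is set and the
 * coprocessor field (bits 11:8) is 0xa, so the code below computes
 * r8 = 0x00000a00, and "add pc, pc, r8, lsr #6" advances 10 word-sized
 * entries (0xa00 >> 6 = 40 bytes) into the branch table, landing on the
 * CP#10 slot, i.e. do_vfp.
 */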
561 @
562 @ Fall-through from Thumb-2 __und_usr
563 @
564#ifdef CONFIG_NEON
565 adr r6, .LCneon_thumb_opcodes
566 b 2f
567#endif
568call_fpe:
569#ifdef CONFIG_NEON
570 adr r6, .LCneon_arm_opcodes
5712:
572 ldr r7, [r6], #4 @ mask value
573 cmp r7, #0 @ end mask?
574 beq 1f
575 and r8, r0, r7
576 ldr r7, [r6], #4 @ opcode bits matching in mask
577 cmp r8, r7 @ NEON instruction?
578 bne 2b
579 get_thread_info r10
580 mov r7, #1
581 strb r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used
582 strb r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used
583 b do_vfp @ let VFP handler handle this
5841:
585#endif
586 tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
587 tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
588 moveq pc, lr
589 get_thread_info r10 @ get current thread
590 and r8, r0, #0x00000f00 @ mask out CP number
591 THUMB( lsr r8, r8, #8 )
592 mov r7, #1
593 add r6, r10, #TI_USED_CP
594 ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
595 THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
596#ifdef CONFIG_IWMMXT
597 @ Test if we need to give access to iWMMXt coprocessors
598 ldr r5, [r10, #TI_FLAGS]
599 rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only
600 movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1)
601 bcs iwmmxt_task_enable
602#endif
603 ARM( add pc, pc, r8, lsr #6 )
604 THUMB( lsl r8, r8, #2 )
605 THUMB( add pc, r8 )
606 nop
607
608 movw_pc lr @ CP#0
609 W(b) do_fpe @ CP#1 (FPE)
610 W(b) do_fpe @ CP#2 (FPE)
611 movw_pc lr @ CP#3
612#ifdef CONFIG_CRUNCH
613 b crunch_task_enable @ CP#4 (MaverickCrunch)
614 b crunch_task_enable @ CP#5 (MaverickCrunch)
615 b crunch_task_enable @ CP#6 (MaverickCrunch)
616#else
617 movw_pc lr @ CP#4
618 movw_pc lr @ CP#5
619 movw_pc lr @ CP#6
620#endif
621 movw_pc lr @ CP#7
622 movw_pc lr @ CP#8
623 movw_pc lr @ CP#9
624#ifdef CONFIG_VFP
625 W(b) do_vfp @ CP#10 (VFP)
626 W(b) do_vfp @ CP#11 (VFP)
627#else
628 movw_pc lr @ CP#10 (VFP)
629 movw_pc lr @ CP#11 (VFP)
630#endif
631 movw_pc lr @ CP#12
632 movw_pc lr @ CP#13
633 movw_pc lr @ CP#14 (Debug)
634 movw_pc lr @ CP#15 (Control)
635
636#ifdef NEED_CPU_ARCHITECTURE
637 .align 2
638.LCcpu_architecture:
639 .word __cpu_architecture
640#endif
641
642#ifdef CONFIG_NEON
643 .align 6
644
645.LCneon_arm_opcodes:
646 .word 0xfe000000 @ mask
647 .word 0xf2000000 @ opcode
648
649 .word 0xff100000 @ mask
650 .word 0xf4000000 @ opcode
651
652 .word 0x00000000 @ mask
653 .word 0x00000000 @ opcode
654
655.LCneon_thumb_opcodes:
656 .word 0xef000000 @ mask
657 .word 0xef000000 @ opcode
658
659 .word 0xff100000 @ mask
660 .word 0xf9000000 @ opcode
661
662 .word 0x00000000 @ mask
663 .word 0x00000000 @ opcode
664#endif
665
666do_fpe:
667 enable_irq
668 ldr r4, .LCfp
669 add r10, r10, #TI_FPSTATE @ r10 = workspace
670 ldr pc, [r4] @ Call FP module USR entry point
671
672/*
673 * The FP module is called with these registers set:
674 * r0 = instruction
675 * r2 = PC+4
676 * r9 = normal "successful" return address
677 * r10 = FP workspace
678 * lr = unrecognised FP instruction return address
679 */
680
681 .pushsection .data
682ENTRY(fp_enter)
683 .word no_fp
684 .popsection
685
686ENTRY(no_fp)
687 mov pc, lr
688ENDPROC(no_fp)
689
690__und_usr_fault_32:
691 mov r1, #4
692 b 1f
693__und_usr_fault_16:
694 mov r1, #2
6951: enable_irq
696 mov r0, sp
697 adr lr, BSYM(ret_from_exception)
698 b __und_fault
699ENDPROC(__und_usr_fault_32)
700ENDPROC(__und_usr_fault_16)
701
702 .align 5
703__pabt_usr:
704 usr_entry
705 mov r2, sp @ regs
706 pabt_helper
707 UNWIND(.fnend )
708 /* fall through */
709/*
710 * This is the return code to user mode for abort handlers
711 */
712ENTRY(ret_from_exception)
713 UNWIND(.fnstart )
714 UNWIND(.cantunwind )
715 get_thread_info tsk
716 mov why, #0
717 b ret_to_user
718 UNWIND(.fnend )
719ENDPROC(__pabt_usr)
720ENDPROC(ret_from_exception)
721
722/*
723 * Register switch for ARMv3 and ARMv4 processors
724 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
725 * previous and next are guaranteed not to be the same.
726 */
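/*
 * Rough sketch of what the stmia/ldmia pairs below operate on: ip and
 * r4 point at thread_info->cpu_context (offset TI_CPU_SAVE) of the
 * outgoing and incoming threads respectively, a slot holding roughly
 * {r4 - sl, fp, sp, lr/pc}. The outgoing lr is stored where the
 * incoming pc is later loaded from, so the final ldmia resumes the new
 * thread just after its own earlier call to __switch_to.
 */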
727ENTRY(__switch_to)
728 UNWIND(.fnstart )
729 UNWIND(.cantunwind )
730 add ip, r1, #TI_CPU_SAVE
731 ldr r3, [r2, #TI_TP_VALUE]
732 ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack
733 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
734 THUMB( str sp, [ip], #4 )
735 THUMB( str lr, [ip], #4 )
736#ifdef CONFIG_CPU_USE_DOMAINS
737 ldr r6, [r2, #TI_CPU_DOMAIN]
738#endif
739 set_tls r3, r4, r5
740#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
741 ldr r7, [r2, #TI_TASK]
742 ldr r8, =__stack_chk_guard
743 ldr r7, [r7, #TSK_STACK_CANARY]
744#endif
745#ifdef CONFIG_CPU_USE_DOMAINS
746 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
747#endif
748 mov r5, r0
749 add r4, r2, #TI_CPU_SAVE
750 ldr r0, =thread_notify_head
751 mov r1, #THREAD_NOTIFY_SWITCH
752 bl atomic_notifier_call_chain
753#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
754 str r7, [r8]
755#endif
756 THUMB( mov ip, r4 )
757 mov r0, r5
758 ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously
759 THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously
760 THUMB( ldr sp, [ip], #4 )
761 THUMB( ldr pc, [ip] )
762 UNWIND(.fnend )
763ENDPROC(__switch_to)
764
765 __INIT
766
767/*
768 * User helpers.
769 *
770 * Each segment is 32-byte aligned and will be moved to the top of the high
771 * vector page. New segments (if ever needed) must be added in front of
772 * existing ones. This mechanism should be used only for things that are
773 * really small and justified, and not be abused freely.
774 *
775 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
776 */
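/*
 * Illustrative user-space caller (not part of this file): an atomic
 * increment built on __kuser_cmpxchg might look like the sketch below,
 * with r2 pointing at the counter. The helper preserves r2 and reports
 * success by returning 0 with the C flag set:
 *
 *	0:	ldr	r0, [r2]		@ oldval
 *		add	r1, r0, #1		@ newval
 *		ldr	r3, =0xffff0fc0		@ __kuser_cmpxchg
 *		blx	r3
 *		bcc	0b			@ lost the race, try again
 *
 * See Documentation/arm/kernel_user_helpers.txt for the formal ABI.
 */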
777 THUMB( .arm )
778
779 .macro usr_ret, reg
780#ifdef CONFIG_ARM_THUMB
781 bx \reg
782#else
783 mov pc, \reg
784#endif
785 .endm
786
787 .align 5
788 .globl __kuser_helper_start
789__kuser_helper_start:
790
791/*
792 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
793 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
794 */
795
796__kuser_cmpxchg64: @ 0xffff0f60
797
798#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
799
800 /*
801 * Poor you. No fast solution possible...
802 * The kernel itself must perform the operation.
803 * A special ghost syscall is used for that (see traps.c).
804 */
805 stmfd sp!, {r7, lr}
806 ldr r7, 1f @ it's 20 bits
807 swi __ARM_NR_cmpxchg64
808 ldmfd sp!, {r7, pc}
8091: .word __ARM_NR_cmpxchg64
810
811#elif defined(CONFIG_CPU_32v6K)
812
813 stmfd sp!, {r4, r5, r6, r7}
814 ldrd r4, r5, [r0] @ load old val
815 ldrd r6, r7, [r1] @ load new val
816 smp_dmb arm
8171: ldrexd r0, r1, [r2] @ load current val
818 eors r3, r0, r4 @ compare with oldval (1)
819 eoreqs r3, r1, r5 @ compare with oldval (2)
820 strexdeq r3, r6, r7, [r2] @ store newval if eq
821 teqeq r3, #1 @ success?
822 beq 1b @ if no then retry
823 smp_dmb arm
824 rsbs r0, r3, #0 @ set returned val and C flag
825 ldmfd sp!, {r4, r5, r6, r7}
826 usr_ret lr
827
828#elif !defined(CONFIG_SMP)
829
830#ifdef CONFIG_MMU
831
832 /*
833 * The only thing that can break atomicity in this cmpxchg64
834 * implementation is either an IRQ or a data abort exception
835 * causing another process/thread to be scheduled in the middle of
836 * the critical sequence. The same strategy as for cmpxchg is used.
837 */
838 stmfd sp!, {r4, r5, r6, lr}
839 ldmia r0, {r4, r5} @ load old val
840 ldmia r1, {r6, lr} @ load new val
8411: ldmia r2, {r0, r1} @ load current val
842 eors r3, r0, r4 @ compare with oldval (1)
843 eoreqs r3, r1, r5 @ compare with oldval (2)
8442: stmeqia r2, {r6, lr} @ store newval if eq
845 rsbs r0, r3, #0 @ set return val and C flag
846 ldmfd sp!, {r4, r5, r6, pc}
847
848 .text
849kuser_cmpxchg64_fixup:
850 @ Called from kuser_cmpxchg_fixup.
851 @ r4 = address of interrupted insn (must be preserved).
852 @ sp = saved regs. r7 and r8 are clobbered.
853 @ 1b = first critical insn, 2b = last critical insn.
854 @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
855 mov r7, #0xffff0fff
856 sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
857 subs r8, r4, r7
858 rsbcss r8, r8, #(2b - 1b)
859 strcs r7, [sp, #S_PC]
860#if __LINUX_ARM_ARCH__ < 6
861 bcc kuser_cmpxchg32_fixup
862#endif
863 mov pc, lr
864 .previous
865
866#else
867#warning "NPTL on non MMU needs fixing"
868 mov r0, #-1
869 adds r0, r0, #0
870 usr_ret lr
871#endif
872
873#else
874#error "incoherent kernel configuration"
875#endif
876
877 /* pad to next slot */
878 .rept (16 - (. - __kuser_cmpxchg64)/4)
879 .word 0
880 .endr
881
882 .align 5
883
884__kuser_memory_barrier: @ 0xffff0fa0
885 smp_dmb arm
886 usr_ret lr
887
888 .align 5
889
890__kuser_cmpxchg: @ 0xffff0fc0
891
892#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
893
894 /*
895 * Poor you. No fast solution possible...
896 * The kernel itself must perform the operation.
897 * A special ghost syscall is used for that (see traps.c).
898 */
899 stmfd sp!, {r7, lr}
900 ldr r7, 1f @ it's 20 bits
901 swi __ARM_NR_cmpxchg
902 ldmfd sp!, {r7, pc}
9031: .word __ARM_NR_cmpxchg
904
905#elif __LINUX_ARM_ARCH__ < 6
906
907#ifdef CONFIG_MMU
908
909 /*
910 * The only thing that can break atomicity in this cmpxchg
911 * implementation is either an IRQ or a data abort exception
912 * causing another process/thread to be scheduled in the middle
913 * of the critical sequence. To prevent this, code is added to
914 * the IRQ and data abort exception handlers to set the pc back
915 * to the beginning of the critical section if it is found to be
916 * within that critical section (see kuser_cmpxchg_fixup).
917 */
9181: ldr r3, [r2] @ load current val
919 subs r3, r3, r0 @ compare with oldval
9202: streq r1, [r2] @ store newval if eq
921 rsbs r0, r3, #0 @ set return val and C flag
922 usr_ret lr
923
924 .text
925kuser_cmpxchg32_fixup:
926 @ Called from kuser_cmpxchg_check macro.
927 @ r4 = address of interrupted insn (must be preserved).
928 @ sp = saved regs. r7 and r8 are clobbered.
929 @ 1b = first critical insn, 2b = last critical insn.
930 @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
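 @ The mov/sub pair below builds the run-time address of 1b: the
 @ helper executes from its copy at 0xffff0fc0 rather than from its
 @ link address, and splitting the constant this way keeps both
 @ operands within reach of ARM immediate encodings.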
931 mov r7, #0xffff0fff
932 sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
933 subs r8, r4, r7
934 rsbcss r8, r8, #(2b - 1b)
935 strcs r7, [sp, #S_PC]
936 mov pc, lr
937 .previous
938
939#else
940#warning "NPTL on non MMU needs fixing"
941 mov r0, #-1
942 adds r0, r0, #0
943 usr_ret lr
944#endif
945
946#else
947
948 smp_dmb arm
9491: ldrex r3, [r2]
950 subs r3, r3, r0
951 strexeq r3, r1, [r2]
952 teqeq r3, #1
953 beq 1b
954 rsbs r0, r3, #0
955 /* beware -- each __kuser slot must be 8 instructions max */
956 ALT_SMP(b __kuser_memory_barrier)
957 ALT_UP(usr_ret lr)
958
959#endif
960
961 .align 5
962
963__kuser_get_tls: @ 0xffff0fe0
964 ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
965 usr_ret lr
966 mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
967 .rep 4
968 .word 0 @ 0xffff0ff0 software TLS value, then
969 .endr @ pad up to __kuser_helper_version
970
971__kuser_helper_version: @ 0xffff0ffc
972 .word ((__kuser_helper_end - __kuser_helper_start) >> 5)
973
974 .globl __kuser_helper_end
975__kuser_helper_end:
976
977 THUMB( .thumb )
978
979/*
980 * Vector stubs.
981 *
982 * This code is copied to 0xffff0200 so we can use branches in the
983 * vectors, rather than ldr's. Note that this code must not
984 * exceed 0x300 bytes.
985 *
986 * Common stub entry macro:
987 * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
988 *
989 * SP points to a minimal amount of processor-private memory, the address
990 * of which is copied into r0 for the mode specific abort handler.
991 */
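/*
 * Worked example: the low four bits of the saved spsr pick the entry in
 * the dispatch table that follows each vector_stub expansion. USR_32
 * mode is 0b10000, so "and lr, lr, #0x0f" yields 0 and entry 0 is used;
 * SVC_32 is 0b10011, giving entry 3. That is why only slots 0 and 3 of
 * the tables below point at real handlers.
 */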
992 .macro vector_stub, name, mode, correction=0
993 .align 5
994
995vector_\name:
996 .if \correction
997 sub lr, lr, #\correction
998 .endif
999
1000 @
1001 @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
1002 @ (parent CPSR)
1003 @
1004 stmia sp, {r0, lr} @ save r0, lr
1005 mrs lr, spsr
1006 str lr, [sp, #8] @ save spsr
1007
1008 @
1009 @ Prepare for SVC32 mode. IRQs remain disabled.
1010 @
1011 mrs r0, cpsr
1012 eor r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
1013 msr spsr_cxsf, r0
1014
1015 @
1016 @ the branch table must immediately follow this code
1017 @
1018 and lr, lr, #0x0f
1019 THUMB( adr r0, 1f )
1020 THUMB( ldr lr, [r0, lr, lsl #2] )
1021 mov r0, sp
1022 ARM( ldr lr, [pc, lr, lsl #2] )
1023 movs pc, lr @ branch to handler in SVC mode
1024ENDPROC(vector_\name)
1025
1026 .align 2
1027 @ handler addresses follow this label
10281:
1029 .endm
1030
1031 .globl __stubs_start
1032__stubs_start:
1033/*
1034 * Interrupt dispatcher
1035 */
1036 vector_stub irq, IRQ_MODE, 4
1037
1038 .long __irq_usr @ 0 (USR_26 / USR_32)
1039 .long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
1040 .long __irq_invalid @ 2 (IRQ_26 / IRQ_32)
1041 .long __irq_svc @ 3 (SVC_26 / SVC_32)
1042 .long __irq_invalid @ 4
1043 .long __irq_invalid @ 5
1044 .long __irq_invalid @ 6
1045 .long __irq_invalid @ 7
1046 .long __irq_invalid @ 8
1047 .long __irq_invalid @ 9
1048 .long __irq_invalid @ a
1049 .long __irq_invalid @ b
1050 .long __irq_invalid @ c
1051 .long __irq_invalid @ d
1052 .long __irq_invalid @ e
1053 .long __irq_invalid @ f
1054
1055/*
1056 * Data abort dispatcher
1057 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
1058 */
1059 vector_stub dabt, ABT_MODE, 8
1060
1061 .long __dabt_usr @ 0 (USR_26 / USR_32)
1062 .long __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
1063 .long __dabt_invalid @ 2 (IRQ_26 / IRQ_32)
1064 .long __dabt_svc @ 3 (SVC_26 / SVC_32)
1065 .long __dabt_invalid @ 4
1066 .long __dabt_invalid @ 5
1067 .long __dabt_invalid @ 6
1068 .long __dabt_invalid @ 7
1069 .long __dabt_invalid @ 8
1070 .long __dabt_invalid @ 9
1071 .long __dabt_invalid @ a
1072 .long __dabt_invalid @ b
1073 .long __dabt_invalid @ c
1074 .long __dabt_invalid @ d
1075 .long __dabt_invalid @ e
1076 .long __dabt_invalid @ f
1077
1078/*
1079 * Prefetch abort dispatcher
1080 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
1081 */
1082 vector_stub pabt, ABT_MODE, 4
1083
1084 .long __pabt_usr @ 0 (USR_26 / USR_32)
1085 .long __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
1086 .long __pabt_invalid @ 2 (IRQ_26 / IRQ_32)
1087 .long __pabt_svc @ 3 (SVC_26 / SVC_32)
1088 .long __pabt_invalid @ 4
1089 .long __pabt_invalid @ 5
1090 .long __pabt_invalid @ 6
1091 .long __pabt_invalid @ 7
1092 .long __pabt_invalid @ 8
1093 .long __pabt_invalid @ 9
1094 .long __pabt_invalid @ a
1095 .long __pabt_invalid @ b
1096 .long __pabt_invalid @ c
1097 .long __pabt_invalid @ d
1098 .long __pabt_invalid @ e
1099 .long __pabt_invalid @ f
1100
1101/*
1102 * Undef instr entry dispatcher
1103 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
1104 */
1105 vector_stub und, UND_MODE
1106
1107 .long __und_usr @ 0 (USR_26 / USR_32)
1108 .long __und_invalid @ 1 (FIQ_26 / FIQ_32)
1109 .long __und_invalid @ 2 (IRQ_26 / IRQ_32)
1110 .long __und_svc @ 3 (SVC_26 / SVC_32)
1111 .long __und_invalid @ 4
1112 .long __und_invalid @ 5
1113 .long __und_invalid @ 6
1114 .long __und_invalid @ 7
1115 .long __und_invalid @ 8
1116 .long __und_invalid @ 9
1117 .long __und_invalid @ a
1118 .long __und_invalid @ b
1119 .long __und_invalid @ c
1120 .long __und_invalid @ d
1121 .long __und_invalid @ e
1122 .long __und_invalid @ f
1123
1124 .align 5
1125
1126/*=============================================================================
1127 * Undefined FIQs
1128 *-----------------------------------------------------------------------------
1129 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
1130 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
1131 * Basically to switch modes, we *HAVE* to clobber one register... brain
1132 * damage alert! I don't think that we can execute any code in here in any
1133 * other mode than FIQ... Ok you can switch to another mode, but you can't
1134 * get out of that mode without clobbering one register.
1135 */
1136vector_fiq:
1137 subs pc, lr, #4
1138
1139/*=============================================================================
1140 * Address exception handler
1141 *-----------------------------------------------------------------------------
1142 * These aren't too critical.
1143 * (they're not supposed to happen, and won't happen in 32-bit data mode).
1144 */
1145
1146vector_addrexcptn:
1147 b vector_addrexcptn
1148
1149/*
1150 * We group all the following data together to optimise
1151 * for CPUs with separate I & D caches.
1152 */
1153 .align 5
1154
1155.LCvswi:
1156 .word vector_swi
1157
1158 .globl __stubs_end
1159__stubs_end:
1160
1161 .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
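/*
 * Informal derivation of the stubs_offset trick: the vectors below are
 * copied to the vector page (typically 0xffff0000) and the stubs to
 * 0xffff0200, but both are assembled at their link addresses. Writing
 * "b vector_und + stubs_offset" makes the encoded displacement
 *
 *	(vector_und - __stubs_start) + 0x200 + (__vectors_start - <branch address>)
 *
 * so once the branch runs from its copy at
 * 0xffff0000 + (<branch address> - __vectors_start), it lands on
 * 0xffff0200 + (vector_und - __stubs_start), i.e. exactly on the copied
 * stub.
 */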
1162
1163 .globl __vectors_start
1164__vectors_start:
1165 ARM( swi SYS_ERROR0 )
1166 THUMB( svc #0 )
1167 THUMB( nop )
1168 W(b) vector_und + stubs_offset
1169 W(ldr) pc, .LCvswi + stubs_offset
1170 W(b) vector_pabt + stubs_offset
1171 W(b) vector_dabt + stubs_offset
1172 W(b) vector_addrexcptn + stubs_offset
1173 W(b) vector_irq + stubs_offset
1174 W(b) vector_fiq + stubs_offset
1175
1176 .globl __vectors_end
1177__vectors_end:
1178
1179 .data
1180
1181 .globl cr_alignment
1182 .globl cr_no_alignment
1183cr_alignment:
1184 .space 4
1185cr_no_alignment:
1186 .space 4
1187
1188#ifdef CONFIG_MULTI_IRQ_HANDLER
1189 .globl handle_arch_irq
1190handle_arch_irq:
1191 .space 4
1192#endif
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * linux/arch/arm/kernel/entry-armv.S
4 *
5 * Copyright (C) 1996,1997,1998 Russell King.
6 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
7 * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
8 *
9 * Low-level vector interface routines
10 *
11 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
12 * that causes it to save wrong values... Be aware!
13 */
14
15#include <linux/init.h>
16
17#include <asm/assembler.h>
18#include <asm/memory.h>
19#include <asm/glue-df.h>
20#include <asm/glue-pf.h>
21#include <asm/vfpmacros.h>
22#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
23#include <mach/entry-macro.S>
24#endif
25#include <asm/thread_notify.h>
26#include <asm/unwind.h>
27#include <asm/unistd.h>
28#include <asm/tls.h>
29#include <asm/system_info.h>
30#include <asm/uaccess-asm.h>
31
32#include "entry-header.S"
33#include <asm/entry-macro-multi.S>
34#include <asm/probes.h>
35
36/*
37 * Interrupt handling.
38 */
39 .macro irq_handler
40#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
41 ldr r1, =handle_arch_irq
42 mov r0, sp
43 badr lr, 9997f
44 ldr pc, [r1]
45#else
46 arch_irq_handler_default
47#endif
489997:
49 .endm
50
51 .macro pabt_helper
52 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
53#ifdef MULTI_PABORT
54 ldr ip, .LCprocfns
55 mov lr, pc
56 ldr pc, [ip, #PROCESSOR_PABT_FUNC]
57#else
58 bl CPU_PABORT_HANDLER
59#endif
60 .endm
61
62 .macro dabt_helper
63
64 @
65 @ Call the processor-specific abort handler:
66 @
67 @ r2 - pt_regs
68 @ r4 - aborted context pc
69 @ r5 - aborted context psr
70 @
71 @ The abort handler must return the aborted address in r0, and
72 @ the fault status register in r1. r9 must be preserved.
73 @
74#ifdef MULTI_DABORT
75 ldr ip, .LCprocfns
76 mov lr, pc
77 ldr pc, [ip, #PROCESSOR_DABT_FUNC]
78#else
79 bl CPU_DABORT_HANDLER
80#endif
81 .endm
82
83 .section .entry.text,"ax",%progbits
84
85/*
86 * Invalid mode handlers
87 */
88 .macro inv_entry, reason
89 sub sp, sp, #PT_REGS_SIZE
90 ARM( stmib sp, {r1 - lr} )
91 THUMB( stmia sp, {r0 - r12} )
92 THUMB( str sp, [sp, #S_SP] )
93 THUMB( str lr, [sp, #S_LR] )
94 mov r1, #\reason
95 .endm
96
97__pabt_invalid:
98 inv_entry BAD_PREFETCH
99 b common_invalid
100ENDPROC(__pabt_invalid)
101
102__dabt_invalid:
103 inv_entry BAD_DATA
104 b common_invalid
105ENDPROC(__dabt_invalid)
106
107__irq_invalid:
108 inv_entry BAD_IRQ
109 b common_invalid
110ENDPROC(__irq_invalid)
111
112__und_invalid:
113 inv_entry BAD_UNDEFINSTR
114
115 @
116 @ XXX fall through to common_invalid
117 @
118
119@
120@ common_invalid - generic code for failed exception (re-entrant version of handlers)
121@
122common_invalid:
123 zero_fp
124
125 ldmia r0, {r4 - r6}
126 add r0, sp, #S_PC @ here for interlock avoidance
127 mov r7, #-1 @ "" "" "" ""
128 str r4, [sp] @ save preserved r0
129 stmia r0, {r5 - r7} @ lr_<exception>,
130 @ cpsr_<exception>, "old_r0"
131
132 mov r0, sp
133 b bad_mode
134ENDPROC(__und_invalid)
135
136/*
137 * SVC mode handlers
138 */
139
140#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
141#define SPFIX(code...) code
142#else
143#define SPFIX(code...)
144#endif
145
146 .macro svc_entry, stack_hole=0, trace=1, uaccess=1
147 UNWIND(.fnstart )
148 UNWIND(.save {r0 - pc} )
149 sub sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
150#ifdef CONFIG_THUMB2_KERNEL
151 SPFIX( str r0, [sp] ) @ temporarily saved
152 SPFIX( mov r0, sp )
153 SPFIX( tst r0, #4 ) @ test original stack alignment
154 SPFIX( ldr r0, [sp] ) @ restored
155#else
156 SPFIX( tst sp, #4 )
157#endif
158 SPFIX( subeq sp, sp, #4 )
159 stmia sp, {r1 - r12}
160
161 ldmia r0, {r3 - r5}
162 add r7, sp, #S_SP - 4 @ here for interlock avoidance
163 mov r6, #-1 @ "" "" "" ""
164 add r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
165 SPFIX( addeq r2, r2, #4 )
166 str r3, [sp, #-4]! @ save the "real" r0 copied
167 @ from the exception stack
168
169 mov r3, lr
170
171 @
172 @ We are now ready to fill in the remaining blanks on the stack:
173 @
174 @ r2 - sp_svc
175 @ r3 - lr_svc
176 @ r4 - lr_<exception>, already fixed up for correct return/restart
177 @ r5 - spsr_<exception>
178 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
179 @
180 stmia r7, {r2 - r6}
181
182 get_thread_info tsk
183 uaccess_entry tsk, r0, r1, r2, \uaccess
184
185 .if \trace
186#ifdef CONFIG_TRACE_IRQFLAGS
187 bl trace_hardirqs_off
188#endif
189 .endif
190 .endm
191
192 .align 5
193__dabt_svc:
194 svc_entry uaccess=0
195 mov r2, sp
196 dabt_helper
197 THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
198 svc_exit r5 @ return from exception
199 UNWIND(.fnend )
200ENDPROC(__dabt_svc)
201
202 .align 5
203__irq_svc:
204 svc_entry
205 irq_handler
206
207#ifdef CONFIG_PREEMPTION
208 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
209 ldr r0, [tsk, #TI_FLAGS] @ get flags
210 teq r8, #0 @ if preempt count != 0
211 movne r0, #0 @ force flags to 0
212 tst r0, #_TIF_NEED_RESCHED
213 blne svc_preempt
214#endif
215
216 svc_exit r5, irq = 1 @ return from exception
217 UNWIND(.fnend )
218ENDPROC(__irq_svc)
219
220 .ltorg
221
222#ifdef CONFIG_PREEMPTION
223svc_preempt:
224 mov r8, lr
2251: bl preempt_schedule_irq @ irq en/disable is done inside
226 ldr r0, [tsk, #TI_FLAGS] @ get the new task's TI_FLAGS
227 tst r0, #_TIF_NEED_RESCHED
228 reteq r8 @ go again
229 b 1b
230#endif
231
232__und_fault:
233 @ Correct the PC such that it is pointing at the instruction
234 @ which caused the fault. If the faulting instruction was ARM
235 @ the PC will be pointing at the next instruction, and we have to
236 @ subtract 4. Otherwise, it is Thumb, and the PC will be
237 @ pointing at the second half of the Thumb instruction. We
238 @ have to subtract 2.
239 ldr r2, [r0, #S_PC]
240 sub r2, r2, r1
241 str r2, [r0, #S_PC]
242 b do_undefinstr
243ENDPROC(__und_fault)
244
245 .align 5
246__und_svc:
247#ifdef CONFIG_KPROBES
248 @ If a kprobe is about to simulate a "stmdb sp..." instruction,
249 @ it obviously needs free stack space which then will belong to
250 @ the saved context.
251 svc_entry MAX_STACK_SIZE
252#else
253 svc_entry
254#endif
255 @
256 @ call emulation code, which returns using r9 if it has emulated
257 @ the instruction, or the more conventional lr if we are to treat
258 @ this as a real undefined instruction
259 @
260 @ r0 - instruction
261 @
262#ifndef CONFIG_THUMB2_KERNEL
263 ldr r0, [r4, #-4]
264#else
265 mov r1, #2
266 ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
267 cmp r0, #0xe800 @ 32-bit instruction if xx >= 0
268 blo __und_svc_fault
269 ldrh r9, [r4] @ bottom 16 bits
270 add r4, r4, #2
271 str r4, [sp, #S_PC]
272 orr r0, r9, r0, lsl #16
273#endif
274 badr r9, __und_svc_finish
275 mov r2, r4
276 bl call_fpe
277
278 mov r1, #4 @ PC correction to apply
279__und_svc_fault:
280 mov r0, sp @ struct pt_regs *regs
281 bl __und_fault
282
283__und_svc_finish:
284 get_thread_info tsk
285 ldr r5, [sp, #S_PSR] @ Get SVC cpsr
286 svc_exit r5 @ return from exception
287 UNWIND(.fnend )
288ENDPROC(__und_svc)
289
290 .align 5
291__pabt_svc:
292 svc_entry
293 mov r2, sp @ regs
294 pabt_helper
295 svc_exit r5 @ return from exception
296 UNWIND(.fnend )
297ENDPROC(__pabt_svc)
298
299 .align 5
300__fiq_svc:
301 svc_entry trace=0
302 mov r0, sp @ struct pt_regs *regs
303 bl handle_fiq_as_nmi
304 svc_exit_via_fiq
305 UNWIND(.fnend )
306ENDPROC(__fiq_svc)
307
308 .align 5
309.LCcralign:
310 .word cr_alignment
311#ifdef MULTI_DABORT
312.LCprocfns:
313 .word processor
314#endif
315.LCfp:
316 .word fp_enter
317
318/*
319 * Abort mode handlers
320 */
321
322@
323@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
324@ and reuses the same macros. However in abort mode we must also
325@ save/restore lr_abt and spsr_abt to make nested aborts safe.
326@
327 .align 5
328__fiq_abt:
329 svc_entry trace=0
330
331 ARM( msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
332 THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
333 THUMB( msr cpsr_c, r0 )
334 mov r1, lr @ Save lr_abt
335 mrs r2, spsr @ Save spsr_abt, abort is now safe
336 ARM( msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
337 THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
338 THUMB( msr cpsr_c, r0 )
339 stmfd sp!, {r1 - r2}
340
341 add r0, sp, #8 @ struct pt_regs *regs
342 bl handle_fiq_as_nmi
343
344 ldmfd sp!, {r1 - r2}
345 ARM( msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
346 THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
347 THUMB( msr cpsr_c, r0 )
348 mov lr, r1 @ Restore lr_abt, abort is unsafe
349 msr spsr_cxsf, r2 @ Restore spsr_abt
350 ARM( msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
351 THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
352 THUMB( msr cpsr_c, r0 )
353
354 svc_exit_via_fiq
355 UNWIND(.fnend )
356ENDPROC(__fiq_abt)
357
358/*
359 * User mode handlers
360 *
361 * EABI note: sp_svc is always 64-bit aligned here, so should PT_REGS_SIZE
362 */
363
364#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
365#error "sizeof(struct pt_regs) must be a multiple of 8"
366#endif
367
368 .macro usr_entry, trace=1, uaccess=1
369 UNWIND(.fnstart )
370 UNWIND(.cantunwind ) @ don't unwind the user space
371 sub sp, sp, #PT_REGS_SIZE
372 ARM( stmib sp, {r1 - r12} )
373 THUMB( stmia sp, {r0 - r12} )
374
375 ATRAP( mrc p15, 0, r7, c1, c0, 0)
376 ATRAP( ldr r8, .LCcralign)
377
378 ldmia r0, {r3 - r5}
379 add r0, sp, #S_PC @ here for interlock avoidance
380 mov r6, #-1 @ "" "" "" ""
381
382 str r3, [sp] @ save the "real" r0 copied
383 @ from the exception stack
384
385 ATRAP( ldr r8, [r8, #0])
386
387 @
388 @ We are now ready to fill in the remaining blanks on the stack:
389 @
390 @ r4 - lr_<exception>, already fixed up for correct return/restart
391 @ r5 - spsr_<exception>
392 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
393 @
394 @ Also, separately save sp_usr and lr_usr
395 @
396 stmia r0, {r4 - r6}
397 ARM( stmdb r0, {sp, lr}^ )
398 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
399
400 .if \uaccess
401 uaccess_disable ip
402 .endif
403
404 @ Enable the alignment trap while in kernel mode
405 ATRAP( teq r8, r7)
406 ATRAP( mcrne p15, 0, r8, c1, c0, 0)
407
408 @
409 @ Clear FP to mark the first stack frame
410 @
411 zero_fp
412
413 .if \trace
414#ifdef CONFIG_TRACE_IRQFLAGS
415 bl trace_hardirqs_off
416#endif
417 ct_user_exit save = 0
418 .endif
419 .endm
420
421 .macro kuser_cmpxchg_check
422#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
423#ifndef CONFIG_MMU
424#warning "NPTL on non MMU needs fixing"
425#else
426 @ Make sure our user space atomic helper is restarted
427 @ if it was interrupted in a critical region. Here we
428 @ perform a quick test inline since it should be false
429 @ 99.9999% of the time. The rest is done out of line.
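 @ (A user PC at or above TASK_SIZE can only mean we interrupted one
 @ of these helpers, since they run from the vectors page which sits
 @ above TASK_SIZE.)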
430 cmp r4, #TASK_SIZE
431 blhs kuser_cmpxchg64_fixup
432#endif
433#endif
434 .endm
435
436 .align 5
437__dabt_usr:
438 usr_entry uaccess=0
439 kuser_cmpxchg_check
440 mov r2, sp
441 dabt_helper
442 b ret_from_exception
443 UNWIND(.fnend )
444ENDPROC(__dabt_usr)
445
446 .align 5
447__irq_usr:
448 usr_entry
449 kuser_cmpxchg_check
450 irq_handler
451 get_thread_info tsk
452 mov why, #0
453 b ret_to_user_from_irq
454 UNWIND(.fnend )
455ENDPROC(__irq_usr)
456
457 .ltorg
458
459 .align 5
460__und_usr:
461 usr_entry uaccess=0
462
463 mov r2, r4
464 mov r3, r5
465
466 @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
467 @ faulting instruction depending on Thumb mode.
468 @ r3 = regs->ARM_cpsr
469 @
470 @ The emulation code returns using r9 if it has emulated the
471 @ instruction, or the more conventional lr if we are to treat
472 @ this as a real undefined instruction
473 @
474 badr r9, ret_from_exception
475
476 @ IRQs must be enabled before attempting to read the instruction from
477 @ user space since that could cause a page/translation fault if the
478 @ page table was modified by another CPU.
479 enable_irq
480
481 tst r3, #PSR_T_BIT @ Thumb mode?
482 bne __und_usr_thumb
483 sub r4, r2, #4 @ ARM instr at LR - 4
4841: ldrt r0, [r4]
485 ARM_BE8(rev r0, r0) @ little endian instruction
486
487 uaccess_disable ip
488
489 @ r0 = 32-bit ARM instruction which caused the exception
490 @ r2 = PC value for the following instruction (:= regs->ARM_pc)
491 @ r4 = PC value for the faulting instruction
492 @ lr = 32-bit undefined instruction function
493 badr lr, __und_usr_fault_32
494 b call_fpe
495
496__und_usr_thumb:
497 @ Thumb instruction
498 sub r4, r2, #2 @ First half of thumb instr at LR - 2
499#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
500/*
501 * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms
502 * can never be supported in a single kernel, this code is not applicable at
503 * all when __LINUX_ARM_ARCH__ < 6. This allows simplifying assumptions to be
504 * made about .arch directives.
505 */
506#if __LINUX_ARM_ARCH__ < 7
507/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
508#define NEED_CPU_ARCHITECTURE
509 ldr r5, .LCcpu_architecture
510 ldr r5, [r5]
511 cmp r5, #CPU_ARCH_ARMv7
512 blo __und_usr_fault_16 @ 16bit undefined instruction
513/*
514 * The following code won't get run unless the running CPU really is v7, so
515 * coding round the lack of ldrht on older arches is pointless. Temporarily
516 * override the assembler target arch with the minimum required instead:
517 */
518 .arch armv6t2
519#endif
5202: ldrht r5, [r4]
521ARM_BE8(rev16 r5, r5) @ little endian instruction
522 cmp r5, #0xe800 @ 32bit instruction if xx != 0
523 blo __und_usr_fault_16_pan @ 16bit undefined instruction
5243: ldrht r0, [r2]
525ARM_BE8(rev16 r0, r0) @ little endian instruction
526 uaccess_disable ip
527 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
528 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
529 orr r0, r0, r5, lsl #16
530 badr lr, __und_usr_fault_32
531 @ r0 = the two 16-bit Thumb instructions which caused the exception
532 @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
533 @ r4 = PC value for the first 16-bit Thumb instruction
534 @ lr = 32bit undefined instruction function
535
536#if __LINUX_ARM_ARCH__ < 7
537/* If the target arch was overridden, change it back: */
538#ifdef CONFIG_CPU_32v6K
539 .arch armv6k
540#else
541 .arch armv6
542#endif
543#endif /* __LINUX_ARM_ARCH__ < 7 */
544#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
545 b __und_usr_fault_16
546#endif
547 UNWIND(.fnend)
548ENDPROC(__und_usr)
549
550/*
551 * The out of line fixup for the ldrt instructions above.
552 */
553 .pushsection .text.fixup, "ax"
554 .align 2
5554: str r4, [sp, #S_PC] @ retry current instruction
556 ret r9
557 .popsection
558 .pushsection __ex_table,"a"
559 .long 1b, 4b
560#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
561 .long 2b, 4b
562 .long 3b, 4b
563#endif
564 .popsection
565
566/*
567 * Check whether the instruction is a co-processor instruction.
568 * If yes, we need to call the relevant co-processor handler.
569 *
570 * Note that we don't do a full check here for the co-processor
571 * instructions; all instructions with bit 27 set are well
572 * defined. The only instructions that should fault are the
573 * co-processor instructions. However, we have to watch out
574 * for the ARM6/ARM7 SWI bug.
575 *
576 * NEON is a special case that has to be handled here. Not all
577 * NEON instructions are co-processor instructions, so we have
578 * to make a special case of checking for them. Plus, there are
579 * five groups of them, so we have a table of mask/opcode pairs
580 * to check against, and if any match then we branch off into the
581 * NEON handler code.
582 *
583 * Emulators may wish to make use of the following registers:
584 * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
585 * r2 = PC value to resume execution after successful emulation
586 * r9 = normal "successful" return address
587 * r10 = this thread's thread_info structure
588 * lr = unrecognised instruction return address
589 * IRQs enabled, FIQs enabled.
590 */
591 @
592 @ Fall-through from Thumb-2 __und_usr
593 @
594#ifdef CONFIG_NEON
595 get_thread_info r10 @ get current thread
596 adr r6, .LCneon_thumb_opcodes
597 b 2f
598#endif
599call_fpe:
600 get_thread_info r10 @ get current thread
601#ifdef CONFIG_NEON
602 adr r6, .LCneon_arm_opcodes
6032: ldr r5, [r6], #4 @ mask value
604 ldr r7, [r6], #4 @ opcode bits matching in mask
605 cmp r5, #0 @ end mask?
606 beq 1f
607 and r8, r0, r5
608 cmp r8, r7 @ NEON instruction?
609 bne 2b
610 mov r7, #1
611 strb r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used
612 strb r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used
613 b do_vfp @ let VFP handler handle this
6141:
615#endif
616 tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
617 tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
618 reteq lr
619 and r8, r0, #0x00000f00 @ mask out CP number
620 THUMB( lsr r8, r8, #8 )
621 mov r7, #1
622 add r6, r10, #TI_USED_CP
623 ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
624 THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
625#ifdef CONFIG_IWMMXT
626 @ Test if we need to give access to iWMMXt coprocessors
627 ldr r5, [r10, #TI_FLAGS]
628 rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only
629 movscs r7, r5, lsr #(TIF_USING_IWMMXT + 1)
630 bcs iwmmxt_task_enable
631#endif
632 ARM( add pc, pc, r8, lsr #6 )
633 THUMB( lsl r8, r8, #2 )
634 THUMB( add pc, r8 )
635 nop
636
637 ret.w lr @ CP#0
638 W(b) do_fpe @ CP#1 (FPE)
639 W(b) do_fpe @ CP#2 (FPE)
640 ret.w lr @ CP#3
641#ifdef CONFIG_CRUNCH
642 b crunch_task_enable @ CP#4 (MaverickCrunch)
643 b crunch_task_enable @ CP#5 (MaverickCrunch)
644 b crunch_task_enable @ CP#6 (MaverickCrunch)
645#else
646 ret.w lr @ CP#4
647 ret.w lr @ CP#5
648 ret.w lr @ CP#6
649#endif
650 ret.w lr @ CP#7
651 ret.w lr @ CP#8
652 ret.w lr @ CP#9
653#ifdef CONFIG_VFP
654 W(b) do_vfp @ CP#10 (VFP)
655 W(b) do_vfp @ CP#11 (VFP)
656#else
657 ret.w lr @ CP#10 (VFP)
658 ret.w lr @ CP#11 (VFP)
659#endif
660 ret.w lr @ CP#12
661 ret.w lr @ CP#13
662 ret.w lr @ CP#14 (Debug)
663 ret.w lr @ CP#15 (Control)
664
665#ifdef NEED_CPU_ARCHITECTURE
666 .align 2
667.LCcpu_architecture:
668 .word __cpu_architecture
669#endif
670
671#ifdef CONFIG_NEON
672 .align 6
673
674.LCneon_arm_opcodes:
675 .word 0xfe000000 @ mask
676 .word 0xf2000000 @ opcode
677
678 .word 0xff100000 @ mask
679 .word 0xf4000000 @ opcode
680
681 .word 0x00000000 @ mask
682 .word 0x00000000 @ opcode
683
684.LCneon_thumb_opcodes:
685 .word 0xef000000 @ mask
686 .word 0xef000000 @ opcode
687
688 .word 0xff100000 @ mask
689 .word 0xf9000000 @ opcode
690
691 .word 0x00000000 @ mask
692 .word 0x00000000 @ opcode
693#endif
694
695do_fpe:
696 ldr r4, .LCfp
697 add r10, r10, #TI_FPSTATE @ r10 = workspace
698 ldr pc, [r4] @ Call FP module USR entry point
699
700/*
701 * The FP module is called with these registers set:
702 * r0 = instruction
703 * r2 = PC+4
704 * r9 = normal "successful" return address
705 * r10 = FP workspace
706 * lr = unrecognised FP instruction return address
707 */
708
709 .pushsection .data
710 .align 2
711ENTRY(fp_enter)
712 .word no_fp
713 .popsection
714
715ENTRY(no_fp)
716 ret lr
717ENDPROC(no_fp)
718
719__und_usr_fault_32:
720 mov r1, #4
721 b 1f
722__und_usr_fault_16_pan:
723 uaccess_disable ip
724__und_usr_fault_16:
725 mov r1, #2
7261: mov r0, sp
727 badr lr, ret_from_exception
728 b __und_fault
729ENDPROC(__und_usr_fault_32)
730ENDPROC(__und_usr_fault_16)
731
732 .align 5
733__pabt_usr:
734 usr_entry
735 mov r2, sp @ regs
736 pabt_helper
737 UNWIND(.fnend )
738 /* fall through */
739/*
740 * This is the return code to user mode for abort handlers
741 */
742ENTRY(ret_from_exception)
743 UNWIND(.fnstart )
744 UNWIND(.cantunwind )
745 get_thread_info tsk
746 mov why, #0
747 b ret_to_user
748 UNWIND(.fnend )
749ENDPROC(__pabt_usr)
750ENDPROC(ret_from_exception)
751
752 .align 5
753__fiq_usr:
754 usr_entry trace=0
755 kuser_cmpxchg_check
756 mov r0, sp @ struct pt_regs *regs
757 bl handle_fiq_as_nmi
758 get_thread_info tsk
759 restore_user_regs fast = 0, offset = 0
760 UNWIND(.fnend )
761ENDPROC(__fiq_usr)
762
763/*
764 * Register switch for ARMv3 and ARMv4 processors
765 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
766 * previous and next are guaranteed not to be the same.
767 */
768ENTRY(__switch_to)
769 UNWIND(.fnstart )
770 UNWIND(.cantunwind )
771 add ip, r1, #TI_CPU_SAVE
772 ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack
773 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
774 THUMB( str sp, [ip], #4 )
775 THUMB( str lr, [ip], #4 )
776 ldr r4, [r2, #TI_TP_VALUE]
777 ldr r5, [r2, #TI_TP_VALUE + 4]
778#ifdef CONFIG_CPU_USE_DOMAINS
779 mrc p15, 0, r6, c3, c0, 0 @ Get domain register
780 str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register
781 ldr r6, [r2, #TI_CPU_DOMAIN]
782#endif
783 switch_tls r1, r4, r5, r3, r7
784#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
785 ldr r7, [r2, #TI_TASK]
786 ldr r8, =__stack_chk_guard
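 @ An LDR immediate offset is limited to 12 bits, so if the canary
 @ field sits more than 4095 bytes into task_struct the offset is
 @ split: the upper bits are added into the base register first and
 @ only the low 12 bits go into the load itself.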
787 .if (TSK_STACK_CANARY > IMM12_MASK)
788 add r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK
789 .endif
790 ldr r7, [r7, #TSK_STACK_CANARY & IMM12_MASK]
791#endif
792#ifdef CONFIG_CPU_USE_DOMAINS
793 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
794#endif
795 mov r5, r0
796 add r4, r2, #TI_CPU_SAVE
797 ldr r0, =thread_notify_head
798 mov r1, #THREAD_NOTIFY_SWITCH
799 bl atomic_notifier_call_chain
800#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
801 str r7, [r8]
802#endif
803 THUMB( mov ip, r4 )
804 mov r0, r5
805 ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously
806 THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously
807 THUMB( ldr sp, [ip], #4 )
808 THUMB( ldr pc, [ip] )
809 UNWIND(.fnend )
810ENDPROC(__switch_to)
811
812 __INIT
813
814/*
815 * User helpers.
816 *
817 * Each segment is 32-byte aligned and will be moved to the top of the high
818 * vector page. New segments (if ever needed) must be added in front of
819 * existing ones. This mechanism should be used only for things that are
820 * really small and justified, and not be abused freely.
821 *
822 * See Documentation/arm/kernel_user_helpers.rst for formal definitions.
823 */
824 THUMB( .arm )
825
826 .macro usr_ret, reg
827#ifdef CONFIG_ARM_THUMB
828 bx \reg
829#else
830 ret \reg
831#endif
832 .endm
833
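/*
 * kuser_pad pads the helper starting at \sym out to its \size-byte
 * slot: first to a word boundary with zero bytes, then with the word
 * 0xe7fddef1, which lies in the permanently undefined instruction
 * space, so that running into the padding traps instead of executing
 * silently.
 */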
834 .macro kuser_pad, sym, size
835 .if (. - \sym) & 3
836 .rept 4 - (. - \sym) & 3
837 .byte 0
838 .endr
839 .endif
840 .rept (\size - (. - \sym)) / 4
841 .word 0xe7fddef1
842 .endr
843 .endm
844
845#ifdef CONFIG_KUSER_HELPERS
846 .align 5
847 .globl __kuser_helper_start
848__kuser_helper_start:
849
850/*
851 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
852 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
853 */
854
855__kuser_cmpxchg64: @ 0xffff0f60
856
857#if defined(CONFIG_CPU_32v6K)
858
859 stmfd sp!, {r4, r5, r6, r7}
860 ldrd r4, r5, [r0] @ load old val
861 ldrd r6, r7, [r1] @ load new val
862 smp_dmb arm
8631: ldrexd r0, r1, [r2] @ load current val
864 eors r3, r0, r4 @ compare with oldval (1)
865 eorseq r3, r1, r5 @ compare with oldval (2)
866 strexdeq r3, r6, r7, [r2] @ store newval if eq
867 teqeq r3, #1 @ success?
868 beq 1b @ if no then retry
869 smp_dmb arm
870 rsbs r0, r3, #0 @ set returned val and C flag
871 ldmfd sp!, {r4, r5, r6, r7}
872 usr_ret lr
873
874#elif !defined(CONFIG_SMP)
875
876#ifdef CONFIG_MMU
877
878 /*
879 * The only thing that can break atomicity in this cmpxchg64
880 * implementation is either an IRQ or a data abort exception
881 * causing another process/thread to be scheduled in the middle of
882 * the critical sequence. The same strategy as for cmpxchg is used.
883 */
884 stmfd sp!, {r4, r5, r6, lr}
885 ldmia r0, {r4, r5} @ load old val
886 ldmia r1, {r6, lr} @ load new val
8871: ldmia r2, {r0, r1} @ load current val
888 eors r3, r0, r4 @ compare with oldval (1)
889 eorseq r3, r1, r5 @ compare with oldval (2)
8902: stmiaeq r2, {r6, lr} @ store newval if eq
891 rsbs r0, r3, #0 @ set return val and C flag
892 ldmfd sp!, {r4, r5, r6, pc}
893
894 .text
895kuser_cmpxchg64_fixup:
896 @ Called from kuser_cmpxchg_fixup.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov r7, #0xffff0fff
	sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs r8, r4, r7
	rsbscs r8, r8, #(2b - 1b)
	strcs r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc kuser_cmpxchg32_fixup
#endif
	ret lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov r0, #-1
	adds r0, r0, #0
	usr_ret lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	kuser_pad __kuser_cmpxchg64, 64
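/*
 * Illustrative user-space view of the __kuser_cmpxchg64 helper above (a
 * sketch along the lines of Documentation/arm/kernel_user_helpers.rst;
 * the atomic_add64 wrapper is only an example, not part of the ABI, and
 * int64_t is the <stdint.h> type):
 *
 *	typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
 *					  const int64_t *newval,
 *					  volatile int64_t *ptr);
 *	#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
 *
 *	int64_t atomic_add64(volatile int64_t *ptr, int64_t val)
 *	{
 *		int64_t old, new;
 *
 *		do {
 *			old = *ptr;
 *			new = old + val;
 *		} while (__kuser_cmpxchg64(&old, &new, ptr));
 *
 *		return new;
 *	}
 *
 * The helper returns 0 (with the C flag set) when *ptr was updated, and
 * non-zero when the comparison failed and the caller should retry.
 */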

__kuser_memory_barrier: @ 0xffff0fa0
	smp_dmb arm
	usr_ret lr

	kuser_pad __kuser_memory_barrier, 32
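/*
 * Illustrative user-space view of the __kuser_memory_barrier helper above
 * (sketch only; the __kuser_dmb name is just a local convenience):
 *
 *	typedef void (__kuser_dmb_t)(void);
 *	#define __kuser_dmb (*(__kuser_dmb_t *)0xffff0fa0)
 *
 *	__kuser_dmb();	// issue a full memory barrier
 *
 * This lets user code request a barrier without knowing whether the
 * CPU/kernel combination needs a dmb instruction, a CP15 operation, or
 * nothing at all.
 */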

__kuser_cmpxchg: @ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence. To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg32_fixup below).
	 */
1:	ldr r3, [r2] @ load current val
	subs r3, r3, r0 @ compare with oldval
2:	streq r1, [r2] @ store newval if eq
	rsbs r0, r3, #0 @ set return val and C flag
	usr_ret lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov r7, #0xffff0fff
	sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs r8, r4, r7
	rsbscs r8, r8, #(2b - 1b)
	strcs r7, [sp, #S_PC]
	ret lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov r0, #-1
	adds r0, r0, #0
	usr_ret lr
#endif

#else

	smp_dmb arm
1:	ldrex r3, [r2]
	subs r3, r3, r0
	strexeq r3, r1, [r2]
	teqeq r3, #1
	beq 1b
	rsbs r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b __kuser_memory_barrier)
	ALT_UP(usr_ret lr)

#endif

	kuser_pad __kuser_cmpxchg, 32
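/*
 * Illustrative user-space view of the __kuser_cmpxchg helper above (a
 * sketch in the spirit of Documentation/arm/kernel_user_helpers.rst; the
 * atomic_add wrapper is only an example):
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *					volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 *	int atomic_add(volatile int *ptr, int val)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = *ptr;
 *			new = old + val;
 *		} while (__kuser_cmpxchg(old, new, ptr));
 *
 *		return new;
 *	}
 *
 * A zero return (C flag set) means *ptr held oldval and was updated to
 * newval; a non-zero return means the caller lost the race and should
 * retry.
 */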

__kuser_get_tls: @ 0xffff0fe0
	ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
	usr_ret lr
	mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep 3
	.word 0 @ 0xffff0ff0 software TLS value, then
	.endr @ pad up to __kuser_helper_version
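/*
 * Illustrative user-space view of __kuser_get_tls (sketch only):
 *
 *	typedef void * (__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 *	void *tls = __kuser_get_tls();
 *
 * kuser_get_tls_init patches the first instruction at boot: on CPUs with
 * the hardware TLS register it becomes the mrc at 0xffff0fe8, otherwise
 * the ldr simply reads the software TLS word at 0xffff0ff0.
 */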

__kuser_helper_version: @ 0xffff0ffc
	.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
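/*
 * User space can read this word to see how many helpers the running
 * kernel provides before relying on one of them. A sketch (per the
 * documented numbering, __kuser_cmpxchg is helper #2; error handling is
 * up to the application):
 *
 *	#define __kuser_helper_version (*(const int32_t *)0xffff0ffc)
 *
 *	if (__kuser_helper_version < 2) {
 *		// no __kuser_cmpxchg available: fall back or give up
 *	}
 *
 * Each 32-byte slot bumps the count by one, which is why the word is
 * computed as (end - start) >> 5.
 */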

	.globl __kuser_helper_end
__kuser_helper_end:

#endif

	THUMB( .thumb )

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's. Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
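/*
 * Conceptually, each stub generated by the vector_stub macro below saves
 * r0/lr/spsr into the small per-mode stack area and then dispatches on
 * the mode the CPU came from. A hypothetical C model (the handler table
 * and dispatch function names are illustrative only, not kernel code):
 *
 *	struct pt_regs;
 *	typedef void (*vec_handler_t)(struct pt_regs *);
 *
 *	// mirrors the 16 .long entries that follow each stub
 *	extern vec_handler_t irq_handlers[16];
 *
 *	static void dispatch(struct pt_regs *regs, unsigned long spsr)
 *	{
 *		// index 0 = came from USR, 3 = came from SVC, rest invalid
 *		irq_handlers[spsr & 0x0f](regs);
 *	}
 *
 * which is why every vector_stub invocation below must be immediately
 * followed by its 16-entry branch table.
 */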
	.macro vector_stub, name, mode, correction=0
	.align 5

vector_\name:
	.if \correction
	sub lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia sp, {r0, lr} @ save r0, lr
	mrs lr, spsr
	str lr, [sp, #8] @ save spsr

	@
	@ Prepare for SVC32 mode. IRQs remain disabled.
	@
	mrs r0, cpsr
	eor r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and lr, lr, #0x0f
	THUMB( adr r0, 1f )
	THUMB( ldr lr, [r0, lr, lsl #2] )
	mov r0, sp
	ARM( ldr lr, [pc, lr, lsl #2] )
	movs pc, lr @ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align 2
	@ handler addresses follow this label
1:
	.endm

	.section .stubs, "ax", %progbits
	@ This must be the first word
	.word vector_swi

vector_rst:
	ARM( swi SYS_ERROR0 )
	THUMB( svc #0 )
	THUMB( nop )
	b vector_und

/*
 * Interrupt dispatcher
 */
	vector_stub irq, IRQ_MODE, 4

	.long __irq_usr @ 0 (USR_26 / USR_32)
	.long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
	.long __irq_invalid @ 2 (IRQ_26 / IRQ_32)
	.long __irq_svc @ 3 (SVC_26 / SVC_32)
	.long __irq_invalid @ 4
	.long __irq_invalid @ 5
	.long __irq_invalid @ 6
	.long __irq_invalid @ 7
	.long __irq_invalid @ 8
	.long __irq_invalid @ 9
	.long __irq_invalid @ a
	.long __irq_invalid @ b
	.long __irq_invalid @ c
	.long __irq_invalid @ d
	.long __irq_invalid @ e
	.long __irq_invalid @ f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub dabt, ABT_MODE, 8

	.long __dabt_usr @ 0 (USR_26 / USR_32)
	.long __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
	.long __dabt_invalid @ 2 (IRQ_26 / IRQ_32)
	.long __dabt_svc @ 3 (SVC_26 / SVC_32)
	.long __dabt_invalid @ 4
	.long __dabt_invalid @ 5
	.long __dabt_invalid @ 6
	.long __dabt_invalid @ 7
	.long __dabt_invalid @ 8
	.long __dabt_invalid @ 9
	.long __dabt_invalid @ a
	.long __dabt_invalid @ b
	.long __dabt_invalid @ c
	.long __dabt_invalid @ d
	.long __dabt_invalid @ e
	.long __dabt_invalid @ f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub pabt, ABT_MODE, 4

	.long __pabt_usr @ 0 (USR_26 / USR_32)
	.long __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
	.long __pabt_invalid @ 2 (IRQ_26 / IRQ_32)
	.long __pabt_svc @ 3 (SVC_26 / SVC_32)
	.long __pabt_invalid @ 4
	.long __pabt_invalid @ 5
	.long __pabt_invalid @ 6
	.long __pabt_invalid @ 7
	.long __pabt_invalid @ 8
	.long __pabt_invalid @ 9
	.long __pabt_invalid @ a
	.long __pabt_invalid @ b
	.long __pabt_invalid @ c
	.long __pabt_invalid @ d
	.long __pabt_invalid @ e
	.long __pabt_invalid @ f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub und, UND_MODE

	.long __und_usr @ 0 (USR_26 / USR_32)
	.long __und_invalid @ 1 (FIQ_26 / FIQ_32)
	.long __und_invalid @ 2 (IRQ_26 / IRQ_32)
	.long __und_svc @ 3 (SVC_26 / SVC_32)
	.long __und_invalid @ 4
	.long __und_invalid @ 5
	.long __und_invalid @ 6
	.long __und_invalid @ 7
	.long __und_invalid @ 8
	.long __und_invalid @ 9
	.long __und_invalid @ a
	.long __und_invalid @ b
	.long __und_invalid @ c
	.long __und_invalid @ d
	.long __und_invalid @ e
	.long __und_invalid @ f

	.align 5

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical: they're not supposed to happen, and they
 * can't happen at all in 32-bit data mode.
 */

vector_addrexcptn:
	b vector_addrexcptn

/*=============================================================================
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Handle a FIQ using the SVC stack, allowing FIQ to act like an NMI on
 * x86 systems.
 */
	vector_stub fiq, FIQ_MODE, 4

	.long __fiq_usr @ 0 (USR_26 / USR_32)
	.long __fiq_svc @ 1 (FIQ_26 / FIQ_32)
	.long __fiq_svc @ 2 (IRQ_26 / IRQ_32)
	.long __fiq_svc @ 3 (SVC_26 / SVC_32)
	.long __fiq_svc @ 4
	.long __fiq_svc @ 5
	.long __fiq_svc @ 6
	.long __fiq_abt @ 7
	.long __fiq_svc @ 8
	.long __fiq_svc @ 9
	.long __fiq_svc @ a
	.long __fiq_svc @ b
	.long __fiq_svc @ c
	.long __fiq_svc @ d
	.long __fiq_svc @ e
	.long __fiq_svc @ f

	.globl vector_fiq

	.section .vectors, "ax", %progbits
.L__vectors_start:
	W(b) vector_rst
	W(b) vector_und
	W(ldr) pc, .L__vectors_start + 0x1000
	W(b) vector_pabt
	W(b) vector_dabt
	W(b) vector_addrexcptn
	W(b) vector_irq
	W(b) vector_fiq
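/*
 * For orientation only: the .stubs and .vectors sections above are not
 * executed where the linker places them. A simplified sketch of what
 * arch/arm/kernel/traps.c does at boot (names and details vary by kernel
 * version; this is not the literal code):
 *
 *	memcpy(vectors_page,          __vectors_start,
 *	       __vectors_end - __vectors_start);
 *	memcpy(vectors_page + 0x1000, __stubs_start,
 *	       __stubs_end - __stubs_start);
 *	flush_icache_range((unsigned long)vectors_page,
 *			   (unsigned long)vectors_page + PAGE_SIZE * 2);
 *
 * With high vectors enabled the page is mapped at 0xffff0000, so the
 * stubs land at 0xffff1000, which is what the
 * "W(ldr) pc, .L__vectors_start + 0x1000" entry above depends on.
 */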

	.data
	.align 2

	.globl cr_alignment
cr_alignment:
	.space 4