1/*
2 * linux/arch/arm/kernel/entry-armv.S
3 *
4 * Copyright (C) 1996,1997,1998 Russell King.
5 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
6 * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * Low-level vector interface routines
13 *
14 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
15 * that causes it to save wrong values... Be aware!
16 */
17
18#include <asm/memory.h>
19#include <asm/glue-df.h>
20#include <asm/glue-pf.h>
21#include <asm/vfpmacros.h>
22#include <mach/entry-macro.S>
23#include <asm/thread_notify.h>
24#include <asm/unwind.h>
25#include <asm/unistd.h>
26#include <asm/tls.h>
27
28#include "entry-header.S"
29#include <asm/entry-macro-multi.S>
30
31/*
32 * Interrupt handling.
33 */
34 .macro irq_handler
35#ifdef CONFIG_MULTI_IRQ_HANDLER
36 ldr r1, =handle_arch_irq
37 mov r0, sp
38 ldr r1, [r1]
39 adr lr, BSYM(9997f)
40 teq r1, #0
41 movne pc, r1
42#endif
43 arch_irq_handler_default
449997:
45 .endm
46
47 .macro pabt_helper
48 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
49#ifdef MULTI_PABORT
50 ldr ip, .LCprocfns
51 mov lr, pc
52 ldr pc, [ip, #PROCESSOR_PABT_FUNC]
53#else
54 bl CPU_PABORT_HANDLER
55#endif
56 .endm
57
58 .macro dabt_helper
59
60 @
61 @ Call the processor-specific abort handler:
62 @
63 @ r2 - pt_regs
64 @ r4 - aborted context pc
65 @ r5 - aborted context psr
66 @
67 @ The abort handler must return the aborted address in r0, and
68 @ the fault status register in r1. r9 must be preserved.
69 @
70#ifdef MULTI_DABORT
71 ldr ip, .LCprocfns
72 mov lr, pc
73 ldr pc, [ip, #PROCESSOR_DABT_FUNC]
74#else
75 bl CPU_DABORT_HANDLER
76#endif
77 .endm
78
79#ifdef CONFIG_KPROBES
80 .section .kprobes.text,"ax",%progbits
81#else
82 .text
83#endif
84
85/*
86 * Invalid mode handlers
87 */
88 .macro inv_entry, reason
89 sub sp, sp, #S_FRAME_SIZE
90 ARM( stmib sp, {r1 - lr} )
91 THUMB( stmia sp, {r0 - r12} )
92 THUMB( str sp, [sp, #S_SP] )
93 THUMB( str lr, [sp, #S_LR] )
94 mov r1, #\reason
95 .endm
96
97__pabt_invalid:
98 inv_entry BAD_PREFETCH
99 b common_invalid
100ENDPROC(__pabt_invalid)
101
102__dabt_invalid:
103 inv_entry BAD_DATA
104 b common_invalid
105ENDPROC(__dabt_invalid)
106
107__irq_invalid:
108 inv_entry BAD_IRQ
109 b common_invalid
110ENDPROC(__irq_invalid)
111
112__und_invalid:
113 inv_entry BAD_UNDEFINSTR
114
115 @
116 @ XXX fall through to common_invalid
117 @
118
119@
120@ common_invalid - generic code for failed exception (re-entrant version of handlers)
121@
122common_invalid:
123 zero_fp
124
125 ldmia r0, {r4 - r6}
126 add r0, sp, #S_PC @ here for interlock avoidance
127 mov r7, #-1 @ "" "" "" ""
128 str r4, [sp] @ save preserved r0
129 stmia r0, {r5 - r7} @ lr_<exception>,
130 @ cpsr_<exception>, "old_r0"
131
132 mov r0, sp
133 b bad_mode
134ENDPROC(__und_invalid)
135
136/*
137 * SVC mode handlers
138 */
139
140#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
141#define SPFIX(code...) code
142#else
143#define SPFIX(code...)
144#endif
145
146 .macro svc_entry, stack_hole=0
147 UNWIND(.fnstart )
148 UNWIND(.save {r0 - pc} )
149 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
150#ifdef CONFIG_THUMB2_KERNEL
151 SPFIX( str r0, [sp] ) @ temporarily saved
152 SPFIX( mov r0, sp )
153 SPFIX( tst r0, #4 ) @ test original stack alignment
154 SPFIX( ldr r0, [sp] ) @ restored
155#else
156 SPFIX( tst sp, #4 )
157#endif
158 SPFIX( subeq sp, sp, #4 )
159 stmia sp, {r1 - r12}
160
161 ldmia r0, {r3 - r5}
162 add r7, sp, #S_SP - 4 @ here for interlock avoidance
163 mov r6, #-1 @ "" "" "" ""
164 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
165 SPFIX( addeq r2, r2, #4 )
166 str r3, [sp, #-4]! @ save the "real" r0 copied
167 @ from the exception stack
168
169 mov r3, lr
170
171 @
172 @ We are now ready to fill in the remaining blanks on the stack:
173 @
174 @ r2 - sp_svc
175 @ r3 - lr_svc
176 @ r4 - lr_<exception>, already fixed up for correct return/restart
177 @ r5 - spsr_<exception>
178 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
179 @
180 stmia r7, {r2 - r6}
181
182#ifdef CONFIG_TRACE_IRQFLAGS
183 bl trace_hardirqs_off
184#endif
185 .endm
186
187 .align 5
188__dabt_svc:
189 svc_entry
190 mov r2, sp
191 dabt_helper
192
193 @
194 @ IRQs off again before pulling preserved data off the stack
195 @
196 disable_irq_notrace
197
198#ifdef CONFIG_TRACE_IRQFLAGS
199 tst r5, #PSR_I_BIT
200 bleq trace_hardirqs_on
201 tst r5, #PSR_I_BIT
202 blne trace_hardirqs_off
203#endif
204 svc_exit r5 @ return from exception
205 UNWIND(.fnend )
206ENDPROC(__dabt_svc)
207
208 .align 5
209__irq_svc:
210 svc_entry
211 irq_handler
212
213#ifdef CONFIG_PREEMPT
214 get_thread_info tsk
215 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
216 ldr r0, [tsk, #TI_FLAGS] @ get flags
217 teq r8, #0 @ if preempt count != 0
218 movne r0, #0 @ force flags to 0
219 tst r0, #_TIF_NEED_RESCHED
220 blne svc_preempt
221#endif
222
223#ifdef CONFIG_TRACE_IRQFLAGS
224 @ The parent context IRQs must have been enabled to get here in
225 @ the first place, so there's no point checking the PSR I bit.
226 bl trace_hardirqs_on
227#endif
228 svc_exit r5 @ return from exception
229 UNWIND(.fnend )
230ENDPROC(__irq_svc)
231
232 .ltorg
233
234#ifdef CONFIG_PREEMPT
235svc_preempt:
236 mov r8, lr
2371: bl preempt_schedule_irq @ irq en/disable is done inside
 238	ldr	r0, [tsk, #TI_FLAGS]	@ get new task's TI_FLAGS
239 tst r0, #_TIF_NEED_RESCHED
240 moveq pc, r8 @ go again
241 b 1b
242#endif
243
244 .align 5
245__und_svc:
246#ifdef CONFIG_KPROBES
247 @ If a kprobe is about to simulate a "stmdb sp..." instruction,
248 @ it obviously needs free stack space which then will belong to
249 @ the saved context.
250 svc_entry 64
251#else
252 svc_entry
253#endif
254 @
255 @ call emulation code, which returns using r9 if it has emulated
256 @ the instruction, or the more conventional lr if we are to treat
257 @ this as a real undefined instruction
258 @
259 @ r0 - instruction
260 @
261#ifndef CONFIG_THUMB2_KERNEL
262 ldr r0, [r4, #-4]
263#else
264 ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
265 and r9, r0, #0xf800
266 cmp r9, #0xe800 @ 32-bit instruction if xx >= 0
267 ldrhhs r9, [r4] @ bottom 16 bits
268 orrhs r0, r9, r0, lsl #16
269#endif
270 adr r9, BSYM(1f)
271 mov r2, r4
272 bl call_fpe
273
274 mov r0, sp @ struct pt_regs *regs
275 bl do_undefinstr
276
277 @
278 @ IRQs off again before pulling preserved data off the stack
279 @
2801: disable_irq_notrace
281
282 @
283 @ restore SPSR and restart the instruction
284 @
285 ldr r5, [sp, #S_PSR] @ Get SVC cpsr
286#ifdef CONFIG_TRACE_IRQFLAGS
287 tst r5, #PSR_I_BIT
288 bleq trace_hardirqs_on
289 tst r5, #PSR_I_BIT
290 blne trace_hardirqs_off
291#endif
292 svc_exit r5 @ return from exception
293 UNWIND(.fnend )
294ENDPROC(__und_svc)
295
296 .align 5
297__pabt_svc:
298 svc_entry
299 mov r2, sp @ regs
300 pabt_helper
301
302 @
303 @ IRQs off again before pulling preserved data off the stack
304 @
305 disable_irq_notrace
306
307#ifdef CONFIG_TRACE_IRQFLAGS
308 tst r5, #PSR_I_BIT
309 bleq trace_hardirqs_on
310 tst r5, #PSR_I_BIT
311 blne trace_hardirqs_off
312#endif
313 svc_exit r5 @ return from exception
314 UNWIND(.fnend )
315ENDPROC(__pabt_svc)
316
317 .align 5
318.LCcralign:
319 .word cr_alignment
320#ifdef MULTI_DABORT
321.LCprocfns:
322 .word processor
323#endif
324.LCfp:
325 .word fp_enter
326
327/*
328 * User mode handlers
329 *
 330 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE should be too
331 */
332
333#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
334#error "sizeof(struct pt_regs) must be a multiple of 8"
335#endif
336
337 .macro usr_entry
338 UNWIND(.fnstart )
339 UNWIND(.cantunwind ) @ don't unwind the user space
340 sub sp, sp, #S_FRAME_SIZE
341 ARM( stmib sp, {r1 - r12} )
342 THUMB( stmia sp, {r0 - r12} )
343
344 ldmia r0, {r3 - r5}
345 add r0, sp, #S_PC @ here for interlock avoidance
346 mov r6, #-1 @ "" "" "" ""
347
348 str r3, [sp] @ save the "real" r0 copied
349 @ from the exception stack
350
351 @
352 @ We are now ready to fill in the remaining blanks on the stack:
353 @
354 @ r4 - lr_<exception>, already fixed up for correct return/restart
355 @ r5 - spsr_<exception>
356 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
357 @
358 @ Also, separately save sp_usr and lr_usr
359 @
360 stmia r0, {r4 - r6}
361 ARM( stmdb r0, {sp, lr}^ )
362 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
363
364 @
365 @ Enable the alignment trap while in kernel mode
366 @
367 alignment_trap r0
368
369 @
370 @ Clear FP to mark the first stack frame
371 @
372 zero_fp
373
374#ifdef CONFIG_IRQSOFF_TRACER
375 bl trace_hardirqs_off
376#endif
377 .endm
378
379 .macro kuser_cmpxchg_check
380#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
381#ifndef CONFIG_MMU
382#warning "NPTL on non MMU needs fixing"
383#else
384 @ Make sure our user space atomic helper is restarted
385 @ if it was interrupted in a critical region. Here we
386 @ perform a quick test inline since it should be false
387 @ 99.9999% of the time. The rest is done out of line.
388 cmp r4, #TASK_SIZE
389 blhs kuser_cmpxchg64_fixup
390#endif
391#endif
392 .endm
393
394 .align 5
395__dabt_usr:
396 usr_entry
397 kuser_cmpxchg_check
398 mov r2, sp
399 dabt_helper
400 b ret_from_exception
401 UNWIND(.fnend )
402ENDPROC(__dabt_usr)
403
404 .align 5
405__irq_usr:
406 usr_entry
407 kuser_cmpxchg_check
408 irq_handler
409 get_thread_info tsk
410 mov why, #0
411 b ret_to_user_from_irq
412 UNWIND(.fnend )
413ENDPROC(__irq_usr)
414
415 .ltorg
416
417 .align 5
418__und_usr:
419 usr_entry
420
421 mov r2, r4
422 mov r3, r5
423
424 @
425 @ fall through to the emulation code, which returns using r9 if
426 @ it has emulated the instruction, or the more conventional lr
427 @ if we are to treat this as a real undefined instruction
428 @
429 @ r0 - instruction
430 @
431 adr r9, BSYM(ret_from_exception)
432 adr lr, BSYM(__und_usr_unknown)
433 tst r3, #PSR_T_BIT @ Thumb mode?
434 itet eq @ explicit IT needed for the 1f label
435 subeq r4, r2, #4 @ ARM instr at LR - 4
436 subne r4, r2, #2 @ Thumb instr at LR - 2
4371: ldreqt r0, [r4]
438#ifdef CONFIG_CPU_ENDIAN_BE8
439 reveq r0, r0 @ little endian instruction
440#endif
441 beq call_fpe
442 @ Thumb instruction
443#if __LINUX_ARM_ARCH__ >= 7
4442:
445 ARM( ldrht r5, [r4], #2 )
446 THUMB( ldrht r5, [r4] )
447 THUMB( add r4, r4, #2 )
448 and r0, r5, #0xf800 @ mask bits 111x x... .... ....
449 cmp r0, #0xe800 @ 32bit instruction if xx != 0
450 blo __und_usr_unknown
4513: ldrht r0, [r4]
452 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
453 orr r0, r0, r5, lsl #16
454#else
455 b __und_usr_unknown
456#endif
457 UNWIND(.fnend )
458ENDPROC(__und_usr)
459
460 @
461 @ fallthrough to call_fpe
462 @
463
464/*
465 * The out of line fixup for the ldrt above.
466 */
467 .pushsection .fixup, "ax"
4684: mov pc, r9
469 .popsection
470 .pushsection __ex_table,"a"
471 .long 1b, 4b
472#if __LINUX_ARM_ARCH__ >= 7
473 .long 2b, 4b
474 .long 3b, 4b
475#endif
476 .popsection
477
478/*
479 * Check whether the instruction is a co-processor instruction.
480 * If yes, we need to call the relevant co-processor handler.
481 *
482 * Note that we don't do a full check here for the co-processor
483 * instructions; all instructions with bit 27 set are well
484 * defined. The only instructions that should fault are the
485 * co-processor instructions. However, we have to watch out
486 * for the ARM6/ARM7 SWI bug.
487 *
488 * NEON is a special case that has to be handled here. Not all
489 * NEON instructions are co-processor instructions, so we have
 490 * to make a special case of checking for them. Plus, there are
491 * five groups of them, so we have a table of mask/opcode pairs
492 * to check against, and if any match then we branch off into the
493 * NEON handler code.
494 *
495 * Emulators may wish to make use of the following registers:
496 * r0 = instruction opcode.
497 * r2 = PC+4
498 * r9 = normal "successful" return address
 499 * r10 = this thread's thread_info structure.
500 * lr = unrecognised instruction return address
501 */
502 @
503 @ Fall-through from Thumb-2 __und_usr
504 @
505#ifdef CONFIG_NEON
506 adr r6, .LCneon_thumb_opcodes
507 b 2f
508#endif
509call_fpe:
510#ifdef CONFIG_NEON
511 adr r6, .LCneon_arm_opcodes
5122:
513 ldr r7, [r6], #4 @ mask value
514 cmp r7, #0 @ end mask?
515 beq 1f
516 and r8, r0, r7
517 ldr r7, [r6], #4 @ opcode bits matching in mask
518 cmp r8, r7 @ NEON instruction?
519 bne 2b
520 get_thread_info r10
521 mov r7, #1
522 strb r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used
523 strb r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used
524 b do_vfp @ let VFP handler handle this
5251:
526#endif
527 tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
528 tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
529#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
530 and r8, r0, #0x0f000000 @ mask out op-code bits
531 teqne r8, #0x0f000000 @ SWI (ARM6/7 bug)?
532#endif
533 moveq pc, lr
534 get_thread_info r10 @ get current thread
535 and r8, r0, #0x00000f00 @ mask out CP number
536 THUMB( lsr r8, r8, #8 )
537 mov r7, #1
538 add r6, r10, #TI_USED_CP
539 ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
540 THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
541#ifdef CONFIG_IWMMXT
542 @ Test if we need to give access to iWMMXt coprocessors
543 ldr r5, [r10, #TI_FLAGS]
544 rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only
545 movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1)
546 bcs iwmmxt_task_enable
547#endif
548 ARM( add pc, pc, r8, lsr #6 )
549 THUMB( lsl r8, r8, #2 )
550 THUMB( add pc, r8 )
551 nop
552
553 movw_pc lr @ CP#0
554 W(b) do_fpe @ CP#1 (FPE)
555 W(b) do_fpe @ CP#2 (FPE)
556 movw_pc lr @ CP#3
557#ifdef CONFIG_CRUNCH
558 b crunch_task_enable @ CP#4 (MaverickCrunch)
559 b crunch_task_enable @ CP#5 (MaverickCrunch)
560 b crunch_task_enable @ CP#6 (MaverickCrunch)
561#else
562 movw_pc lr @ CP#4
563 movw_pc lr @ CP#5
564 movw_pc lr @ CP#6
565#endif
566 movw_pc lr @ CP#7
567 movw_pc lr @ CP#8
568 movw_pc lr @ CP#9
569#ifdef CONFIG_VFP
570 W(b) do_vfp @ CP#10 (VFP)
571 W(b) do_vfp @ CP#11 (VFP)
572#else
573 movw_pc lr @ CP#10 (VFP)
574 movw_pc lr @ CP#11 (VFP)
575#endif
576 movw_pc lr @ CP#12
577 movw_pc lr @ CP#13
578 movw_pc lr @ CP#14 (Debug)
579 movw_pc lr @ CP#15 (Control)
580
581#ifdef CONFIG_NEON
582 .align 6
583
584.LCneon_arm_opcodes:
585 .word 0xfe000000 @ mask
586 .word 0xf2000000 @ opcode
587
588 .word 0xff100000 @ mask
589 .word 0xf4000000 @ opcode
590
591 .word 0x00000000 @ mask
592 .word 0x00000000 @ opcode
593
594.LCneon_thumb_opcodes:
595 .word 0xef000000 @ mask
596 .word 0xef000000 @ opcode
597
598 .word 0xff100000 @ mask
599 .word 0xf9000000 @ opcode
600
601 .word 0x00000000 @ mask
602 .word 0x00000000 @ opcode
603#endif
604
605do_fpe:
606 enable_irq
607 ldr r4, .LCfp
608 add r10, r10, #TI_FPSTATE @ r10 = workspace
609 ldr pc, [r4] @ Call FP module USR entry point
610
611/*
612 * The FP module is called with these registers set:
613 * r0 = instruction
614 * r2 = PC+4
615 * r9 = normal "successful" return address
616 * r10 = FP workspace
617 * lr = unrecognised FP instruction return address
618 */
619
620 .pushsection .data
621ENTRY(fp_enter)
622 .word no_fp
623 .popsection
624
625ENTRY(no_fp)
626 mov pc, lr
627ENDPROC(no_fp)
628
629__und_usr_unknown:
630 enable_irq
631 mov r0, sp
632 adr lr, BSYM(ret_from_exception)
633 b do_undefinstr
634ENDPROC(__und_usr_unknown)
635
636 .align 5
637__pabt_usr:
638 usr_entry
639 mov r2, sp @ regs
640 pabt_helper
641 UNWIND(.fnend )
642 /* fall through */
643/*
644 * This is the return code to user mode for abort handlers
645 */
646ENTRY(ret_from_exception)
647 UNWIND(.fnstart )
648 UNWIND(.cantunwind )
649 get_thread_info tsk
650 mov why, #0
651 b ret_to_user
652 UNWIND(.fnend )
653ENDPROC(__pabt_usr)
654ENDPROC(ret_from_exception)
655
656/*
657 * Register switch for ARMv3 and ARMv4 processors
658 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
659 * previous and next are guaranteed not to be the same.
660 */
661ENTRY(__switch_to)
662 UNWIND(.fnstart )
663 UNWIND(.cantunwind )
664 add ip, r1, #TI_CPU_SAVE
665 ldr r3, [r2, #TI_TP_VALUE]
666 ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack
667 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
668 THUMB( str sp, [ip], #4 )
669 THUMB( str lr, [ip], #4 )
670#ifdef CONFIG_CPU_USE_DOMAINS
671 ldr r6, [r2, #TI_CPU_DOMAIN]
672#endif
673 set_tls r3, r4, r5
674#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
675 ldr r7, [r2, #TI_TASK]
676 ldr r8, =__stack_chk_guard
677 ldr r7, [r7, #TSK_STACK_CANARY]
678#endif
679#ifdef CONFIG_CPU_USE_DOMAINS
680 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
681#endif
682 mov r5, r0
683 add r4, r2, #TI_CPU_SAVE
684 ldr r0, =thread_notify_head
685 mov r1, #THREAD_NOTIFY_SWITCH
686 bl atomic_notifier_call_chain
687#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
688 str r7, [r8]
689#endif
690 THUMB( mov ip, r4 )
691 mov r0, r5
692 ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously
693 THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously
694 THUMB( ldr sp, [ip], #4 )
695 THUMB( ldr pc, [ip] )
696 UNWIND(.fnend )
697ENDPROC(__switch_to)
698
699 __INIT
700
701/*
702 * User helpers.
703 *
704 * Each segment is 32-byte aligned and will be moved to the top of the high
705 * vector page. New segments (if ever needed) must be added in front of
706 * existing ones. This mechanism should be used only for things that are
707 * really small and justified, and not be abused freely.
708 *
709 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
710 */
711 THUMB( .arm )
712
713 .macro usr_ret, reg
714#ifdef CONFIG_ARM_THUMB
715 bx \reg
716#else
717 mov pc, \reg
718#endif
719 .endm
720
721 .align 5
722 .globl __kuser_helper_start
723__kuser_helper_start:
724
725/*
726 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
727 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
728 */
729
730__kuser_cmpxchg64: @ 0xffff0f60
731
732#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
733
734 /*
735 * Poor you. No fast solution possible...
736 * The kernel itself must perform the operation.
737 * A special ghost syscall is used for that (see traps.c).
738 */
739 stmfd sp!, {r7, lr}
740 ldr r7, 1f @ it's 20 bits
741 swi __ARM_NR_cmpxchg64
742 ldmfd sp!, {r7, pc}
7431: .word __ARM_NR_cmpxchg64
744
745#elif defined(CONFIG_CPU_32v6K)
746
747 stmfd sp!, {r4, r5, r6, r7}
748 ldrd r4, r5, [r0] @ load old val
749 ldrd r6, r7, [r1] @ load new val
750 smp_dmb arm
7511: ldrexd r0, r1, [r2] @ load current val
752 eors r3, r0, r4 @ compare with oldval (1)
753 eoreqs r3, r1, r5 @ compare with oldval (2)
754 strexdeq r3, r6, r7, [r2] @ store newval if eq
755 teqeq r3, #1 @ success?
756 beq 1b @ if no then retry
757 smp_dmb arm
758 rsbs r0, r3, #0 @ set returned val and C flag
759 ldmfd sp!, {r4, r5, r6, r7}
760 bx lr
761
762#elif !defined(CONFIG_SMP)
763
764#ifdef CONFIG_MMU
765
766 /*
767 * The only thing that can break atomicity in this cmpxchg64
768 * implementation is either an IRQ or a data abort exception
769 * causing another process/thread to be scheduled in the middle of
770 * the critical sequence. The same strategy as for cmpxchg is used.
771 */
772 stmfd sp!, {r4, r5, r6, lr}
773 ldmia r0, {r4, r5} @ load old val
774 ldmia r1, {r6, lr} @ load new val
7751: ldmia r2, {r0, r1} @ load current val
776 eors r3, r0, r4 @ compare with oldval (1)
777 eoreqs r3, r1, r5 @ compare with oldval (2)
7782: stmeqia r2, {r6, lr} @ store newval if eq
779 rsbs r0, r3, #0 @ set return val and C flag
780 ldmfd sp!, {r4, r5, r6, pc}
781
782 .text
783kuser_cmpxchg64_fixup:
 784	@ Called from the kuser_cmpxchg_check macro.
785 @ r4 = address of interrupted insn (must be preserved).
786 @ sp = saved regs. r7 and r8 are clobbered.
787 @ 1b = first critical insn, 2b = last critical insn.
788 @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
789 mov r7, #0xffff0fff
790 sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
791 subs r8, r4, r7
792 rsbcss r8, r8, #(2b - 1b)
793 strcs r7, [sp, #S_PC]
794#if __LINUX_ARM_ARCH__ < 6
795 bcc kuser_cmpxchg32_fixup
796#endif
797 mov pc, lr
798 .previous
799
800#else
801#warning "NPTL on non MMU needs fixing"
802 mov r0, #-1
803 adds r0, r0, #0
804 usr_ret lr
805#endif
806
807#else
808#error "incoherent kernel configuration"
809#endif
810
811 /* pad to next slot */
812 .rept (16 - (. - __kuser_cmpxchg64)/4)
813 .word 0
814 .endr
815
816 .align 5
817
818__kuser_memory_barrier: @ 0xffff0fa0
819 smp_dmb arm
820 usr_ret lr
821
822 .align 5
823
824__kuser_cmpxchg: @ 0xffff0fc0
825
826#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
827
828 /*
829 * Poor you. No fast solution possible...
830 * The kernel itself must perform the operation.
831 * A special ghost syscall is used for that (see traps.c).
832 */
833 stmfd sp!, {r7, lr}
834 ldr r7, 1f @ it's 20 bits
835 swi __ARM_NR_cmpxchg
836 ldmfd sp!, {r7, pc}
8371: .word __ARM_NR_cmpxchg
838
839#elif __LINUX_ARM_ARCH__ < 6
840
841#ifdef CONFIG_MMU
842
843 /*
844 * The only thing that can break atomicity in this cmpxchg
845 * implementation is either an IRQ or a data abort exception
846 * causing another process/thread to be scheduled in the middle
847 * of the critical sequence. To prevent this, code is added to
848 * the IRQ and data abort exception handlers to set the pc back
849 * to the beginning of the critical section if it is found to be
850 * within that critical section (see kuser_cmpxchg_fixup).
851 */
8521: ldr r3, [r2] @ load current val
853 subs r3, r3, r0 @ compare with oldval
8542: streq r1, [r2] @ store newval if eq
855 rsbs r0, r3, #0 @ set return val and C flag
856 usr_ret lr
857
858 .text
859kuser_cmpxchg32_fixup:
860 @ Called from kuser_cmpxchg_check macro.
861 @ r4 = address of interrupted insn (must be preserved).
862 @ sp = saved regs. r7 and r8 are clobbered.
863 @ 1b = first critical insn, 2b = last critical insn.
864 @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
865 mov r7, #0xffff0fff
866 sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
867 subs r8, r4, r7
868 rsbcss r8, r8, #(2b - 1b)
869 strcs r7, [sp, #S_PC]
870 mov pc, lr
871 .previous
872
873#else
874#warning "NPTL on non MMU needs fixing"
875 mov r0, #-1
876 adds r0, r0, #0
877 usr_ret lr
878#endif
879
880#else
881
882 smp_dmb arm
8831: ldrex r3, [r2]
884 subs r3, r3, r0
885 strexeq r3, r1, [r2]
886 teqeq r3, #1
887 beq 1b
888 rsbs r0, r3, #0
889 /* beware -- each __kuser slot must be 8 instructions max */
890 ALT_SMP(b __kuser_memory_barrier)
891 ALT_UP(usr_ret lr)
892
893#endif
894
895 .align 5
896
897__kuser_get_tls: @ 0xffff0fe0
898 ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
899 usr_ret lr
900 mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
901 .rep 4
902 .word 0 @ 0xffff0ff0 software TLS value, then
903 .endr @ pad up to __kuser_helper_version
904
905__kuser_helper_version: @ 0xffff0ffc
906 .word ((__kuser_helper_end - __kuser_helper_start) >> 5)
907
908 .globl __kuser_helper_end
909__kuser_helper_end:
910
911 THUMB( .thumb )
912
913/*
914 * Vector stubs.
915 *
916 * This code is copied to 0xffff0200 so we can use branches in the
917 * vectors, rather than ldr's. Note that this code must not
918 * exceed 0x300 bytes.
919 *
920 * Common stub entry macro:
921 * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
922 *
923 * SP points to a minimal amount of processor-private memory, the address
924 * of which is copied into r0 for the mode specific abort handler.
925 */
926 .macro vector_stub, name, mode, correction=0
927 .align 5
928
929vector_\name:
930 .if \correction
931 sub lr, lr, #\correction
932 .endif
933
934 @
935 @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
936 @ (parent CPSR)
937 @
938 stmia sp, {r0, lr} @ save r0, lr
939 mrs lr, spsr
940 str lr, [sp, #8] @ save spsr
941
942 @
943 @ Prepare for SVC32 mode. IRQs remain disabled.
944 @
945 mrs r0, cpsr
946 eor r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
947 msr spsr_cxsf, r0
948
949 @
950 @ the branch table must immediately follow this code
951 @
952 and lr, lr, #0x0f
953 THUMB( adr r0, 1f )
954 THUMB( ldr lr, [r0, lr, lsl #2] )
955 mov r0, sp
956 ARM( ldr lr, [pc, lr, lsl #2] )
957 movs pc, lr @ branch to handler in SVC mode
958ENDPROC(vector_\name)
959
960 .align 2
961 @ handler addresses follow this label
9621:
963 .endm
964
965 .globl __stubs_start
966__stubs_start:
967/*
968 * Interrupt dispatcher
969 */
970 vector_stub irq, IRQ_MODE, 4
971
972 .long __irq_usr @ 0 (USR_26 / USR_32)
973 .long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
974 .long __irq_invalid @ 2 (IRQ_26 / IRQ_32)
975 .long __irq_svc @ 3 (SVC_26 / SVC_32)
976 .long __irq_invalid @ 4
977 .long __irq_invalid @ 5
978 .long __irq_invalid @ 6
979 .long __irq_invalid @ 7
980 .long __irq_invalid @ 8
981 .long __irq_invalid @ 9
982 .long __irq_invalid @ a
983 .long __irq_invalid @ b
984 .long __irq_invalid @ c
985 .long __irq_invalid @ d
986 .long __irq_invalid @ e
987 .long __irq_invalid @ f
988
989/*
990 * Data abort dispatcher
991 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
992 */
993 vector_stub dabt, ABT_MODE, 8
994
995 .long __dabt_usr @ 0 (USR_26 / USR_32)
996 .long __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
997 .long __dabt_invalid @ 2 (IRQ_26 / IRQ_32)
998 .long __dabt_svc @ 3 (SVC_26 / SVC_32)
999 .long __dabt_invalid @ 4
1000 .long __dabt_invalid @ 5
1001 .long __dabt_invalid @ 6
1002 .long __dabt_invalid @ 7
1003 .long __dabt_invalid @ 8
1004 .long __dabt_invalid @ 9
1005 .long __dabt_invalid @ a
1006 .long __dabt_invalid @ b
1007 .long __dabt_invalid @ c
1008 .long __dabt_invalid @ d
1009 .long __dabt_invalid @ e
1010 .long __dabt_invalid @ f
1011
1012/*
1013 * Prefetch abort dispatcher
1014 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
1015 */
1016 vector_stub pabt, ABT_MODE, 4
1017
1018 .long __pabt_usr @ 0 (USR_26 / USR_32)
1019 .long __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
1020 .long __pabt_invalid @ 2 (IRQ_26 / IRQ_32)
1021 .long __pabt_svc @ 3 (SVC_26 / SVC_32)
1022 .long __pabt_invalid @ 4
1023 .long __pabt_invalid @ 5
1024 .long __pabt_invalid @ 6
1025 .long __pabt_invalid @ 7
1026 .long __pabt_invalid @ 8
1027 .long __pabt_invalid @ 9
1028 .long __pabt_invalid @ a
1029 .long __pabt_invalid @ b
1030 .long __pabt_invalid @ c
1031 .long __pabt_invalid @ d
1032 .long __pabt_invalid @ e
1033 .long __pabt_invalid @ f
1034
1035/*
1036 * Undef instr entry dispatcher
1037 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
1038 */
1039 vector_stub und, UND_MODE
1040
1041 .long __und_usr @ 0 (USR_26 / USR_32)
1042 .long __und_invalid @ 1 (FIQ_26 / FIQ_32)
1043 .long __und_invalid @ 2 (IRQ_26 / IRQ_32)
1044 .long __und_svc @ 3 (SVC_26 / SVC_32)
1045 .long __und_invalid @ 4
1046 .long __und_invalid @ 5
1047 .long __und_invalid @ 6
1048 .long __und_invalid @ 7
1049 .long __und_invalid @ 8
1050 .long __und_invalid @ 9
1051 .long __und_invalid @ a
1052 .long __und_invalid @ b
1053 .long __und_invalid @ c
1054 .long __und_invalid @ d
1055 .long __und_invalid @ e
1056 .long __und_invalid @ f
1057
1058 .align 5
1059
1060/*=============================================================================
1061 * Undefined FIQs
1062 *-----------------------------------------------------------------------------
1063 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 1064 * MUST PRESERVE SVC SPSR, but we need to switch to SVC mode to show our msg.
1065 * Basically to switch modes, we *HAVE* to clobber one register... brain
1066 * damage alert! I don't think that we can execute any code in here in any
1067 * other mode than FIQ... Ok you can switch to another mode, but you can't
1068 * get out of that mode without clobbering one register.
1069 */
1070vector_fiq:
1071 disable_fiq
1072 subs pc, lr, #4
1073
1074/*=============================================================================
1075 * Address exception handler
1076 *-----------------------------------------------------------------------------
1077 * These aren't too critical.
1078 * (they're not supposed to happen, and won't happen in 32-bit data mode).
1079 */
1080
1081vector_addrexcptn:
1082 b vector_addrexcptn
1083
1084/*
1085 * We group all the following data together to optimise
1086 * for CPUs with separate I & D caches.
1087 */
1088 .align 5
1089
1090.LCvswi:
1091 .word vector_swi
1092
1093 .globl __stubs_end
1094__stubs_end:
1095
1096 .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
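@ At boot the vectors are copied to 0xffff0000 and the stubs to 0xffff0200
@ (see traps.c), preserving their 0x200 relative spacing.  Adding
@ stubs_offset to each branch target below therefore makes the PC-relative
@ branches, although assembled at the link-time addresses, resolve to the
@ correct stub once both blocks have been copied into the vector page.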
1097
1098 .globl __vectors_start
1099__vectors_start:
1100 ARM( swi SYS_ERROR0 )
1101 THUMB( svc #0 )
1102 THUMB( nop )
1103 W(b) vector_und + stubs_offset
1104 W(ldr) pc, .LCvswi + stubs_offset
1105 W(b) vector_pabt + stubs_offset
1106 W(b) vector_dabt + stubs_offset
1107 W(b) vector_addrexcptn + stubs_offset
1108 W(b) vector_irq + stubs_offset
1109 W(b) vector_fiq + stubs_offset
1110
1111 .globl __vectors_end
1112__vectors_end:
1113
1114 .data
1115
1116 .globl cr_alignment
1117 .globl cr_no_alignment
1118cr_alignment:
1119 .space 4
1120cr_no_alignment:
1121 .space 4
1122
1123#ifdef CONFIG_MULTI_IRQ_HANDLER
1124 .globl handle_arch_irq
1125handle_arch_irq:
1126 .space 4
1127#endif
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * linux/arch/arm/kernel/entry-armv.S
4 *
5 * Copyright (C) 1996,1997,1998 Russell King.
6 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
7 * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
8 *
9 * Low-level vector interface routines
10 *
11 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
12 * that causes it to save wrong values... Be aware!
13 */
14
15#include <linux/init.h>
16
17#include <asm/assembler.h>
18#include <asm/memory.h>
19#include <asm/glue-df.h>
20#include <asm/glue-pf.h>
21#include <asm/vfpmacros.h>
22#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
23#include <mach/entry-macro.S>
24#endif
25#include <asm/thread_notify.h>
26#include <asm/unwind.h>
27#include <asm/unistd.h>
28#include <asm/tls.h>
29#include <asm/system_info.h>
30
31#include "entry-header.S"
32#include <asm/entry-macro-multi.S>
33#include <asm/probes.h>
34
35/*
36 * Interrupt handling.
37 */
38 .macro irq_handler
39#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
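	@ handle_arch_irq is a function pointer installed at boot via
	@ set_handle_irq(); call it with r0 = pt_regs and have it return
	@ to the 9997 label below.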
40 ldr r1, =handle_arch_irq
41 mov r0, sp
42 badr lr, 9997f
43 ldr pc, [r1]
44#else
45 arch_irq_handler_default
46#endif
479997:
48 .endm
49
50 .macro pabt_helper
51 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
52#ifdef MULTI_PABORT
53 ldr ip, .LCprocfns
54 mov lr, pc
55 ldr pc, [ip, #PROCESSOR_PABT_FUNC]
56#else
57 bl CPU_PABORT_HANDLER
58#endif
59 .endm
60
61 .macro dabt_helper
62
63 @
64 @ Call the processor-specific abort handler:
65 @
66 @ r2 - pt_regs
67 @ r4 - aborted context pc
68 @ r5 - aborted context psr
69 @
70 @ The abort handler must return the aborted address in r0, and
71 @ the fault status register in r1. r9 must be preserved.
72 @
73#ifdef MULTI_DABORT
74 ldr ip, .LCprocfns
75 mov lr, pc
76 ldr pc, [ip, #PROCESSOR_DABT_FUNC]
77#else
78 bl CPU_DABORT_HANDLER
79#endif
80 .endm
81
82 .section .entry.text,"ax",%progbits
83
84/*
85 * Invalid mode handlers
86 */
87 .macro inv_entry, reason
88 sub sp, sp, #PT_REGS_SIZE
89 ARM( stmib sp, {r1 - lr} )
90 THUMB( stmia sp, {r0 - r12} )
91 THUMB( str sp, [sp, #S_SP] )
92 THUMB( str lr, [sp, #S_LR] )
93 mov r1, #\reason
94 .endm
95
96__pabt_invalid:
97 inv_entry BAD_PREFETCH
98 b common_invalid
99ENDPROC(__pabt_invalid)
100
101__dabt_invalid:
102 inv_entry BAD_DATA
103 b common_invalid
104ENDPROC(__dabt_invalid)
105
106__irq_invalid:
107 inv_entry BAD_IRQ
108 b common_invalid
109ENDPROC(__irq_invalid)
110
111__und_invalid:
112 inv_entry BAD_UNDEFINSTR
113
114 @
115 @ XXX fall through to common_invalid
116 @
117
118@
119@ common_invalid - generic code for failed exception (re-entrant version of handlers)
120@
121common_invalid:
122 zero_fp
123
124 ldmia r0, {r4 - r6}
125 add r0, sp, #S_PC @ here for interlock avoidance
126 mov r7, #-1 @ "" "" "" ""
127 str r4, [sp] @ save preserved r0
128 stmia r0, {r5 - r7} @ lr_<exception>,
129 @ cpsr_<exception>, "old_r0"
130
131 mov r0, sp
132 b bad_mode
133ENDPROC(__und_invalid)
134
135/*
136 * SVC mode handlers
137 */
138
139#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
140#define SPFIX(code...) code
141#else
142#define SPFIX(code...)
143#endif
144
145 .macro svc_entry, stack_hole=0, trace=1, uaccess=1
146 UNWIND(.fnstart )
147 UNWIND(.save {r0 - pc} )
148 sub sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
149#ifdef CONFIG_THUMB2_KERNEL
150 SPFIX( str r0, [sp] ) @ temporarily saved
151 SPFIX( mov r0, sp )
152 SPFIX( tst r0, #4 ) @ test original stack alignment
153 SPFIX( ldr r0, [sp] ) @ restored
154#else
155 SPFIX( tst sp, #4 )
156#endif
157 SPFIX( subeq sp, sp, #4 )
158 stmia sp, {r1 - r12}
159
160 ldmia r0, {r3 - r5}
161 add r7, sp, #S_SP - 4 @ here for interlock avoidance
162 mov r6, #-1 @ "" "" "" ""
163 add r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
164 SPFIX( addeq r2, r2, #4 )
165 str r3, [sp, #-4]! @ save the "real" r0 copied
166 @ from the exception stack
167
168 mov r3, lr
169
170 @
171 @ We are now ready to fill in the remaining blanks on the stack:
172 @
173 @ r2 - sp_svc
174 @ r3 - lr_svc
175 @ r4 - lr_<exception>, already fixed up for correct return/restart
176 @ r5 - spsr_<exception>
177 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
178 @
179 stmia r7, {r2 - r6}
180
181 get_thread_info tsk
182 ldr r0, [tsk, #TI_ADDR_LIMIT]
183 mov r1, #TASK_SIZE
184 str r1, [tsk, #TI_ADDR_LIMIT]
185 str r0, [sp, #SVC_ADDR_LIMIT]
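	@ Save the thread's current addr_limit in the frame and force it to
	@ TASK_SIZE (i.e. USER_DS) for the duration of the exception; svc_exit
	@ restores it, so a set_fs(KERNEL_DS) region in the interrupted code
	@ cannot leak into this handler.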
186
187 uaccess_save r0
188 .if \uaccess
189 uaccess_disable r0
190 .endif
191
192 .if \trace
193#ifdef CONFIG_TRACE_IRQFLAGS
194 bl trace_hardirqs_off
195#endif
196 .endif
197 .endm
198
199 .align 5
200__dabt_svc:
201 svc_entry uaccess=0
202 mov r2, sp
203 dabt_helper
204 THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
205 svc_exit r5 @ return from exception
206 UNWIND(.fnend )
207ENDPROC(__dabt_svc)
208
209 .align 5
210__irq_svc:
211 svc_entry
212 irq_handler
213
214#ifdef CONFIG_PREEMPT
215 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
216 ldr r0, [tsk, #TI_FLAGS] @ get flags
217 teq r8, #0 @ if preempt count != 0
218 movne r0, #0 @ force flags to 0
219 tst r0, #_TIF_NEED_RESCHED
220 blne svc_preempt
221#endif
222
223 svc_exit r5, irq = 1 @ return from exception
224 UNWIND(.fnend )
225ENDPROC(__irq_svc)
226
227 .ltorg
228
229#ifdef CONFIG_PREEMPT
230svc_preempt:
231 mov r8, lr
2321: bl preempt_schedule_irq @ irq en/disable is done inside
 233	ldr	r0, [tsk, #TI_FLAGS]	@ get new task's TI_FLAGS
234 tst r0, #_TIF_NEED_RESCHED
235 reteq r8 @ go again
236 b 1b
237#endif
238
239__und_fault:
240 @ Correct the PC such that it is pointing at the instruction
241 @ which caused the fault. If the faulting instruction was ARM
 242 @ the PC will be pointing at the next instruction, and we have
 243 @ to subtract 4. Otherwise, it is Thumb, and the PC will be
244 @ pointing at the second half of the Thumb instruction. We
245 @ have to subtract 2.
246 ldr r2, [r0, #S_PC]
247 sub r2, r2, r1
248 str r2, [r0, #S_PC]
249 b do_undefinstr
250ENDPROC(__und_fault)
251
252 .align 5
253__und_svc:
254#ifdef CONFIG_KPROBES
255 @ If a kprobe is about to simulate a "stmdb sp..." instruction,
256 @ it obviously needs free stack space which then will belong to
257 @ the saved context.
258 svc_entry MAX_STACK_SIZE
259#else
260 svc_entry
261#endif
262 @
263 @ call emulation code, which returns using r9 if it has emulated
264 @ the instruction, or the more conventional lr if we are to treat
265 @ this as a real undefined instruction
266 @
267 @ r0 - instruction
268 @
269#ifndef CONFIG_THUMB2_KERNEL
270 ldr r0, [r4, #-4]
271#else
272 mov r1, #2
273 ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
274 cmp r0, #0xe800 @ 32-bit instruction if xx >= 0
275 blo __und_svc_fault
276 ldrh r9, [r4] @ bottom 16 bits
277 add r4, r4, #2
278 str r4, [sp, #S_PC]
279 orr r0, r9, r0, lsl #16
280#endif
281 badr r9, __und_svc_finish
282 mov r2, r4
283 bl call_fpe
284
285 mov r1, #4 @ PC correction to apply
286__und_svc_fault:
287 mov r0, sp @ struct pt_regs *regs
288 bl __und_fault
289
290__und_svc_finish:
291 get_thread_info tsk
292 ldr r5, [sp, #S_PSR] @ Get SVC cpsr
293 svc_exit r5 @ return from exception
294 UNWIND(.fnend )
295ENDPROC(__und_svc)
296
297 .align 5
298__pabt_svc:
299 svc_entry
300 mov r2, sp @ regs
301 pabt_helper
302 svc_exit r5 @ return from exception
303 UNWIND(.fnend )
304ENDPROC(__pabt_svc)
305
306 .align 5
307__fiq_svc:
308 svc_entry trace=0
309 mov r0, sp @ struct pt_regs *regs
310 bl handle_fiq_as_nmi
311 svc_exit_via_fiq
312 UNWIND(.fnend )
313ENDPROC(__fiq_svc)
314
315 .align 5
316.LCcralign:
317 .word cr_alignment
318#ifdef MULTI_DABORT
319.LCprocfns:
320 .word processor
321#endif
322.LCfp:
323 .word fp_enter
324
325/*
326 * Abort mode handlers
327 */
328
329@
330@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
331@ and reuses the same macros. However, in abort mode we must also
332@ save/restore lr_abt and spsr_abt to make nested aborts safe.
333@
334 .align 5
335__fiq_abt:
336 svc_entry trace=0
337
338 ARM( msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
339 THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
340 THUMB( msr cpsr_c, r0 )
341 mov r1, lr @ Save lr_abt
342 mrs r2, spsr @ Save spsr_abt, abort is now safe
343 ARM( msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
344 THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
345 THUMB( msr cpsr_c, r0 )
346 stmfd sp!, {r1 - r2}
347
348 add r0, sp, #8 @ struct pt_regs *regs
349 bl handle_fiq_as_nmi
350
351 ldmfd sp!, {r1 - r2}
352 ARM( msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
353 THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
354 THUMB( msr cpsr_c, r0 )
355 mov lr, r1 @ Restore lr_abt, abort is unsafe
356 msr spsr_cxsf, r2 @ Restore spsr_abt
357 ARM( msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
358 THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
359 THUMB( msr cpsr_c, r0 )
360
361 svc_exit_via_fiq
362 UNWIND(.fnend )
363ENDPROC(__fiq_abt)
364
365/*
366 * User mode handlers
367 *
 368 * EABI note: sp_svc is always 64-bit aligned here, so PT_REGS_SIZE should be too
369 */
370
371#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
372#error "sizeof(struct pt_regs) must be a multiple of 8"
373#endif
374
375 .macro usr_entry, trace=1, uaccess=1
376 UNWIND(.fnstart )
377 UNWIND(.cantunwind ) @ don't unwind the user space
378 sub sp, sp, #PT_REGS_SIZE
379 ARM( stmib sp, {r1 - r12} )
380 THUMB( stmia sp, {r0 - r12} )
381
382 ATRAP( mrc p15, 0, r7, c1, c0, 0)
383 ATRAP( ldr r8, .LCcralign)
384
385 ldmia r0, {r3 - r5}
386 add r0, sp, #S_PC @ here for interlock avoidance
387 mov r6, #-1 @ "" "" "" ""
388
389 str r3, [sp] @ save the "real" r0 copied
390 @ from the exception stack
391
392 ATRAP( ldr r8, [r8, #0])
393
394 @
395 @ We are now ready to fill in the remaining blanks on the stack:
396 @
397 @ r4 - lr_<exception>, already fixed up for correct return/restart
398 @ r5 - spsr_<exception>
399 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
400 @
401 @ Also, separately save sp_usr and lr_usr
402 @
403 stmia r0, {r4 - r6}
404 ARM( stmdb r0, {sp, lr}^ )
405 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
406
407 .if \uaccess
408 uaccess_disable ip
409 .endif
410
411 @ Enable the alignment trap while in kernel mode
412 ATRAP( teq r8, r7)
413 ATRAP( mcrne p15, 0, r8, c1, c0, 0)
414
415 @
416 @ Clear FP to mark the first stack frame
417 @
418 zero_fp
419
420 .if \trace
421#ifdef CONFIG_TRACE_IRQFLAGS
422 bl trace_hardirqs_off
423#endif
424 ct_user_exit save = 0
425 .endif
426 .endm
427
428 .macro kuser_cmpxchg_check
429#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
430#ifndef CONFIG_MMU
431#warning "NPTL on non MMU needs fixing"
432#else
433 @ Make sure our user space atomic helper is restarted
434 @ if it was interrupted in a critical region. Here we
435 @ perform a quick test inline since it should be false
436 @ 99.9999% of the time. The rest is done out of line.
437 cmp r4, #TASK_SIZE
438 blhs kuser_cmpxchg64_fixup
439#endif
440#endif
441 .endm
442
443 .align 5
444__dabt_usr:
445 usr_entry uaccess=0
446 kuser_cmpxchg_check
447 mov r2, sp
448 dabt_helper
449 b ret_from_exception
450 UNWIND(.fnend )
451ENDPROC(__dabt_usr)
452
453 .align 5
454__irq_usr:
455 usr_entry
456 kuser_cmpxchg_check
457 irq_handler
458 get_thread_info tsk
459 mov why, #0
460 b ret_to_user_from_irq
461 UNWIND(.fnend )
462ENDPROC(__irq_usr)
463
464 .ltorg
465
466 .align 5
467__und_usr:
468 usr_entry uaccess=0
469
470 mov r2, r4
471 mov r3, r5
472
473 @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
474 @ faulting instruction depending on Thumb mode.
475 @ r3 = regs->ARM_cpsr
476 @
477 @ The emulation code returns using r9 if it has emulated the
478 @ instruction, or the more conventional lr if we are to treat
479 @ this as a real undefined instruction
480 @
481 badr r9, ret_from_exception
482
483 @ IRQs must be enabled before attempting to read the instruction from
484 @ user space since that could cause a page/translation fault if the
485 @ page table was modified by another CPU.
486 enable_irq
487
488 tst r3, #PSR_T_BIT @ Thumb mode?
489 bne __und_usr_thumb
490 sub r4, r2, #4 @ ARM instr at LR - 4
4911: ldrt r0, [r4]
492 ARM_BE8(rev r0, r0) @ little endian instruction
493
494 uaccess_disable ip
495
496 @ r0 = 32-bit ARM instruction which caused the exception
497 @ r2 = PC value for the following instruction (:= regs->ARM_pc)
498 @ r4 = PC value for the faulting instruction
499 @ lr = 32-bit undefined instruction function
500 badr lr, __und_usr_fault_32
501 b call_fpe
502
503__und_usr_thumb:
504 @ Thumb instruction
505 sub r4, r2, #2 @ First half of thumb instr at LR - 2
506#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
507/*
508 * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms
509 * can never be supported in a single kernel, this code is not applicable at
510 * all when __LINUX_ARM_ARCH__ < 6. This allows simplifying assumptions to be
511 * made about .arch directives.
512 */
513#if __LINUX_ARM_ARCH__ < 7
514/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
515#define NEED_CPU_ARCHITECTURE
516 ldr r5, .LCcpu_architecture
517 ldr r5, [r5]
518 cmp r5, #CPU_ARCH_ARMv7
519 blo __und_usr_fault_16 @ 16bit undefined instruction
520/*
521 * The following code won't get run unless the running CPU really is v7, so
522 * coding round the lack of ldrht on older arches is pointless. Temporarily
523 * override the assembler target arch with the minimum required instead:
524 */
525 .arch armv6t2
526#endif
5272: ldrht r5, [r4]
528ARM_BE8(rev16 r5, r5) @ little endian instruction
529 cmp r5, #0xe800 @ 32bit instruction if xx != 0
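	@ (halfwords with bits [15:11] = 0b11101, 0b11110 or 0b11111, i.e.
	@ values >= 0xe800, are the first half of a 32-bit Thumb-2 encoding)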
530 blo __und_usr_fault_16_pan @ 16bit undefined instruction
5313: ldrht r0, [r2]
532ARM_BE8(rev16 r0, r0) @ little endian instruction
533 uaccess_disable ip
534 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
535 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
536 orr r0, r0, r5, lsl #16
537 badr lr, __und_usr_fault_32
538 @ r0 = the two 16-bit Thumb instructions which caused the exception
539 @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
540 @ r4 = PC value for the first 16-bit Thumb instruction
541 @ lr = 32bit undefined instruction function
542
543#if __LINUX_ARM_ARCH__ < 7
544/* If the target arch was overridden, change it back: */
545#ifdef CONFIG_CPU_32v6K
546 .arch armv6k
547#else
548 .arch armv6
549#endif
550#endif /* __LINUX_ARM_ARCH__ < 7 */
551#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
552 b __und_usr_fault_16
553#endif
554 UNWIND(.fnend)
555ENDPROC(__und_usr)
556
557/*
558 * The out of line fixup for the ldrt instructions above.
559 */
560 .pushsection .text.fixup, "ax"
561 .align 2
5624: str r4, [sp, #S_PC] @ retry current instruction
563 ret r9
564 .popsection
565 .pushsection __ex_table,"a"
566 .long 1b, 4b
567#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
568 .long 2b, 4b
569 .long 3b, 4b
570#endif
571 .popsection
572
573/*
574 * Check whether the instruction is a co-processor instruction.
575 * If yes, we need to call the relevant co-processor handler.
576 *
577 * Note that we don't do a full check here for the co-processor
578 * instructions; all instructions with bit 27 set are well
579 * defined. The only instructions that should fault are the
580 * co-processor instructions. However, we have to watch out
581 * for the ARM6/ARM7 SWI bug.
582 *
583 * NEON is a special case that has to be handled here. Not all
584 * NEON instructions are co-processor instructions, so we have
 585 * to make a special case of checking for them. Plus, there are
586 * five groups of them, so we have a table of mask/opcode pairs
587 * to check against, and if any match then we branch off into the
588 * NEON handler code.
589 *
590 * Emulators may wish to make use of the following registers:
591 * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
592 * r2 = PC value to resume execution after successful emulation
593 * r9 = normal "successful" return address
 594 * r10 = this thread's thread_info structure
595 * lr = unrecognised instruction return address
596 * IRQs enabled, FIQs enabled.
597 */
598 @
599 @ Fall-through from Thumb-2 __und_usr
600 @
601#ifdef CONFIG_NEON
602 get_thread_info r10 @ get current thread
603 adr r6, .LCneon_thumb_opcodes
604 b 2f
605#endif
606call_fpe:
607 get_thread_info r10 @ get current thread
608#ifdef CONFIG_NEON
609 adr r6, .LCneon_arm_opcodes
6102: ldr r5, [r6], #4 @ mask value
611 ldr r7, [r6], #4 @ opcode bits matching in mask
612 cmp r5, #0 @ end mask?
613 beq 1f
614 and r8, r0, r5
615 cmp r8, r7 @ NEON instruction?
616 bne 2b
617 mov r7, #1
618 strb r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used
619 strb r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used
620 b do_vfp @ let VFP handler handle this
6211:
622#endif
623 tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
624 tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
625 reteq lr
626 and r8, r0, #0x00000f00 @ mask out CP number
627 THUMB( lsr r8, r8, #8 )
628 mov r7, #1
629 add r6, r10, #TI_USED_CP
630 ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
631 THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
632#ifdef CONFIG_IWMMXT
633 @ Test if we need to give access to iWMMXt coprocessors
634 ldr r5, [r10, #TI_FLAGS]
635 rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only
636 movscs r7, r5, lsr #(TIF_USING_IWMMXT + 1)
637 bcs iwmmxt_task_enable
638#endif
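	@ Dispatch on the coprocessor number: r8 = CP# << 8, so r8 lsr #6 is
	@ CP# * 4.  In ARM state pc reads as this add's address + 8, i.e. the
	@ address of the first branch-table entry below (add + nop = 8 bytes),
	@ so the add lands on the entry for the faulting coprocessor.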
639 ARM( add pc, pc, r8, lsr #6 )
640 THUMB( lsl r8, r8, #2 )
641 THUMB( add pc, r8 )
642 nop
643
644 ret.w lr @ CP#0
645 W(b) do_fpe @ CP#1 (FPE)
646 W(b) do_fpe @ CP#2 (FPE)
647 ret.w lr @ CP#3
648#ifdef CONFIG_CRUNCH
649 b crunch_task_enable @ CP#4 (MaverickCrunch)
650 b crunch_task_enable @ CP#5 (MaverickCrunch)
651 b crunch_task_enable @ CP#6 (MaverickCrunch)
652#else
653 ret.w lr @ CP#4
654 ret.w lr @ CP#5
655 ret.w lr @ CP#6
656#endif
657 ret.w lr @ CP#7
658 ret.w lr @ CP#8
659 ret.w lr @ CP#9
660#ifdef CONFIG_VFP
661 W(b) do_vfp @ CP#10 (VFP)
662 W(b) do_vfp @ CP#11 (VFP)
663#else
664 ret.w lr @ CP#10 (VFP)
665 ret.w lr @ CP#11 (VFP)
666#endif
667 ret.w lr @ CP#12
668 ret.w lr @ CP#13
669 ret.w lr @ CP#14 (Debug)
670 ret.w lr @ CP#15 (Control)
671
672#ifdef NEED_CPU_ARCHITECTURE
673 .align 2
674.LCcpu_architecture:
675 .word __cpu_architecture
676#endif
677
678#ifdef CONFIG_NEON
679 .align 6
680
681.LCneon_arm_opcodes:
682 .word 0xfe000000 @ mask
683 .word 0xf2000000 @ opcode
684
685 .word 0xff100000 @ mask
686 .word 0xf4000000 @ opcode
687
688 .word 0x00000000 @ mask
689 .word 0x00000000 @ opcode
690
691.LCneon_thumb_opcodes:
692 .word 0xef000000 @ mask
693 .word 0xef000000 @ opcode
694
695 .word 0xff100000 @ mask
696 .word 0xf9000000 @ opcode
697
698 .word 0x00000000 @ mask
699 .word 0x00000000 @ opcode
700#endif
701
702do_fpe:
703 ldr r4, .LCfp
704 add r10, r10, #TI_FPSTATE @ r10 = workspace
705 ldr pc, [r4] @ Call FP module USR entry point
706
707/*
708 * The FP module is called with these registers set:
709 * r0 = instruction
710 * r2 = PC+4
711 * r9 = normal "successful" return address
712 * r10 = FP workspace
713 * lr = unrecognised FP instruction return address
714 */
715
716 .pushsection .data
717 .align 2
718ENTRY(fp_enter)
719 .word no_fp
720 .popsection
721
722ENTRY(no_fp)
723 ret lr
724ENDPROC(no_fp)
725
726__und_usr_fault_32:
727 mov r1, #4
728 b 1f
729__und_usr_fault_16_pan:
730 uaccess_disable ip
731__und_usr_fault_16:
732 mov r1, #2
7331: mov r0, sp
734 badr lr, ret_from_exception
735 b __und_fault
736ENDPROC(__und_usr_fault_32)
737ENDPROC(__und_usr_fault_16)
738
739 .align 5
740__pabt_usr:
741 usr_entry
742 mov r2, sp @ regs
743 pabt_helper
744 UNWIND(.fnend )
745 /* fall through */
746/*
747 * This is the return code to user mode for abort handlers
748 */
749ENTRY(ret_from_exception)
750 UNWIND(.fnstart )
751 UNWIND(.cantunwind )
752 get_thread_info tsk
753 mov why, #0
754 b ret_to_user
755 UNWIND(.fnend )
756ENDPROC(__pabt_usr)
757ENDPROC(ret_from_exception)
758
759 .align 5
760__fiq_usr:
761 usr_entry trace=0
762 kuser_cmpxchg_check
763 mov r0, sp @ struct pt_regs *regs
764 bl handle_fiq_as_nmi
765 get_thread_info tsk
766 restore_user_regs fast = 0, offset = 0
767 UNWIND(.fnend )
768ENDPROC(__fiq_usr)
769
770/*
771 * Register switch for ARMv3 and ARMv4 processors
772 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
773 * previous and next are guaranteed not to be the same.
774 */
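/*
 * Roughly the C-level contract (a sketch; the declaration lives in
 * <asm/switch_to.h>, the parameter names here are only illustrative):
 *
 *	struct task_struct *__switch_to(struct task_struct *prev,
 *					struct thread_info *prev_ti,
 *					struct thread_info *next_ti);
 *
 * The value returned in r0 (the previous task) is what the generic
 * scheduler code eventually hands to finish_task_switch().
 */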
775ENTRY(__switch_to)
776 UNWIND(.fnstart )
777 UNWIND(.cantunwind )
778 add ip, r1, #TI_CPU_SAVE
779 ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack
780 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
781 THUMB( str sp, [ip], #4 )
782 THUMB( str lr, [ip], #4 )
783 ldr r4, [r2, #TI_TP_VALUE]
784 ldr r5, [r2, #TI_TP_VALUE + 4]
785#ifdef CONFIG_CPU_USE_DOMAINS
786 mrc p15, 0, r6, c3, c0, 0 @ Get domain register
787 str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register
788 ldr r6, [r2, #TI_CPU_DOMAIN]
789#endif
790 switch_tls r1, r4, r5, r3, r7
791#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
792 ldr r7, [r2, #TI_TASK]
793 ldr r8, =__stack_chk_guard
794 .if (TSK_STACK_CANARY > IMM12_MASK)
795 add r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK
796 .endif
797 ldr r7, [r7, #TSK_STACK_CANARY & IMM12_MASK]
798#endif
799#ifdef CONFIG_CPU_USE_DOMAINS
800 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
801#endif
802 mov r5, r0
803 add r4, r2, #TI_CPU_SAVE
804 ldr r0, =thread_notify_head
805 mov r1, #THREAD_NOTIFY_SWITCH
806 bl atomic_notifier_call_chain
807#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
808 str r7, [r8]
809#endif
810 THUMB( mov ip, r4 )
811 mov r0, r5
812 ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously
813 THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously
814 THUMB( ldr sp, [ip], #4 )
815 THUMB( ldr pc, [ip] )
816 UNWIND(.fnend )
817ENDPROC(__switch_to)
818
819 __INIT
820
821/*
822 * User helpers.
823 *
824 * Each segment is 32-byte aligned and will be moved to the top of the high
825 * vector page. New segments (if ever needed) must be added in front of
826 * existing ones. This mechanism should be used only for things that are
827 * really small and justified, and not be abused freely.
828 *
829 * See Documentation/arm/kernel_user_helpers.rst for formal definitions.
830 */
831 THUMB( .arm )
832
833 .macro usr_ret, reg
834#ifdef CONFIG_ARM_THUMB
835 bx \reg
836#else
837 ret \reg
838#endif
839 .endm
840
841 .macro kuser_pad, sym, size
842 .if (. - \sym) & 3
843 .rept 4 - (. - \sym) & 3
844 .byte 0
845 .endr
846 .endif
847 .rept (\size - (. - \sym)) / 4
848 .word 0xe7fddef1
849 .endr
850 .endm
851
852#ifdef CONFIG_KUSER_HELPERS
853 .align 5
854 .globl __kuser_helper_start
855__kuser_helper_start:
856
857/*
858 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
859 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
860 */
861
862__kuser_cmpxchg64: @ 0xffff0f60
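/*
 * User-space calling sketch (after Documentation/arm/kernel_user_helpers;
 * the typedef and macro names are illustrative, only the 0xffff0f60 entry
 * point and the oldval/newval/ptr argument order are fixed by the ABI):
 *
 *	typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
 *					  const int64_t *newval,
 *					  volatile int64_t *ptr);
 *	#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
 *
 * Returns zero (with the C flag set) if *ptr was updated, non-zero
 * otherwise, so callers typically retry in a loop.
 */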
863
864#if defined(CONFIG_CPU_32v6K)
865
866 stmfd sp!, {r4, r5, r6, r7}
867 ldrd r4, r5, [r0] @ load old val
868 ldrd r6, r7, [r1] @ load new val
869 smp_dmb arm
8701: ldrexd r0, r1, [r2] @ load current val
871 eors r3, r0, r4 @ compare with oldval (1)
872 eorseq r3, r1, r5 @ compare with oldval (2)
873 strexdeq r3, r6, r7, [r2] @ store newval if eq
874 teqeq r3, #1 @ success?
875 beq 1b @ if no then retry
876 smp_dmb arm
877 rsbs r0, r3, #0 @ set returned val and C flag
878 ldmfd sp!, {r4, r5, r6, r7}
879 usr_ret lr
880
881#elif !defined(CONFIG_SMP)
882
883#ifdef CONFIG_MMU
884
885 /*
886 * The only thing that can break atomicity in this cmpxchg64
887 * implementation is either an IRQ or a data abort exception
888 * causing another process/thread to be scheduled in the middle of
889 * the critical sequence. The same strategy as for cmpxchg is used.
890 */
891 stmfd sp!, {r4, r5, r6, lr}
892 ldmia r0, {r4, r5} @ load old val
893 ldmia r1, {r6, lr} @ load new val
8941: ldmia r2, {r0, r1} @ load current val
895 eors r3, r0, r4 @ compare with oldval (1)
896 eorseq r3, r1, r5 @ compare with oldval (2)
8972: stmiaeq r2, {r6, lr} @ store newval if eq
898 rsbs r0, r3, #0 @ set return val and C flag
899 ldmfd sp!, {r4, r5, r6, pc}
900
901 .text
902kuser_cmpxchg64_fixup:
 903	@ Called from the kuser_cmpxchg_check macro.
904 @ r4 = address of interrupted insn (must be preserved).
905 @ sp = saved regs. r7 and r8 are clobbered.
906 @ 1b = first critical insn, 2b = last critical insn.
907 @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
908 mov r7, #0xffff0fff
909 sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
910 subs r8, r4, r7
911 rsbscs r8, r8, #(2b - 1b)
912 strcs r7, [sp, #S_PC]
913#if __LINUX_ARM_ARCH__ < 6
914 bcc kuser_cmpxchg32_fixup
915#endif
916 ret lr
917 .previous
918
919#else
920#warning "NPTL on non MMU needs fixing"
921 mov r0, #-1
922 adds r0, r0, #0
923 usr_ret lr
924#endif
925
926#else
927#error "incoherent kernel configuration"
928#endif
929
930 kuser_pad __kuser_cmpxchg64, 64
931
932__kuser_memory_barrier: @ 0xffff0fa0
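/*
 * User-space calling sketch (names illustrative, address fixed by the ABI):
 *
 *	typedef void (__kuser_dmb_t)(void);
 *	#define __kuser_dmb (*(__kuser_dmb_t *)0xffff0fa0)
 *
 * Issues a full memory barrier appropriate for the CPU and SMP configuration
 * the kernel was booted on.
 */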
933 smp_dmb arm
934 usr_ret lr
935
936 kuser_pad __kuser_memory_barrier, 32
937
938__kuser_cmpxchg: @ 0xffff0fc0
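/*
 * User-space calling sketch (after Documentation/arm/kernel_user_helpers;
 * atomic_add() is only an illustration, the 0xffff0fc0 entry point and the
 * oldval/newval/ptr argument order are what the ABI guarantees):
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 *	int atomic_add(volatile int *ptr, int val)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = *ptr;
 *			new = old + val;
 *		} while (__kuser_cmpxchg(old, new, ptr));
 *
 *		return new;
 *	}
 *
 * A zero return (C flag set) means *ptr was updated; non-zero means the
 * comparison failed and the caller should retry.
 */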
939
940#if __LINUX_ARM_ARCH__ < 6
941
942#ifdef CONFIG_MMU
943
944 /*
945 * The only thing that can break atomicity in this cmpxchg
946 * implementation is either an IRQ or a data abort exception
947 * causing another process/thread to be scheduled in the middle
948 * of the critical sequence. To prevent this, code is added to
949 * the IRQ and data abort exception handlers to set the pc back
950 * to the beginning of the critical section if it is found to be
951 * within that critical section (see kuser_cmpxchg_fixup).
952 */
9531: ldr r3, [r2] @ load current val
954 subs r3, r3, r0 @ compare with oldval
9552: streq r1, [r2] @ store newval if eq
956 rsbs r0, r3, #0 @ set return val and C flag
957 usr_ret lr
958
959 .text
960kuser_cmpxchg32_fixup:
961 @ Called from kuser_cmpxchg_check macro.
962 @ r4 = address of interrupted insn (must be preserved).
963 @ sp = saved regs. r7 and r8 are clobbered.
964 @ 1b = first critical insn, 2b = last critical insn.
965 @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
966 mov r7, #0xffff0fff
967 sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
968 subs r8, r4, r7
969 rsbscs r8, r8, #(2b - 1b)
970 strcs r7, [sp, #S_PC]
971 ret lr
972 .previous
973
974#else
975#warning "NPTL on non MMU needs fixing"
976 mov r0, #-1
977 adds r0, r0, #0
978 usr_ret lr
979#endif
980
981#else
982
983 smp_dmb arm
9841: ldrex r3, [r2]
985 subs r3, r3, r0
986 strexeq r3, r1, [r2]
987 teqeq r3, #1
988 beq 1b
989 rsbs r0, r3, #0
990 /* beware -- each __kuser slot must be 8 instructions max */
991 ALT_SMP(b __kuser_memory_barrier)
992 ALT_UP(usr_ret lr)
993
994#endif
995
996 kuser_pad __kuser_cmpxchg, 32
997
998__kuser_get_tls: @ 0xffff0fe0
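/*
 * User-space calling sketch (names illustrative, address fixed by the ABI):
 *
 *	typedef void * (__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 *	void *tls = __kuser_get_tls();
 */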
999 ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
1000 usr_ret lr
1001 mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
1002 kuser_pad __kuser_get_tls, 16
1003 .rep 3
1004 .word 0 @ 0xffff0ff0 software TLS value, then
1005 .endr @ pad up to __kuser_helper_version
1006
1007__kuser_helper_version: @ 0xffff0ffc
1008 .word ((__kuser_helper_end - __kuser_helper_start) >> 5)
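/*
 * User space can sanity-check which helpers are available before using
 * them, e.g. (a sketch following the kernel_user_helpers documentation):
 *
 *	#define __kuser_helper_version (*(int32_t *)0xffff0ffc)
 *
 *	if (__kuser_helper_version < 2)
 *		abort();
 *
 * where a version of at least 2 is required for __kuser_cmpxchg, for
 * example.
 */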
1009
1010 .globl __kuser_helper_end
1011__kuser_helper_end:
1012
1013#endif
1014
1015 THUMB( .thumb )
1016
1017/*
1018 * Vector stubs.
1019 *
1020 * This code is copied to 0xffff1000 so we can use branches in the
1021 * vectors, rather than ldr's. Note that this code must not exceed
1022 * a page size.
1023 *
1024 * Common stub entry macro:
1025 * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
1026 *
1027 * SP points to a minimal amount of processor-private memory, the address
1028 * of which is copied into r0 for the mode specific abort handler.
1029 */
1030 .macro vector_stub, name, mode, correction=0
1031 .align 5
1032
1033vector_\name:
1034 .if \correction
1035 sub lr, lr, #\correction
1036 .endif
1037
1038 @
1039 @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
1040 @ (parent CPSR)
1041 @
1042 stmia sp, {r0, lr} @ save r0, lr
1043 mrs lr, spsr
1044 str lr, [sp, #8] @ save spsr
1045
1046 @
1047 @ Prepare for SVC32 mode. IRQs remain disabled.
1048 @
1049 mrs r0, cpsr
1050 eor r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
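	@ e.g. for vector_irq: IRQ_MODE ^ SVC_MODE = 0x12 ^ 0x13 = 0x01, so the
	@ eor flips the mode field from IRQ to SVC; PSR_ISETSTATE additionally
	@ selects ARM or Thumb state to match the kernel build.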
1051 msr spsr_cxsf, r0
1052
1053 @
1054 @ the branch table must immediately follow this code
1055 @
1056 and lr, lr, #0x0f
1057 THUMB( adr r0, 1f )
1058 THUMB( ldr lr, [r0, lr, lsl #2] )
1059 mov r0, sp
1060 ARM( ldr lr, [pc, lr, lsl #2] )
1061 movs pc, lr @ branch to handler in SVC mode
1062ENDPROC(vector_\name)
1063
1064 .align 2
1065 @ handler addresses follow this label
10661:
1067 .endm
1068
1069 .section .stubs, "ax", %progbits
1070 @ This must be the first word
1071 .word vector_swi
1072
1073vector_rst:
1074 ARM( swi SYS_ERROR0 )
1075 THUMB( svc #0 )
1076 THUMB( nop )
1077 b vector_und
1078
1079/*
1080 * Interrupt dispatcher
1081 */
1082 vector_stub irq, IRQ_MODE, 4
1083
1084 .long __irq_usr @ 0 (USR_26 / USR_32)
1085 .long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
1086 .long __irq_invalid @ 2 (IRQ_26 / IRQ_32)
1087 .long __irq_svc @ 3 (SVC_26 / SVC_32)
1088 .long __irq_invalid @ 4
1089 .long __irq_invalid @ 5
1090 .long __irq_invalid @ 6
1091 .long __irq_invalid @ 7
1092 .long __irq_invalid @ 8
1093 .long __irq_invalid @ 9
1094 .long __irq_invalid @ a
1095 .long __irq_invalid @ b
1096 .long __irq_invalid @ c
1097 .long __irq_invalid @ d
1098 .long __irq_invalid @ e
1099 .long __irq_invalid @ f
1100
1101/*
1102 * Data abort dispatcher
1103 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
1104 */
1105 vector_stub dabt, ABT_MODE, 8
1106
1107 .long __dabt_usr @ 0 (USR_26 / USR_32)
1108 .long __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
1109 .long __dabt_invalid @ 2 (IRQ_26 / IRQ_32)
1110 .long __dabt_svc @ 3 (SVC_26 / SVC_32)
1111 .long __dabt_invalid @ 4
1112 .long __dabt_invalid @ 5
1113 .long __dabt_invalid @ 6
1114 .long __dabt_invalid @ 7
1115 .long __dabt_invalid @ 8
1116 .long __dabt_invalid @ 9
1117 .long __dabt_invalid @ a
1118 .long __dabt_invalid @ b
1119 .long __dabt_invalid @ c
1120 .long __dabt_invalid @ d
1121 .long __dabt_invalid @ e
1122 .long __dabt_invalid @ f
1123
1124/*
1125 * Prefetch abort dispatcher
1126 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
1127 */
1128 vector_stub pabt, ABT_MODE, 4
1129
1130 .long __pabt_usr @ 0 (USR_26 / USR_32)
1131 .long __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
1132 .long __pabt_invalid @ 2 (IRQ_26 / IRQ_32)
1133 .long __pabt_svc @ 3 (SVC_26 / SVC_32)
1134 .long __pabt_invalid @ 4
1135 .long __pabt_invalid @ 5
1136 .long __pabt_invalid @ 6
1137 .long __pabt_invalid @ 7
1138 .long __pabt_invalid @ 8
1139 .long __pabt_invalid @ 9
1140 .long __pabt_invalid @ a
1141 .long __pabt_invalid @ b
1142 .long __pabt_invalid @ c
1143 .long __pabt_invalid @ d
1144 .long __pabt_invalid @ e
1145 .long __pabt_invalid @ f
1146
1147/*
1148 * Undef instr entry dispatcher
1149 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
1150 */
1151 vector_stub und, UND_MODE
1152
1153 .long __und_usr @ 0 (USR_26 / USR_32)
1154 .long __und_invalid @ 1 (FIQ_26 / FIQ_32)
1155 .long __und_invalid @ 2 (IRQ_26 / IRQ_32)
1156 .long __und_svc @ 3 (SVC_26 / SVC_32)
1157 .long __und_invalid @ 4
1158 .long __und_invalid @ 5
1159 .long __und_invalid @ 6
1160 .long __und_invalid @ 7
1161 .long __und_invalid @ 8
1162 .long __und_invalid @ 9
1163 .long __und_invalid @ a
1164 .long __und_invalid @ b
1165 .long __und_invalid @ c
1166 .long __und_invalid @ d
1167 .long __und_invalid @ e
1168 .long __und_invalid @ f
1169
1170 .align 5
1171
1172/*=============================================================================
1173 * Address exception handler
1174 *-----------------------------------------------------------------------------
1175 * These aren't too critical.
1176 * (they're not supposed to happen, and won't happen in 32-bit data mode).
1177 */
1178
1179vector_addrexcptn:
1180 b vector_addrexcptn
1181
1182/*=============================================================================
1183 * FIQ "NMI" handler
1184 *-----------------------------------------------------------------------------
 1185 * Handle a FIQ using the SVC stack allowing FIQs to act like NMIs on x86
1186 * systems.
1187 */
1188 vector_stub fiq, FIQ_MODE, 4
1189
1190 .long __fiq_usr @ 0 (USR_26 / USR_32)
1191 .long __fiq_svc @ 1 (FIQ_26 / FIQ_32)
1192 .long __fiq_svc @ 2 (IRQ_26 / IRQ_32)
1193 .long __fiq_svc @ 3 (SVC_26 / SVC_32)
1194 .long __fiq_svc @ 4
1195 .long __fiq_svc @ 5
1196 .long __fiq_svc @ 6
1197 .long __fiq_abt @ 7
1198 .long __fiq_svc @ 8
1199 .long __fiq_svc @ 9
1200 .long __fiq_svc @ a
1201 .long __fiq_svc @ b
1202 .long __fiq_svc @ c
1203 .long __fiq_svc @ d
1204 .long __fiq_svc @ e
1205 .long __fiq_svc @ f
1206
1207 .globl vector_fiq
1208
1209 .section .vectors, "ax", %progbits
1210.L__vectors_start:
1211 W(b) vector_rst
1212 W(b) vector_und
1213 W(ldr) pc, .L__vectors_start + 0x1000
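	@ The SWI vector loads pc from the word at vectors + 0x1000, which is
	@ the first word of the .stubs section above, i.e. the address of
	@ vector_swi.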
1214 W(b) vector_pabt
1215 W(b) vector_dabt
1216 W(b) vector_addrexcptn
1217 W(b) vector_irq
1218 W(b) vector_fiq
1219
1220 .data
1221 .align 2
1222
1223 .globl cr_alignment
1224cr_alignment:
1225 .space 4