/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	adr	lr, BSYM(9997f)
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm
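
/*
 * Illustrative note (not from the original source): with
 * CONFIG_MULTI_IRQ_HANDLER, handle_arch_irq is a C function pointer
 * of type void (*)(struct pt_regs *), installed by the platform at
 * boot.  The sequence above is an open-coded indirect call: r0
 * carries the pt_regs pointer, lr is pointed at the 9997 label so
 * the handler returns there, and "ldr pc, [r1]" makes the jump --
 * in effect handle_arch_irq(regs), with both paths rejoining at
 * 9997.
 */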

	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endm
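
/*
 * Worked example (not from the original source): svc_entry keeps sp
 * 8-byte aligned for EABI.  The sub above takes S_FRAME_SIZE - 4
 * (plus any stack hole) and the final "str r3, [sp, #-4]!" takes 4
 * more, S_FRAME_SIZE bytes in all.  With S_FRAME_SIZE = 72 (a
 * multiple of 8):
 *
 *	original sp % 8 == 0: (sp - 68) & 4 != 0, subeq skipped,
 *		frame ends at sp - 72, still 8-byte aligned
 *	original sp % 8 == 4: (sp - 68) & 4 == 0, subeq takes 4
 *		more, frame ends at sp - 76, 8-byte aligned again
 *
 * SPFIX(addeq r2, r2, #4) undoes that extra word when the original
 * sp is reconstructed for the saved sp_svc.
 */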

	.align	5
__dabt_svc:
	svc_entry
	mov	r2, sp
	dabt_helper

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif

__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM
	@ the PC will be pointing at the next instruction, and we have
	@ to subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction.  We
	@ have to subtract 2.
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)
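
/*
 * Worked example (not from the original source): the caller places
 * the correction in r1 (4 for ARM, 2 for Thumb).  If an undefined
 * ARM instruction at 0x8000 traps, the exception leaves
 * regs->ARM_pc = 0x8004; subtracting r1 = 4 points the saved PC
 * back at 0x8000 before do_undefinstr() runs.
 */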

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	mov	r1, #2
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
	blo	__und_svc_fault
	ldrh	r9, [r4]			@ bottom 16 bits
	add	r4, r4, #2
	str	r4, [sp, #S_PC]
	orr	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(__und_svc_finish)
	mov	r2, r4
	bl	call_fpe

	mov	r1, #4				@ PC correction to apply
__und_svc_fault:
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

	@
	@ IRQs off again before pulling preserved data off the stack
	@
__und_svc_finish:
	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE must be too
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif
	.endm

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	mov	r2, r4
	mov	r3, r5

	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	adr	r9, BSYM(ret_from_exception)

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r0, r0				@ little endian instruction
#endif
	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	adr	lr, BSYM(__und_usr_fault_32)
	b	call_fpe

__und_usr_thumb:
	@ Thumb instruction
	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:	ldrht	r5, [r4]
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16		@ 16bit undefined instruction
3:	ldrht	r0, [r2]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
	adr	lr, BSYM(__und_usr_fault_32)
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)
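
/*
 * Worked example (not from the original source): Thumb encodings
 * whose first halfword is 0xe800 or above (top bits 0b11101,
 * 0b11110 or 0b11111) are 32 bits long.  A first halfword of, say,
 * 0xeeb0 passes the "cmp r5, #0xe800" check, so the second halfword
 * (say 0x0a40) is fetched and merged into r0 = 0xeeb00a40 before
 * reaching call_fpe; any first halfword below 0xe800 is a complete
 * 16-bit encoding and takes the blo to __und_usr_fault_16.
 */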

/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .fixup, "ax"
	.align	2
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs disabled, FIQs enabled.
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:
	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end mask?
	beq	1f
	and	r8, r0, r7
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	get_thread_info r10
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)
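
/*
 * Worked example (not from the original source): r8 holds the CP
 * number shifted left by 8 (opcode bits 8-11), so "lsr #6" in the
 * add yields CP# * 4.  In ARM state pc reads as the add's address
 * + 8, which is exactly where the CP#0 entry sits (the nop fills
 * the +4 slot), so the add lands on the 4-byte entry for that
 * coprocessor.  E.g. CP#10 (VFP): r8 = 0xa00, 0xa00 >> 6 = 40,
 * ten entries in -- the "W(b) do_vfp" slot.
 */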

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16:
	mov	r1, #2
1:	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	set_tls	r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
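
/*
 * Illustrative sketch (not from the original source; the document
 * above is authoritative): user space calls these helpers through
 * their fixed addresses in the vector page, e.g. for
 * __kuser_cmpxchg at 0xffff0fc0:
 *
 *	@ r0 = oldval, r1 = newval, r2 = pointer to the word
 *	ldr	r3, =0xffff0fc0
 *	mov	lr, pc			@ or blx r3 on ARMv5+
 *	mov	pc, r3
 *	@ on return: r0 == 0 and C set if *ptr was updated
 */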
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg64
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg64

#elif defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if not, retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from the kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	/* pad to next slot */
	.rept	(16 - (. - __kuser_cmpxchg64)/4)
	.word	0
	.endr
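
/*
 * Illustrative sketch (not from the original source; see
 * Documentation/arm/kernel_user_helpers.txt): __kuser_cmpxchg64
 * takes pointers in all three arguments:
 *
 *	@ r0 = pointer to oldval, r1 = pointer to newval,
 *	@ r2 = pointer to the 64-bit target
 *	ldr	r3, =0xffff0f60
 *	blx	r3
 *	@ on return: r0 == 0 and C set if the target was updated
 */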

	.align	5

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	.align	5

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	.align	5

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	.rep	4
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version
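
/*
 * Note (not from the original source): the ldr above sits at
 * 0xffff0fe0 and pc reads 8 bytes ahead, so #(16 - 8) loads the
 * word at 0xffff0ff0 -- the software TLS slot.  On CPUs with a
 * hardware TLS register, kuser_get_tls_init() (traps.c) copies the
 * mrc at 0xffff0fe8 over the ldr so the register is read directly
 * instead.
 */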

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm
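
/*
 * Worked example (not from the original source): the eor flips
 * exactly the CPSR bits in which the entry mode differs from SVC
 * mode.  For vector_irq, \mode = IRQ_MODE (0x12) and SVC_MODE is
 * 0x13, so \mode ^ SVC_MODE = 0x01 turns mode 0x12 into 0x13;
 * PSR_ISETSTATE likewise selects the kernel's instruction set for
 * the movs pc.  "and lr, lr, #0x0f" then indexes the 16-entry
 * table with the parent's mode bits: USR = 0, FIQ = 1, IRQ = 2,
 * SVC = 3, and so on, matching the tables below.
 */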

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
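
/*
 * Note (not from the original source): the vectors below are
 * assembled alongside __vectors_start but run at 0xffff0000, while
 * the stubs run at 0xffff0200.  Writing the targets as
 * "vector_xxx + stubs_offset" bakes that separation into the
 * PC-relative displacement: once both blocks are copied, the branch
 * in a vector slot lands on
 * 0xffff0200 + (vector_xxx - __stubs_start), the stub's new home.
 */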

	.globl	__vectors_start
__vectors_start:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif
v3.1
   1/*
   2 *  linux/arch/arm/kernel/entry-armv.S
   3 *
   4 *  Copyright (C) 1996,1997,1998 Russell King.
   5 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
   6 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License version 2 as
  10 * published by the Free Software Foundation.
  11 *
  12 *  Low-level vector interface routines
  13 *
  14 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
  15 *  that causes it to save wrong values...  Be aware!
  16 */
  17
 
  18#include <asm/memory.h>
  19#include <asm/glue-df.h>
  20#include <asm/glue-pf.h>
  21#include <asm/vfpmacros.h>
 
  22#include <mach/entry-macro.S>
 
  23#include <asm/thread_notify.h>
  24#include <asm/unwind.h>
  25#include <asm/unistd.h>
  26#include <asm/tls.h>
 
  27
  28#include "entry-header.S"
  29#include <asm/entry-macro-multi.S>
  30
  31/*
  32 * Interrupt handling.
  33 */
  34	.macro	irq_handler
  35#ifdef CONFIG_MULTI_IRQ_HANDLER
  36	ldr	r1, =handle_arch_irq
  37	mov	r0, sp
  38	ldr	r1, [r1]
  39	adr	lr, BSYM(9997f)
  40	teq	r1, #0
  41	movne	pc, r1
 
  42#endif
  43	arch_irq_handler_default
  449997:
  45	.endm
  46
  47	.macro	pabt_helper
  48	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
  49#ifdef MULTI_PABORT
  50	ldr	ip, .LCprocfns
  51	mov	lr, pc
  52	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
  53#else
  54	bl	CPU_PABORT_HANDLER
  55#endif
  56	.endm
  57
  58	.macro	dabt_helper
  59
  60	@
  61	@ Call the processor-specific abort handler:
  62	@
  63	@  r2 - pt_regs
  64	@  r4 - aborted context pc
  65	@  r5 - aborted context psr
  66	@
  67	@ The abort handler must return the aborted address in r0, and
  68	@ the fault status register in r1.  r9 must be preserved.
  69	@
  70#ifdef MULTI_DABORT
  71	ldr	ip, .LCprocfns
  72	mov	lr, pc
  73	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
  74#else
  75	bl	CPU_DABORT_HANDLER
  76#endif
  77	.endm
  78
  79#ifdef CONFIG_KPROBES
  80	.section	.kprobes.text,"ax",%progbits
  81#else
  82	.text
  83#endif
  84
  85/*
  86 * Invalid mode handlers
  87 */
  88	.macro	inv_entry, reason
  89	sub	sp, sp, #S_FRAME_SIZE
  90 ARM(	stmib	sp, {r1 - lr}		)
  91 THUMB(	stmia	sp, {r0 - r12}		)
  92 THUMB(	str	sp, [sp, #S_SP]		)
  93 THUMB(	str	lr, [sp, #S_LR]		)
  94	mov	r1, #\reason
  95	.endm
  96
  97__pabt_invalid:
  98	inv_entry BAD_PREFETCH
  99	b	common_invalid
 100ENDPROC(__pabt_invalid)
 101
 102__dabt_invalid:
 103	inv_entry BAD_DATA
 104	b	common_invalid
 105ENDPROC(__dabt_invalid)
 106
 107__irq_invalid:
 108	inv_entry BAD_IRQ
 109	b	common_invalid
 110ENDPROC(__irq_invalid)
 111
 112__und_invalid:
 113	inv_entry BAD_UNDEFINSTR
 114
 115	@
 116	@ XXX fall through to common_invalid
 117	@
 118
 119@
 120@ common_invalid - generic code for failed exception (re-entrant version of handlers)
 121@
 122common_invalid:
 123	zero_fp
 124
 125	ldmia	r0, {r4 - r6}
 126	add	r0, sp, #S_PC		@ here for interlock avoidance
 127	mov	r7, #-1			@  ""   ""    ""        ""
 128	str	r4, [sp]		@ save preserved r0
 129	stmia	r0, {r5 - r7}		@ lr_<exception>,
 130					@ cpsr_<exception>, "old_r0"
 131
 132	mov	r0, sp
 133	b	bad_mode
 134ENDPROC(__und_invalid)
 135
 136/*
 137 * SVC mode handlers
 138 */
 139
 140#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
 141#define SPFIX(code...) code
 142#else
 143#define SPFIX(code...)
 144#endif
 145
 146	.macro	svc_entry, stack_hole=0
 147 UNWIND(.fnstart		)
 148 UNWIND(.save {r0 - pc}		)
 149	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 150#ifdef CONFIG_THUMB2_KERNEL
 151 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 152 SPFIX(	mov	r0, sp		)
 153 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 154 SPFIX(	ldr	r0, [sp]	)	@ restored
 155#else
 156 SPFIX(	tst	sp, #4		)
 157#endif
 158 SPFIX(	subeq	sp, sp, #4	)
 159	stmia	sp, {r1 - r12}
 160
 161	ldmia	r0, {r3 - r5}
 162	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
 163	mov	r6, #-1			@  ""  ""      ""       ""
 164	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 165 SPFIX(	addeq	r2, r2, #4	)
 166	str	r3, [sp, #-4]!		@ save the "real" r0 copied
 167					@ from the exception stack
 168
 169	mov	r3, lr
 170
 171	@
 172	@ We are now ready to fill in the remaining blanks on the stack:
 173	@
 174	@  r2 - sp_svc
 175	@  r3 - lr_svc
 176	@  r4 - lr_<exception>, already fixed up for correct return/restart
 177	@  r5 - spsr_<exception>
 178	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
 179	@
 180	stmia	r7, {r2 - r6}
 181
 182#ifdef CONFIG_TRACE_IRQFLAGS
 183	bl	trace_hardirqs_off
 184#endif
 185	.endm
 186
 187	.align	5
 188__dabt_svc:
 189	svc_entry
 190	mov	r2, sp
 191	dabt_helper
 192
 193	@
 194	@ IRQs off again before pulling preserved data off the stack
 195	@
 196	disable_irq_notrace
 197
 198#ifdef CONFIG_TRACE_IRQFLAGS
 199	tst	r5, #PSR_I_BIT
 200	bleq	trace_hardirqs_on
 201	tst	r5, #PSR_I_BIT
 202	blne	trace_hardirqs_off
 203#endif
 204	svc_exit r5				@ return from exception
 205 UNWIND(.fnend		)
 206ENDPROC(__dabt_svc)
 207
 208	.align	5
 209__irq_svc:
 210	svc_entry
 211	irq_handler
 212
 213#ifdef CONFIG_PREEMPT
 214	get_thread_info tsk
 215	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
 216	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
 217	teq	r8, #0				@ if preempt count != 0
 218	movne	r0, #0				@ force flags to 0
 219	tst	r0, #_TIF_NEED_RESCHED
 220	blne	svc_preempt
 221#endif
 222
 223#ifdef CONFIG_TRACE_IRQFLAGS
 224	@ The parent context IRQs must have been enabled to get here in
 225	@ the first place, so there's no point checking the PSR I bit.
 226	bl	trace_hardirqs_on
 227#endif
 228	svc_exit r5				@ return from exception
 229 UNWIND(.fnend		)
 230ENDPROC(__irq_svc)
 231
 232	.ltorg
 233
 234#ifdef CONFIG_PREEMPT
 235svc_preempt:
 236	mov	r8, lr
 2371:	bl	preempt_schedule_irq		@ irq en/disable is done inside
 238	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
 239	tst	r0, #_TIF_NEED_RESCHED
 240	moveq	pc, r8				@ go again
 241	b	1b
 242#endif
 243
 
 
 
 
 
 
 
 
 
 
 
 
 
 244	.align	5
 245__und_svc:
 246#ifdef CONFIG_KPROBES
 247	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
 248	@ it obviously needs free stack space which then will belong to
 249	@ the saved context.
 250	svc_entry 64
 251#else
 252	svc_entry
 253#endif
 254	@
 255	@ call emulation code, which returns using r9 if it has emulated
 256	@ the instruction, or the more conventional lr if we are to treat
 257	@ this as a real undefined instruction
 258	@
 259	@  r0 - instruction
 260	@
 261#ifndef	CONFIG_THUMB2_KERNEL
 262	ldr	r0, [r4, #-4]
 263#else
 
 264	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
 265	and	r9, r0, #0xf800
 266	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
 267	ldrhhs	r9, [r4]			@ bottom 16 bits
 268	orrhs	r0, r9, r0, lsl #16
 
 
 269#endif
 270	adr	r9, BSYM(1f)
 271	mov	r2, r4
 272	bl	call_fpe
 273
 
 
 274	mov	r0, sp				@ struct pt_regs *regs
 275	bl	do_undefinstr
 276
 277	@
 278	@ IRQs off again before pulling preserved data off the stack
 279	@
 2801:	disable_irq_notrace
 
 281
 282	@
 283	@ restore SPSR and restart the instruction
 284	@
 285	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
 286#ifdef CONFIG_TRACE_IRQFLAGS
 287	tst	r5, #PSR_I_BIT
 288	bleq	trace_hardirqs_on
 289	tst	r5, #PSR_I_BIT
 290	blne	trace_hardirqs_off
 291#endif
 292	svc_exit r5				@ return from exception
 293 UNWIND(.fnend		)
 294ENDPROC(__und_svc)
 295
 296	.align	5
 297__pabt_svc:
 298	svc_entry
 299	mov	r2, sp				@ regs
 300	pabt_helper
 301
 302	@
 303	@ IRQs off again before pulling preserved data off the stack
 304	@
 305	disable_irq_notrace
 306
 307#ifdef CONFIG_TRACE_IRQFLAGS
 308	tst	r5, #PSR_I_BIT
 309	bleq	trace_hardirqs_on
 310	tst	r5, #PSR_I_BIT
 311	blne	trace_hardirqs_off
 312#endif
 313	svc_exit r5				@ return from exception
 314 UNWIND(.fnend		)
 315ENDPROC(__pabt_svc)
 316
 317	.align	5
 318.LCcralign:
 319	.word	cr_alignment
 320#ifdef MULTI_DABORT
 321.LCprocfns:
 322	.word	processor
 323#endif
 324.LCfp:
 325	.word	fp_enter
 326
 327/*
 328 * User mode handlers
 329 *
 330 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 331 */
 332
 333#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
 334#error "sizeof(struct pt_regs) must be a multiple of 8"
 335#endif
 336
 337	.macro	usr_entry
 338 UNWIND(.fnstart	)
 339 UNWIND(.cantunwind	)	@ don't unwind the user space
 340	sub	sp, sp, #S_FRAME_SIZE
 341 ARM(	stmib	sp, {r1 - r12}	)
 342 THUMB(	stmia	sp, {r0 - r12}	)
 343
 344	ldmia	r0, {r3 - r5}
 345	add	r0, sp, #S_PC		@ here for interlock avoidance
 346	mov	r6, #-1			@  ""  ""     ""        ""
 347
 348	str	r3, [sp]		@ save the "real" r0 copied
 349					@ from the exception stack
 350
 351	@
 352	@ We are now ready to fill in the remaining blanks on the stack:
 353	@
 354	@  r4 - lr_<exception>, already fixed up for correct return/restart
 355	@  r5 - spsr_<exception>
 356	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
 357	@
 358	@ Also, separately save sp_usr and lr_usr
 359	@
 360	stmia	r0, {r4 - r6}
 361 ARM(	stmdb	r0, {sp, lr}^			)
 362 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
 363
 364	@
 365	@ Enable the alignment trap while in kernel mode
 366	@
 367	alignment_trap r0
 368
 369	@
 370	@ Clear FP to mark the first stack frame
 371	@
 372	zero_fp
 373
 374#ifdef CONFIG_IRQSOFF_TRACER
 375	bl	trace_hardirqs_off
 376#endif
 377	.endm
 378
 379	.macro	kuser_cmpxchg_check
 380#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 381#ifndef CONFIG_MMU
 382#warning "NPTL on non MMU needs fixing"
 383#else
 384	@ Make sure our user space atomic helper is restarted
 385	@ if it was interrupted in a critical region.  Here we
 386	@ perform a quick test inline since it should be false
 387	@ 99.9999% of the time.  The rest is done out of line.
 388	cmp	r4, #TASK_SIZE
 389	blhs	kuser_cmpxchg64_fixup
 390#endif
 391#endif
 392	.endm
 393
 394	.align	5
 395__dabt_usr:
 396	usr_entry
 397	kuser_cmpxchg_check
 398	mov	r2, sp
 399	dabt_helper
 400	b	ret_from_exception
 401 UNWIND(.fnend		)
 402ENDPROC(__dabt_usr)
 403
 404	.align	5
 405__irq_usr:
 406	usr_entry
 407	kuser_cmpxchg_check
 408	irq_handler
 409	get_thread_info tsk
 410	mov	why, #0
 411	b	ret_to_user_from_irq
 412 UNWIND(.fnend		)
 413ENDPROC(__irq_usr)
 414
 415	.ltorg
 416
 417	.align	5
 418__und_usr:
 419	usr_entry
 420
 421	mov	r2, r4
 422	mov	r3, r5
 423
 
 
 
 424	@
 425	@ fall through to the emulation code, which returns using r9 if
 426	@ it has emulated the instruction, or the more conventional lr
 427	@ if we are to treat this as a real undefined instruction
 428	@
 429	@  r0 - instruction
 430	@
 431	adr	r9, BSYM(ret_from_exception)
 432	adr	lr, BSYM(__und_usr_unknown)
 433	tst	r3, #PSR_T_BIT			@ Thumb mode?
 434	itet	eq				@ explicit IT needed for the 1f label
 435	subeq	r4, r2, #4			@ ARM instr at LR - 4
 436	subne	r4, r2, #2			@ Thumb instr at LR - 2
 4371:	ldreqt	r0, [r4]
 438#ifdef CONFIG_CPU_ENDIAN_BE8
 439	reveq	r0, r0				@ little endian instruction
 440#endif
 441	beq	call_fpe
 
 
 
 
 
 
 
 442	@ Thumb instruction
 443#if __LINUX_ARM_ARCH__ >= 7
 4442:
 445 ARM(	ldrht	r5, [r4], #2	)
 446 THUMB(	ldrht	r5, [r4]	)
 447 THUMB(	add	r4, r4, #2	)
 448	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
 449	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
 450	blo	__und_usr_unknown
 4513:	ldrht	r0, [r4]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 452	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
 
 453	orr	r0, r0, r5, lsl #16
 
 
 
 
 
 
 
 
 
 
 454#else
 455	b	__und_usr_unknown
 456#endif
 457 UNWIND(.fnend		)
 
 
 
 
 458ENDPROC(__und_usr)
 459
 460	@
 461	@ fallthrough to call_fpe
 462	@
 463
 464/*
 465 * The out of line fixup for the ldrt above.
 466 */
 467	.pushsection .fixup, "ax"
 
 4684:	mov	pc, r9
 469	.popsection
 470	.pushsection __ex_table,"a"
 471	.long	1b, 4b
 472#if __LINUX_ARM_ARCH__ >= 7
 473	.long	2b, 4b
 474	.long	3b, 4b
 475#endif
 476	.popsection
 477
 478/*
 479 * Check whether the instruction is a co-processor instruction.
 480 * If yes, we need to call the relevant co-processor handler.
 481 *
 482 * Note that we don't do a full check here for the co-processor
 483 * instructions; all instructions with bit 27 set are well
 484 * defined.  The only instructions that should fault are the
 485 * co-processor instructions.  However, we have to watch out
 486 * for the ARM6/ARM7 SWI bug.
 487 *
 488 * NEON is a special case that has to be handled here. Not all
 489 * NEON instructions are co-processor instructions, so we have
 490 * to make a special case of checking for them. Plus, there's
 491 * five groups of them, so we have a table of mask/opcode pairs
 492 * to check against, and if any match then we branch off into the
 493 * NEON handler code.
 494 *
 495 * Emulators may wish to make use of the following registers:
 496 *  r0  = instruction opcode.
 497 *  r2  = PC+4
 498 *  r9  = normal "successful" return address
 499 *  r10 = this threads thread_info structure.
 500 *  lr  = unrecognised instruction return address
 
 501 */
 502	@
 503	@ Fall-through from Thumb-2 __und_usr
 504	@
 505#ifdef CONFIG_NEON
 506	adr	r6, .LCneon_thumb_opcodes
 507	b	2f
 508#endif
 509call_fpe:
 510#ifdef CONFIG_NEON
 511	adr	r6, .LCneon_arm_opcodes
 5122:
 513	ldr	r7, [r6], #4			@ mask value
 514	cmp	r7, #0				@ end mask?
 515	beq	1f
 516	and	r8, r0, r7
 517	ldr	r7, [r6], #4			@ opcode bits matching in mask
 518	cmp	r8, r7				@ NEON instruction?
 519	bne	2b
 520	get_thread_info r10
 521	mov	r7, #1
 522	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
 523	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
 524	b	do_vfp				@ let VFP handler handle this
 5251:
 526#endif
 527	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
 528	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
 529#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
 530	and	r8, r0, #0x0f000000		@ mask out op-code bits
 531	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
 532#endif
 533	moveq	pc, lr
 534	get_thread_info r10			@ get current thread
 535	and	r8, r0, #0x00000f00		@ mask out CP number
 536 THUMB(	lsr	r8, r8, #8		)
 537	mov	r7, #1
 538	add	r6, r10, #TI_USED_CP
 539 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 540 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
 541#ifdef CONFIG_IWMMXT
 542	@ Test if we need to give access to iWMMXt coprocessors
 543	ldr	r5, [r10, #TI_FLAGS]
 544	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
 545	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
 546	bcs	iwmmxt_task_enable
 547#endif
 548 ARM(	add	pc, pc, r8, lsr #6	)
 549 THUMB(	lsl	r8, r8, #2		)
 550 THUMB(	add	pc, r8			)
 551	nop
 552
 553	movw_pc	lr				@ CP#0
 554	W(b)	do_fpe				@ CP#1 (FPE)
 555	W(b)	do_fpe				@ CP#2 (FPE)
 556	movw_pc	lr				@ CP#3
 557#ifdef CONFIG_CRUNCH
 558	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
 559	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
 560	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
 561#else
 562	movw_pc	lr				@ CP#4
 563	movw_pc	lr				@ CP#5
 564	movw_pc	lr				@ CP#6
 565#endif
 566	movw_pc	lr				@ CP#7
 567	movw_pc	lr				@ CP#8
 568	movw_pc	lr				@ CP#9
 569#ifdef CONFIG_VFP
 570	W(b)	do_vfp				@ CP#10 (VFP)
 571	W(b)	do_vfp				@ CP#11 (VFP)
 572#else
 573	movw_pc	lr				@ CP#10 (VFP)
 574	movw_pc	lr				@ CP#11 (VFP)
 575#endif
 576	movw_pc	lr				@ CP#12
 577	movw_pc	lr				@ CP#13
 578	movw_pc	lr				@ CP#14 (Debug)
 579	movw_pc	lr				@ CP#15 (Control)
 580
 
 
 
 
 
 
 581#ifdef CONFIG_NEON
 582	.align	6
 583
 584.LCneon_arm_opcodes:
 585	.word	0xfe000000			@ mask
 586	.word	0xf2000000			@ opcode
 587
 588	.word	0xff100000			@ mask
 589	.word	0xf4000000			@ opcode
 590
 591	.word	0x00000000			@ mask
 592	.word	0x00000000			@ opcode
 593
 594.LCneon_thumb_opcodes:
 595	.word	0xef000000			@ mask
 596	.word	0xef000000			@ opcode
 597
 598	.word	0xff100000			@ mask
 599	.word	0xf9000000			@ opcode
 600
 601	.word	0x00000000			@ mask
 602	.word	0x00000000			@ opcode
 603#endif
 604
 605do_fpe:
 606	enable_irq
 607	ldr	r4, .LCfp
 608	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
 609	ldr	pc, [r4]			@ Call FP module USR entry point
 610
 611/*
 612 * The FP module is called with these registers set:
 613 *  r0  = instruction
 614 *  r2  = PC+4
 615 *  r9  = normal "successful" return address
 616 *  r10 = FP workspace
 617 *  lr  = unrecognised FP instruction return address
 618 */
 619
 620	.pushsection .data
 621ENTRY(fp_enter)
 622	.word	no_fp
 623	.popsection
 624
 625ENTRY(no_fp)
 626	mov	pc, lr
 627ENDPROC(no_fp)
 628
 629__und_usr_unknown:
 630	enable_irq
 
 
 
 
 631	mov	r0, sp
 632	adr	lr, BSYM(ret_from_exception)
 633	b	do_undefinstr
 634ENDPROC(__und_usr_unknown)
 
 635
 636	.align	5
 637__pabt_usr:
 638	usr_entry
 639	mov	r2, sp				@ regs
 640	pabt_helper
 641 UNWIND(.fnend		)
 642	/* fall through */
 643/*
 644 * This is the return code to user mode for abort handlers
 645 */
 646ENTRY(ret_from_exception)
 647 UNWIND(.fnstart	)
 648 UNWIND(.cantunwind	)
 649	get_thread_info tsk
 650	mov	why, #0
 651	b	ret_to_user
 652 UNWIND(.fnend		)
 653ENDPROC(__pabt_usr)
 654ENDPROC(ret_from_exception)
 655
 656/*
 657 * Register switch for ARMv3 and ARMv4 processors
 658 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 659 * previous and next are guaranteed not to be the same.
 660 */
 661ENTRY(__switch_to)
 662 UNWIND(.fnstart	)
 663 UNWIND(.cantunwind	)
 664	add	ip, r1, #TI_CPU_SAVE
 665	ldr	r3, [r2, #TI_TP_VALUE]
 666 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 667 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 668 THUMB(	str	sp, [ip], #4		   )
 669 THUMB(	str	lr, [ip], #4		   )
 670#ifdef CONFIG_CPU_USE_DOMAINS
 671	ldr	r6, [r2, #TI_CPU_DOMAIN]
 672#endif
 673	set_tls	r3, r4, r5
 674#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
 675	ldr	r7, [r2, #TI_TASK]
 676	ldr	r8, =__stack_chk_guard
 677	ldr	r7, [r7, #TSK_STACK_CANARY]
 678#endif
 679#ifdef CONFIG_CPU_USE_DOMAINS
 680	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
 681#endif
 682	mov	r5, r0
 683	add	r4, r2, #TI_CPU_SAVE
 684	ldr	r0, =thread_notify_head
 685	mov	r1, #THREAD_NOTIFY_SWITCH
 686	bl	atomic_notifier_call_chain
 687#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
 688	str	r7, [r8]
 689#endif
 690 THUMB(	mov	ip, r4			   )
 691	mov	r0, r5
 692 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 693 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 694 THUMB(	ldr	sp, [ip], #4		   )
 695 THUMB(	ldr	pc, [ip]		   )
 696 UNWIND(.fnend		)
 697ENDPROC(__switch_to)
 698
 699	__INIT
 700
 701/*
 702 * User helpers.
 703 *
 704 * Each segment is 32-byte aligned and will be moved to the top of the high
 705 * vector page.  New segments (if ever needed) must be added in front of
 706 * existing ones.  This mechanism should be used only for things that are
 707 * really small and justified, and not be abused freely.
 708 *
 709 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 710 */
 711 THUMB(	.arm	)
 712
 713	.macro	usr_ret, reg
 714#ifdef CONFIG_ARM_THUMB
 715	bx	\reg
 716#else
 717	mov	pc, \reg
 718#endif
 719	.endm
 720
 721	.align	5
 722	.globl	__kuser_helper_start
 723__kuser_helper_start:
 724
 725/*
 726 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 727 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 728 */
 729
 730__kuser_cmpxchg64:				@ 0xffff0f60
 731
 732#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 733
 734	/*
 735	 * Poor you.  No fast solution possible...
 736	 * The kernel itself must perform the operation.
 737	 * A special ghost syscall is used for that (see traps.c).
 738	 */
 739	stmfd	sp!, {r7, lr}
 740	ldr	r7, 1f			@ it's 20 bits
 741	swi	__ARM_NR_cmpxchg64
 742	ldmfd	sp!, {r7, pc}
 7431:	.word	__ARM_NR_cmpxchg64
 744
 745#elif defined(CONFIG_CPU_32v6K)
 746
 747	stmfd	sp!, {r4, r5, r6, r7}
 748	ldrd	r4, r5, [r0]			@ load old val
 749	ldrd	r6, r7, [r1]			@ load new val
 750	smp_dmb	arm
 7511:	ldrexd	r0, r1, [r2]			@ load current val
 752	eors	r3, r0, r4			@ compare with oldval (1)
 753	eoreqs	r3, r1, r5			@ compare with oldval (2)
 754	strexdeq r3, r6, r7, [r2]		@ store newval if eq
 755	teqeq	r3, #1				@ success?
 756	beq	1b				@ if no then retry
 757	smp_dmb	arm
 758	rsbs	r0, r3, #0			@ set returned val and C flag
 759	ldmfd	sp!, {r4, r5, r6, r7}
 760	bx	lr
 761
 762#elif !defined(CONFIG_SMP)
 763
 764#ifdef CONFIG_MMU
 765
 766	/*
 767	 * The only thing that can break atomicity in this cmpxchg64
 768	 * implementation is either an IRQ or a data abort exception
 769	 * causing another process/thread to be scheduled in the middle of
 770	 * the critical sequence.  The same strategy as for cmpxchg is used.
 771	 */
 772	stmfd	sp!, {r4, r5, r6, lr}
 773	ldmia	r0, {r4, r5}			@ load old val
 774	ldmia	r1, {r6, lr}			@ load new val
 7751:	ldmia	r2, {r0, r1}			@ load current val
 776	eors	r3, r0, r4			@ compare with oldval (1)
 777	eoreqs	r3, r1, r5			@ compare with oldval (2)
 7782:	stmeqia	r2, {r6, lr}			@ store newval if eq
 779	rsbs	r0, r3, #0			@ set return val and C flag
 780	ldmfd	sp!, {r4, r5, r6, pc}
 781
 782	.text
 783kuser_cmpxchg64_fixup:
 784	@ Called from kuser_cmpxchg_fixup.
 785	@ r4 = address of interrupted insn (must be preserved).
 786	@ sp = saved regs. r7 and r8 are clobbered.
 787	@ 1b = first critical insn, 2b = last critical insn.
 788	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
 789	mov	r7, #0xffff0fff
 790	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
 791	subs	r8, r4, r7
 792	rsbcss	r8, r8, #(2b - 1b)
 793	strcs	r7, [sp, #S_PC]
 794#if __LINUX_ARM_ARCH__ < 6
 795	bcc	kuser_cmpxchg32_fixup
 796#endif
 797	mov	pc, lr
 798	.previous
 799
 800#else
 801#warning "NPTL on non MMU needs fixing"
 802	mov	r0, #-1
 803	adds	r0, r0, #0
 804	usr_ret	lr
 805#endif
 806
 807#else
 808#error "incoherent kernel configuration"
 809#endif
 810
 811	/* pad to next slot */
 812	.rept	(16 - (. - __kuser_cmpxchg64)/4)
 813	.word	0
 814	.endr
 815
 816	.align	5
 817
 818__kuser_memory_barrier:				@ 0xffff0fa0
 819	smp_dmb	arm
 820	usr_ret	lr
 821
 822	.align	5
 823
 824__kuser_cmpxchg:				@ 0xffff0fc0
 825
 826#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 827
 828	/*
 829	 * Poor you.  No fast solution possible...
 830	 * The kernel itself must perform the operation.
 831	 * A special ghost syscall is used for that (see traps.c).
 832	 */
 833	stmfd	sp!, {r7, lr}
 834	ldr	r7, 1f			@ it's 20 bits
 835	swi	__ARM_NR_cmpxchg
 836	ldmfd	sp!, {r7, pc}
 8371:	.word	__ARM_NR_cmpxchg
 838
 839#elif __LINUX_ARM_ARCH__ < 6
 840
 841#ifdef CONFIG_MMU
 842
 843	/*
 844	 * The only thing that can break atomicity in this cmpxchg
 845	 * implementation is either an IRQ or a data abort exception
 846	 * causing another process/thread to be scheduled in the middle
 847	 * of the critical sequence.  To prevent this, code is added to
 848	 * the IRQ and data abort exception handlers to set the pc back
 849	 * to the beginning of the critical section if it is found to be
 850	 * within that critical section (see kuser_cmpxchg_fixup).
 851	 */
 8521:	ldr	r3, [r2]			@ load current val
 853	subs	r3, r3, r0			@ compare with oldval
 8542:	streq	r1, [r2]			@ store newval if eq
 855	rsbs	r0, r3, #0			@ set return val and C flag
 856	usr_ret	lr
 857
 858	.text
 859kuser_cmpxchg32_fixup:
 860	@ Reached from the kuser_cmpxchg_check macro via kuser_cmpxchg64_fixup.
 861	@ r4 = address of interrupted insn (must be preserved).
 862	@ sp = saved regs. r7 and r8 are clobbered.
 863	@ 1b = first critical insn, 2b = last critical insn.
 864	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
 865	mov	r7, #0xffff0fff		@ build run-time address of 1b
 866	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
 867	subs	r8, r4, r7		@ C set if r4 >= 1b
 868	rsbcss	r8, r8, #(2b - 1b)	@ if so, C set if r4 <= 2b
 869	strcs	r7, [sp, #S_PC]		@ in range: restart the insn at 1b
 870	mov	pc, lr
 871	.previous
 872
 873#else
 874#warning "NPTL on non MMU needs fixing"
 875	mov	r0, #-1			@ failure return value
 876	adds	r0, r0, #0		@ clear C flag (C set means success)
 877	usr_ret	lr
 878#endif
 879
 880#else
 881
 882	smp_dmb	arm
 8831:	ldrex	r3, [r2]
 884	subs	r3, r3, r0
 885	strexeq	r3, r1, [r2]
 886	teqeq	r3, #1
 887	beq	1b
 888	rsbs	r0, r3, #0
 889	/* beware -- each __kuser slot must be 8 instructions max */
 890	ALT_SMP(b	__kuser_memory_barrier)
 891	ALT_UP(usr_ret	lr)
 892
 893#endif
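/*
 * Illustrative user-space usage, in the style of
 * Documentation/arm/kernel_user_helpers.txt (not part of this file;
 * names are example-local):
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *					volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 *	int atomic_add(volatile int *ptr, int val)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = *ptr;
 *			new = old + val;
 *		} while (__kuser_cmpxchg(old, new, ptr));
 *
 *		return new;
 *	}
 *
 * As with cmpxchg64, 0/C-set means *ptr was updated and non-zero/C-clear
 * means it was not, so callers must loop.
 */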
 894
 895	.align	5
 896
 897__kuser_get_tls:				@ 0xffff0fe0
 898	ldr	r0, [pc, #(16 - 8)]	@ read software TLS value at 0xffff0ff0, set in kuser_get_tls_init
 899	usr_ret	lr
 900	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
 901	.rep	4
 902	.word	0			@ 0xffff0ff0 software TLS value, then
 903	.endr				@ pad up to __kuser_helper_version
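/*
 * Illustrative user-space usage (not part of this file; names are
 * example-local, in the style of Documentation/arm/kernel_user_helpers.txt):
 *
 *	typedef void * (__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 *	void *tls = __kuser_get_tls();
 */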
 904
 905__kuser_helper_version:				@ 0xffff0ffc
 906	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
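/*
 * The version word counts the 32-byte helper slots defined above, so
 * user code can probe for a helper before using it.  An illustrative,
 * example-local check:
 *
 *	#define __kuser_helper_version (*(int32_t *)0xffff0ffc)
 *
 *	if (__kuser_helper_version < 3)
 *		;	// no __kuser_memory_barrier on this kernel
 */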
 907
 908	.globl	__kuser_helper_end
 909__kuser_helper_end:
 910
 911 THUMB(	.thumb	)
 912
 913/*
 914 * Vector stubs.
 915 *
 916 * This code is copied to 0xffff0200 so we can use branches in the
 917 * vectors, rather than ldr's.  Note that this code must not
 918 * exceed 0x300 bytes.
 919 *
 920 * Common stub entry macro:
 921 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 922 *
 923 * SP points to a minimal amount of processor-private memory, the address
 924 * of which is copied into r0 for the mode-specific exception handler.
 925 */
 926	.macro	vector_stub, name, mode, correction=0
 927	.align	5
 928
 929vector_\name:
 930	.if \correction
 931	sub	lr, lr, #\correction
 932	.endif
 933
 934	@
 935	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
 936	@ (parent CPSR)
 937	@
 938	stmia	sp, {r0, lr}		@ save r0, lr
 939	mrs	lr, spsr
 940	str	lr, [sp, #8]		@ save spsr
 941
 942	@
 943	@ Prepare for SVC32 mode.  IRQs remain disabled.
 944	@
 945	mrs	r0, cpsr
 946	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
 947	msr	spsr_cxsf, r0
 948
 949	@
 950	@ the branch table must immediately follow this code
 951	@
 952	and	lr, lr, #0x0f
 953 THUMB(	adr	r0, 1f			)
 954 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
 955	mov	r0, sp
 956 ARM(	ldr	lr, [pc, lr, lsl #2]	)
 957	movs	pc, lr			@ branch to handler in SVC mode
 958ENDPROC(vector_\name)
 959
 960	.align	2
 961	@ handler addresses follow this label
 9621:
 963	.endm
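/*
 * Worked example of the mode-switch eor above (illustrative only):
 * with \mode = IRQ_MODE (0x12), the constant is IRQ_MODE ^ SVC_MODE
 * (0x12 ^ 0x13 = 0x01), so eor'ing it into the copied cpsr flips the
 * mode field from IRQ to SVC without needing a scratch register for a
 * full mode constant.  The hypothetical C below mirrors the arithmetic:
 *
 *	unsigned int cpsr = 0x80000092;		// hypothetical: I set, IRQ mode
 *	cpsr ^= (0x12 ^ 0x13);			// \mode ^ SVC_MODE
 *	// now (cpsr & 0x1f) == 0x13, i.e. SVC_MODE
 *
 * On Thumb-2 kernels PSR_ISETSTATE additionally sets the T bit so the
 * handler is entered in the right instruction set.
 */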
 964
 965	.globl	__stubs_start
 966__stubs_start:
 967/*
 968 * Interrupt dispatcher
 969 */
 970	vector_stub	irq, IRQ_MODE, 4
 971
 972	.long	__irq_usr			@  0  (USR_26 / USR_32)
 973	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
 974	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
 975	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
 976	.long	__irq_invalid			@  4
 977	.long	__irq_invalid			@  5
 978	.long	__irq_invalid			@  6
 979	.long	__irq_invalid			@  7
 980	.long	__irq_invalid			@  8
 981	.long	__irq_invalid			@  9
 982	.long	__irq_invalid			@  a
 983	.long	__irq_invalid			@  b
 984	.long	__irq_invalid			@  c
 985	.long	__irq_invalid			@  d
 986	.long	__irq_invalid			@  e
 987	.long	__irq_invalid			@  f
 988
 989/*
 990 * Data abort dispatcher
 991 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 992 */
 993	vector_stub	dabt, ABT_MODE, 8
 994
 995	.long	__dabt_usr			@  0  (USR_26 / USR_32)
 996	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
 997	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
 998	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
 999	.long	__dabt_invalid			@  4
1000	.long	__dabt_invalid			@  5
1001	.long	__dabt_invalid			@  6
1002	.long	__dabt_invalid			@  7
1003	.long	__dabt_invalid			@  8
1004	.long	__dabt_invalid			@  9
1005	.long	__dabt_invalid			@  a
1006	.long	__dabt_invalid			@  b
1007	.long	__dabt_invalid			@  c
1008	.long	__dabt_invalid			@  d
1009	.long	__dabt_invalid			@  e
1010	.long	__dabt_invalid			@  f
1011
1012/*
1013 * Prefetch abort dispatcher
1014 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
1015 */
1016	vector_stub	pabt, ABT_MODE, 4
1017
1018	.long	__pabt_usr			@  0 (USR_26 / USR_32)
1019	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
1020	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
1021	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
1022	.long	__pabt_invalid			@  4
1023	.long	__pabt_invalid			@  5
1024	.long	__pabt_invalid			@  6
1025	.long	__pabt_invalid			@  7
1026	.long	__pabt_invalid			@  8
1027	.long	__pabt_invalid			@  9
1028	.long	__pabt_invalid			@  a
1029	.long	__pabt_invalid			@  b
1030	.long	__pabt_invalid			@  c
1031	.long	__pabt_invalid			@  d
1032	.long	__pabt_invalid			@  e
1033	.long	__pabt_invalid			@  f
1034
1035/*
1036 * Undef instr entry dispatcher
1037 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
1038 */
1039	vector_stub	und, UND_MODE
1040
1041	.long	__und_usr			@  0 (USR_26 / USR_32)
1042	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
1043	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
1044	.long	__und_svc			@  3 (SVC_26 / SVC_32)
1045	.long	__und_invalid			@  4
1046	.long	__und_invalid			@  5
1047	.long	__und_invalid			@  6
1048	.long	__und_invalid			@  7
1049	.long	__und_invalid			@  8
1050	.long	__und_invalid			@  9
1051	.long	__und_invalid			@  a
1052	.long	__und_invalid			@  b
1053	.long	__und_invalid			@  c
1054	.long	__und_invalid			@  d
1055	.long	__und_invalid			@  e
1056	.long	__und_invalid			@  f
1057
1058	.align	5
1059
1060/*=============================================================================
1061 * Undefined FIQs
1062 *-----------------------------------------------------------------------------
1063 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
1064 * MUST PRESERVE SVC SPSR, but we would need to switch to SVC mode to show
1065 * our message.  Basically, to switch modes we *HAVE* to clobber one
1066 * register...  brain damage alert!  We can't execute any code here in any
1067 * mode other than FIQ...  OK, you can switch to another mode, but you
1068 * can't get out of it again without clobbering one register.
1069 */
1070vector_fiq:
1071	disable_fiq
1072	subs	pc, lr, #4		@ return; restores CPSR from SPSR
1073
1074/*=============================================================================
1075 * Address exception handler
1076 *-----------------------------------------------------------------------------
1077 * These aren't too critical; they're not supposed to happen, and they
1078 * won't happen in 32-bit data mode.
1079 */
1080
1081vector_addrexcptn:
1082	b	vector_addrexcptn
1083
1084/*
1085 * We group all the following data together to optimise
1086 * for CPUs with separate I & D caches.
1087 */
1088	.align	5
1089
1090.LCvswi:
1091	.word	vector_swi
1092
1093	.globl	__stubs_end
1094__stubs_end:
1095
1096	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
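/*
 * Branch displacements are fixed at assembly time, but the vectors and
 * the stubs execute from their run-time copies at 0xffff0000 and
 * 0xffff0200 respectively.  Adding stubs_offset to each link-time stub
 * address below rebases it so that the resulting PC-relative branch,
 * taken from a vector copied to 0xffff0000, lands on the stub's copy
 * at 0xffff0200 + (stub - __stubs_start).
 */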
1097
1098	.globl	__vectors_start
1099__vectors_start:
1100 ARM(	swi	SYS_ERROR0	)
1101 THUMB(	svc	#0		)
1102 THUMB(	nop			)
1103	W(b)	vector_und + stubs_offset
1104	W(ldr)	pc, .LCvswi + stubs_offset
1105	W(b)	vector_pabt + stubs_offset
1106	W(b)	vector_dabt + stubs_offset
1107	W(b)	vector_addrexcptn + stubs_offset
1108	W(b)	vector_irq + stubs_offset
1109	W(b)	vector_fiq + stubs_offset
1110
1111	.globl	__vectors_end
1112__vectors_end:
1113
1114	.data
1115
1116	.globl	cr_alignment
1117	.globl	cr_no_alignment
1118cr_alignment:
1119	.space	4
1120cr_no_alignment:
1121	.space	4
1122
1123#ifdef CONFIG_MULTI_IRQ_HANDLER
1124	.globl	handle_arch_irq
1125handle_arch_irq:
1126	.space	4
1127#endif