v3.1
   1/*
   2 *  linux/arch/arm/kernel/entry-armv.S
   3 *
   4 *  Copyright (C) 1996,1997,1998 Russell King.
   5 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
   6 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License version 2 as
  10 * published by the Free Software Foundation.
  11 *
  12 *  Low-level vector interface routines
  13 *
  14 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
  15 *  that causes it to save wrong values...  Be aware!
  16 */
  17
  18#include <asm/memory.h>
  19#include <asm/glue-df.h>
  20#include <asm/glue-pf.h>
  21#include <asm/vfpmacros.h>
  22#include <mach/entry-macro.S>
  23#include <asm/thread_notify.h>
  24#include <asm/unwind.h>
  25#include <asm/unistd.h>
  26#include <asm/tls.h>
  27
  28#include "entry-header.S"
  29#include <asm/entry-macro-multi.S>
  30
  31/*
  32 * Interrupt handling.
  33 */
  34	.macro	irq_handler
  35#ifdef CONFIG_MULTI_IRQ_HANDLER
  36	ldr	r1, =handle_arch_irq
  37	mov	r0, sp
  38	ldr	r1, [r1]
  39	adr	lr, BSYM(9997f)
  40	teq	r1, #0
  41	movne	pc, r1
  42#endif
  43	arch_irq_handler_default
  449997:
  45	.endm
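/*
 * Rough C sketch of the macro above (illustrative only, not kernel
 * code): with CONFIG_MULTI_IRQ_HANDLER the dispatch is indirect
 * through the handle_arch_irq function pointer, falling back to the
 * machine-specific arch_irq_handler_default entry macro when the
 * pointer is unset.
 *
 *	void irq_handler(struct pt_regs *regs)
 *	{
 *		if (handle_arch_irq)		// teq r1, #0 / movne pc, r1
 *			handle_arch_irq(regs);	// r0 = sp = regs
 *		else
 *			arch_irq_handler_default();
 *	}
 */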
  46
  47	.macro	pabt_helper
  48	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
  49#ifdef MULTI_PABORT
  50	ldr	ip, .LCprocfns
  51	mov	lr, pc
  52	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
  53#else
  54	bl	CPU_PABORT_HANDLER
  55#endif
  56	.endm
  57
  58	.macro	dabt_helper
  59
  60	@
  61	@ Call the processor-specific abort handler:
  62	@
  63	@  r2 - pt_regs
  64	@  r4 - aborted context pc
  65	@  r5 - aborted context psr
  66	@
  67	@ The abort handler must return the aborted address in r0, and
  68	@ the fault status register in r1.  r9 must be preserved.
  69	@
  70#ifdef MULTI_DABORT
  71	ldr	ip, .LCprocfns
  72	mov	lr, pc
  73	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
  74#else
  75	bl	CPU_DABORT_HANDLER
  76#endif
  77	.endm
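/*
 * Sketch of the MULTI_DABORT/MULTI_PABORT path in C terms (the hook
 * name below is assumed, for illustration only): .LCprocfns holds
 * the address of the global 'processor' struct, and the handler is
 * reached through the function pointer stored at the
 * PROCESSOR_DABT_FUNC / PROCESSOR_PABT_FUNC offset:
 *
 *	processor.abort_hook(regs);	// ldr pc, [ip, #PROCESSOR_..._FUNC]
 */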
  78
  79#ifdef CONFIG_KPROBES
  80	.section	.kprobes.text,"ax",%progbits
  81#else
  82	.text
  83#endif
  84
  85/*
  86 * Invalid mode handlers
  87 */
  88	.macro	inv_entry, reason
  89	sub	sp, sp, #S_FRAME_SIZE
  90 ARM(	stmib	sp, {r1 - lr}		)
  91 THUMB(	stmia	sp, {r0 - r12}		)
  92 THUMB(	str	sp, [sp, #S_SP]		)
  93 THUMB(	str	lr, [sp, #S_LR]		)
  94	mov	r1, #\reason
  95	.endm
  96
  97__pabt_invalid:
  98	inv_entry BAD_PREFETCH
  99	b	common_invalid
 100ENDPROC(__pabt_invalid)
 101
 102__dabt_invalid:
 103	inv_entry BAD_DATA
 104	b	common_invalid
 105ENDPROC(__dabt_invalid)
 106
 107__irq_invalid:
 108	inv_entry BAD_IRQ
 109	b	common_invalid
 110ENDPROC(__irq_invalid)
 111
 112__und_invalid:
 113	inv_entry BAD_UNDEFINSTR
 114
 115	@
 116	@ XXX fall through to common_invalid
 117	@
 118
 119@
 120@ common_invalid - generic code for failed exception (re-entrant version of handlers)
 121@
 122common_invalid:
 123	zero_fp
 124
 125	ldmia	r0, {r4 - r6}
 126	add	r0, sp, #S_PC		@ here for interlock avoidance
 127	mov	r7, #-1			@  ""   ""    ""        ""
 128	str	r4, [sp]		@ save preserved r0
 129	stmia	r0, {r5 - r7}		@ lr_<exception>,
 130					@ cpsr_<exception>, "old_r0"
 131
 132	mov	r0, sp
 133	b	bad_mode
 134ENDPROC(__und_invalid)
 135
 136/*
 137 * SVC mode handlers
 138 */
 139
 140#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
 141#define SPFIX(code...) code
 142#else
 143#define SPFIX(code...)
 144#endif
 145
 146	.macro	svc_entry, stack_hole=0
 147 UNWIND(.fnstart		)
 148 UNWIND(.save {r0 - pc}		)
 149	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 150#ifdef CONFIG_THUMB2_KERNEL
 151 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 152 SPFIX(	mov	r0, sp		)
 153 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 154 SPFIX(	ldr	r0, [sp]	)	@ restored
 155#else
 156 SPFIX(	tst	sp, #4		)
 157#endif
 158 SPFIX(	subeq	sp, sp, #4	)
 159	stmia	sp, {r1 - r12}
 160
 161	ldmia	r0, {r3 - r5}
 162	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
 163	mov	r6, #-1			@  ""  ""      ""       ""
 164	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 165 SPFIX(	addeq	r2, r2, #4	)
 166	str	r3, [sp, #-4]!		@ save the "real" r0 copied
 167					@ from the exception stack
 168
 169	mov	r3, lr
 170
 171	@
 172	@ We are now ready to fill in the remaining blanks on the stack:
 173	@
 174	@  r2 - sp_svc
 175	@  r3 - lr_svc
 176	@  r4 - lr_<exception>, already fixed up for correct return/restart
 177	@  r5 - spsr_<exception>
 178	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
 179	@
 180	stmia	r7, {r2 - r6}
 181
 182#ifdef CONFIG_TRACE_IRQFLAGS
 183	bl	trace_hardirqs_off
 184#endif
 185	.endm
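/*
 * Why SPFIX: the EABI requires an 8-byte-aligned stack.  S_FRAME_SIZE
 * is a multiple of 8, but svc_entry allocates it in two steps:
 * S_FRAME_SIZE - 4 up front, plus a final 4-byte push of the saved
 * r0.  If SP is still 8-byte aligned after the first step, a pad
 * word is inserted so the final push lands back on an 8-byte
 * boundary.  Sketch:
 *
 *	sp -= S_FRAME_SIZE - 4;
 *	if ((sp & 4) == 0)	// tst sp, #4 ; subeq sp, sp, #4
 *		sp -= 4;	// pad, undone later via "addeq r2, r2, #4"
 *	*--sp = saved_r0;	// str r3, [sp, #-4]!  => sp % 8 == 0
 */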
 186
 187	.align	5
 188__dabt_svc:
 189	svc_entry
 190	mov	r2, sp
 191	dabt_helper
 192
 193	@
 194	@ IRQs off again before pulling preserved data off the stack
 195	@
 196	disable_irq_notrace
 197
 198#ifdef CONFIG_TRACE_IRQFLAGS
 199	tst	r5, #PSR_I_BIT
 200	bleq	trace_hardirqs_on
 201	tst	r5, #PSR_I_BIT
 202	blne	trace_hardirqs_off
 203#endif
 204	svc_exit r5				@ return from exception
 205 UNWIND(.fnend		)
 206ENDPROC(__dabt_svc)
 207
 208	.align	5
 209__irq_svc:
 210	svc_entry
 211	irq_handler
 212
 213#ifdef CONFIG_PREEMPT
 214	get_thread_info tsk
 215	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
 216	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
 217	teq	r8, #0				@ if preempt count != 0
 218	movne	r0, #0				@ force flags to 0
 219	tst	r0, #_TIF_NEED_RESCHED
 220	blne	svc_preempt
 221#endif
 222
 223#ifdef CONFIG_TRACE_IRQFLAGS
 224	@ The parent context IRQs must have been enabled to get here in
 225	@ the first place, so there's no point checking the PSR I bit.
 226	bl	trace_hardirqs_on
 227#endif
 228	svc_exit r5				@ return from exception
 229 UNWIND(.fnend		)
 230ENDPROC(__irq_svc)
 231
 232	.ltorg
 233
 234#ifdef CONFIG_PREEMPT
 235svc_preempt:
 236	mov	r8, lr
 2371:	bl	preempt_schedule_irq		@ irq en/disable is done inside
  238	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
 239	tst	r0, #_TIF_NEED_RESCHED
 240	moveq	pc, r8				@ go again
 241	b	1b
 242#endif
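/*
 * svc_preempt above is, in effect (illustrative C sketch):
 *
 *	do {
 *		preempt_schedule_irq();	// re-enables/disables IRQs itself
 *	} while (test_thread_flag(TIF_NEED_RESCHED));
 */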
 243
 244	.align	5
 245__und_svc:
 246#ifdef CONFIG_KPROBES
 247	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
 248	@ it obviously needs free stack space which then will belong to
 249	@ the saved context.
 250	svc_entry 64
 251#else
 252	svc_entry
 253#endif
 254	@
 255	@ call emulation code, which returns using r9 if it has emulated
 256	@ the instruction, or the more conventional lr if we are to treat
 257	@ this as a real undefined instruction
 258	@
 259	@  r0 - instruction
 260	@
 261#ifndef	CONFIG_THUMB2_KERNEL
 262	ldr	r0, [r4, #-4]
 263#else
 264	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
 265	and	r9, r0, #0xf800
 266	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
 267	ldrhhs	r9, [r4]			@ bottom 16 bits
 268	orrhs	r0, r9, r0, lsl #16
 269#endif
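	@ The width test above, in C terms (sketch): the first halfword h
	@ of a Thumb opcode starts a 32-bit encoding iff
	@ (h & 0xf800) >= 0xe800, i.e. its top five bits are 0b11101,
	@ 0b11110 or 0b11111:
	@	insn = h;
	@	if ((h & 0xf800) >= 0xe800)
	@		insn = (h << 16) | second_halfword;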
 270	adr	r9, BSYM(1f)
 271	mov	r2, r4
 272	bl	call_fpe
 273
 274	mov	r0, sp				@ struct pt_regs *regs
 275	bl	do_undefinstr
 276
 277	@
 278	@ IRQs off again before pulling preserved data off the stack
 279	@
 2801:	disable_irq_notrace
 281
 282	@
 283	@ restore SPSR and restart the instruction
 284	@
 285	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
 286#ifdef CONFIG_TRACE_IRQFLAGS
 287	tst	r5, #PSR_I_BIT
 288	bleq	trace_hardirqs_on
 289	tst	r5, #PSR_I_BIT
 290	blne	trace_hardirqs_off
 291#endif
 292	svc_exit r5				@ return from exception
 293 UNWIND(.fnend		)
 294ENDPROC(__und_svc)
 295
 296	.align	5
 297__pabt_svc:
 298	svc_entry
 299	mov	r2, sp				@ regs
 300	pabt_helper
 301
 302	@
 303	@ IRQs off again before pulling preserved data off the stack
 304	@
 305	disable_irq_notrace
 306
 307#ifdef CONFIG_TRACE_IRQFLAGS
 308	tst	r5, #PSR_I_BIT
 309	bleq	trace_hardirqs_on
 310	tst	r5, #PSR_I_BIT
 311	blne	trace_hardirqs_off
 312#endif
 313	svc_exit r5				@ return from exception
 314 UNWIND(.fnend		)
 315ENDPROC(__pabt_svc)
 316
 317	.align	5
 318.LCcralign:
 319	.word	cr_alignment
 320#ifdef MULTI_DABORT
 321.LCprocfns:
 322	.word	processor
 323#endif
 324.LCfp:
 325	.word	fp_enter
 326
 327/*
 328 * User mode handlers
 329 *
 330 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 331 */
 332
 333#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
 334#error "sizeof(struct pt_regs) must be a multiple of 8"
 335#endif
 336
 337	.macro	usr_entry
 338 UNWIND(.fnstart	)
 339 UNWIND(.cantunwind	)	@ don't unwind the user space
 340	sub	sp, sp, #S_FRAME_SIZE
 341 ARM(	stmib	sp, {r1 - r12}	)
 342 THUMB(	stmia	sp, {r0 - r12}	)
 343
 344	ldmia	r0, {r3 - r5}
 345	add	r0, sp, #S_PC		@ here for interlock avoidance
 346	mov	r6, #-1			@  ""  ""     ""        ""
 347
 348	str	r3, [sp]		@ save the "real" r0 copied
 349					@ from the exception stack
 350
 351	@
 352	@ We are now ready to fill in the remaining blanks on the stack:
 353	@
 354	@  r4 - lr_<exception>, already fixed up for correct return/restart
 355	@  r5 - spsr_<exception>
 356	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
 357	@
 358	@ Also, separately save sp_usr and lr_usr
 359	@
 360	stmia	r0, {r4 - r6}
 361 ARM(	stmdb	r0, {sp, lr}^			)
 362 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
 363
 364	@
 365	@ Enable the alignment trap while in kernel mode
 366	@
 367	alignment_trap r0
 368
 369	@
 370	@ Clear FP to mark the first stack frame
 371	@
 372	zero_fp
 373
 374#ifdef CONFIG_IRQSOFF_TRACER
 375	bl	trace_hardirqs_off
 376#endif
 377	.endm
 378
 379	.macro	kuser_cmpxchg_check
 380#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 381#ifndef CONFIG_MMU
 382#warning "NPTL on non MMU needs fixing"
 383#else
 384	@ Make sure our user space atomic helper is restarted
 385	@ if it was interrupted in a critical region.  Here we
 386	@ perform a quick test inline since it should be false
 387	@ 99.9999% of the time.  The rest is done out of line.
 388	cmp	r4, #TASK_SIZE
 389	blhs	kuser_cmpxchg64_fixup
 390#endif
 391#endif
 392	.endm
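/*
 * The out-of-line fixup referenced above implements a restartable
 * critical section: if user space was interrupted between the first
 * and last instruction of the cmpxchg helper, its saved PC is wound
 * back to the start so the whole sequence reruns.  Sketch
 * (illustrative, not kernel code):
 *
 *	if (regs->ARM_pc >= start && regs->ARM_pc <= end)
 *		regs->ARM_pc = start;	// see kuser_cmpxchg64_fixup
 */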
 393
 394	.align	5
 395__dabt_usr:
 396	usr_entry
 397	kuser_cmpxchg_check
 398	mov	r2, sp
 399	dabt_helper
 400	b	ret_from_exception
 401 UNWIND(.fnend		)
 402ENDPROC(__dabt_usr)
 403
 404	.align	5
 405__irq_usr:
 406	usr_entry
 407	kuser_cmpxchg_check
 408	irq_handler
 409	get_thread_info tsk
 410	mov	why, #0
 411	b	ret_to_user_from_irq
 412 UNWIND(.fnend		)
 413ENDPROC(__irq_usr)
 414
 415	.ltorg
 416
 417	.align	5
 418__und_usr:
 419	usr_entry
 420
 421	mov	r2, r4
 422	mov	r3, r5
 423
 424	@
 425	@ fall through to the emulation code, which returns using r9 if
 426	@ it has emulated the instruction, or the more conventional lr
 427	@ if we are to treat this as a real undefined instruction
 428	@
 429	@  r0 - instruction
 430	@
 431	adr	r9, BSYM(ret_from_exception)
 432	adr	lr, BSYM(__und_usr_unknown)
 433	tst	r3, #PSR_T_BIT			@ Thumb mode?
 434	itet	eq				@ explicit IT needed for the 1f label
 435	subeq	r4, r2, #4			@ ARM instr at LR - 4
 436	subne	r4, r2, #2			@ Thumb instr at LR - 2
 4371:	ldreqt	r0, [r4]
 438#ifdef CONFIG_CPU_ENDIAN_BE8
 439	reveq	r0, r0				@ little endian instruction
 440#endif
 441	beq	call_fpe
 442	@ Thumb instruction
 443#if __LINUX_ARM_ARCH__ >= 7
 4442:
 445 ARM(	ldrht	r5, [r4], #2	)
 446 THUMB(	ldrht	r5, [r4]	)
 447 THUMB(	add	r4, r4, #2	)
 448	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
 449	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
 450	blo	__und_usr_unknown
 4513:	ldrht	r0, [r4]
 452	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
 453	orr	r0, r0, r5, lsl #16
 454#else
 455	b	__und_usr_unknown
 456#endif
 457 UNWIND(.fnend		)
 458ENDPROC(__und_usr)
 459
 460	@
 461	@ fallthrough to call_fpe
 462	@
 463
 464/*
 465 * The out of line fixup for the ldrt above.
 466 */
 467	.pushsection .fixup, "ax"
 4684:	mov	pc, r9
 469	.popsection
 470	.pushsection __ex_table,"a"
 471	.long	1b, 4b
 472#if __LINUX_ARM_ARCH__ >= 7
 473	.long	2b, 4b
 474	.long	3b, 4b
 475#endif
 476	.popsection
 477
 478/*
 479 * Check whether the instruction is a co-processor instruction.
 480 * If yes, we need to call the relevant co-processor handler.
 481 *
 482 * Note that we don't do a full check here for the co-processor
 483 * instructions; all instructions with bit 27 set are well
 484 * defined.  The only instructions that should fault are the
 485 * co-processor instructions.  However, we have to watch out
 486 * for the ARM6/ARM7 SWI bug.
 487 *
 488 * NEON is a special case that has to be handled here. Not all
 489 * NEON instructions are co-processor instructions, so we have
 490 * to make a special case of checking for them. Plus, there's
 491 * five groups of them, so we have a table of mask/opcode pairs
 492 * to check against, and if any match then we branch off into the
 493 * NEON handler code.
 494 *
 495 * Emulators may wish to make use of the following registers:
 496 *  r0  = instruction opcode.
 497 *  r2  = PC+4
 498 *  r9  = normal "successful" return address
 499 *  r10 = this thread's thread_info structure.
 500 *  lr  = unrecognised instruction return address
 501 */
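/*
 * The dispatch below is, roughly, in C (sketch): extract the
 * coprocessor number from instruction bits 8-11, mark it in the
 * thread's used_cp[] array, then jump through the per-CP branch
 * table that follows:
 *
 *	unsigned int cp = (insn >> 8) & 0xf;
 *	current_thread_info()->used_cp[cp] = 1;
 *	cp_table[cp]();		// "add pc, pc, r8, lsr #6"
 */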
 502	@
 503	@ Fall-through from Thumb-2 __und_usr
 504	@
 505#ifdef CONFIG_NEON
 506	adr	r6, .LCneon_thumb_opcodes
 507	b	2f
 508#endif
 509call_fpe:
 510#ifdef CONFIG_NEON
 511	adr	r6, .LCneon_arm_opcodes
 5122:
 513	ldr	r7, [r6], #4			@ mask value
 514	cmp	r7, #0				@ end mask?
 515	beq	1f
 516	and	r8, r0, r7
 517	ldr	r7, [r6], #4			@ opcode bits matching in mask
 518	cmp	r8, r7				@ NEON instruction?
 519	bne	2b
 520	get_thread_info r10
 521	mov	r7, #1
 522	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
 523	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
 524	b	do_vfp				@ let VFP handler handle this
 5251:
 526#endif
 527	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
 528	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
 529#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
 530	and	r8, r0, #0x0f000000		@ mask out op-code bits
 531	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
 532#endif
 533	moveq	pc, lr
 534	get_thread_info r10			@ get current thread
 535	and	r8, r0, #0x00000f00		@ mask out CP number
 536 THUMB(	lsr	r8, r8, #8		)
 537	mov	r7, #1
 538	add	r6, r10, #TI_USED_CP
 539 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 540 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
 541#ifdef CONFIG_IWMMXT
 542	@ Test if we need to give access to iWMMXt coprocessors
 543	ldr	r5, [r10, #TI_FLAGS]
 544	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
 545	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
 546	bcs	iwmmxt_task_enable
 547#endif
 548 ARM(	add	pc, pc, r8, lsr #6	)
 549 THUMB(	lsl	r8, r8, #2		)
 550 THUMB(	add	pc, r8			)
 551	nop
 552
 553	movw_pc	lr				@ CP#0
 554	W(b)	do_fpe				@ CP#1 (FPE)
 555	W(b)	do_fpe				@ CP#2 (FPE)
 556	movw_pc	lr				@ CP#3
 557#ifdef CONFIG_CRUNCH
 558	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
 559	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
 560	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
 561#else
 562	movw_pc	lr				@ CP#4
 563	movw_pc	lr				@ CP#5
 564	movw_pc	lr				@ CP#6
 565#endif
 566	movw_pc	lr				@ CP#7
 567	movw_pc	lr				@ CP#8
 568	movw_pc	lr				@ CP#9
 569#ifdef CONFIG_VFP
 570	W(b)	do_vfp				@ CP#10 (VFP)
 571	W(b)	do_vfp				@ CP#11 (VFP)
 572#else
 573	movw_pc	lr				@ CP#10 (VFP)
 574	movw_pc	lr				@ CP#11 (VFP)
 575#endif
 576	movw_pc	lr				@ CP#12
 577	movw_pc	lr				@ CP#13
 578	movw_pc	lr				@ CP#14 (Debug)
 579	movw_pc	lr				@ CP#15 (Control)
 580
 581#ifdef CONFIG_NEON
 582	.align	6
 583
 584.LCneon_arm_opcodes:
 585	.word	0xfe000000			@ mask
 586	.word	0xf2000000			@ opcode
 587
 588	.word	0xff100000			@ mask
 589	.word	0xf4000000			@ opcode
 590
 591	.word	0x00000000			@ mask
 592	.word	0x00000000			@ opcode
 593
 594.LCneon_thumb_opcodes:
 595	.word	0xef000000			@ mask
 596	.word	0xef000000			@ opcode
 597
 598	.word	0xff100000			@ mask
 599	.word	0xf9000000			@ opcode
 600
 601	.word	0x00000000			@ mask
 602	.word	0x00000000			@ opcode
 603#endif
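/*
 * The NEON match loop walks the {mask, opcode} pairs above until a
 * zero mask terminates the table; in C terms (sketch, with a
 * hypothetical handler name):
 *
 *	for (p = table; p->mask; p++)
 *		if ((insn & p->mask) == p->opcode)
 *			return neon_handler(insn);	// really: b do_vfp
 */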
 604
 605do_fpe:
 606	enable_irq
 607	ldr	r4, .LCfp
 608	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
 609	ldr	pc, [r4]			@ Call FP module USR entry point
 610
 611/*
 612 * The FP module is called with these registers set:
 613 *  r0  = instruction
 614 *  r2  = PC+4
 615 *  r9  = normal "successful" return address
 616 *  r10 = FP workspace
 617 *  lr  = unrecognised FP instruction return address
 618 */
 619
 620	.pushsection .data
 621ENTRY(fp_enter)
 622	.word	no_fp
 623	.popsection
 624
 625ENTRY(no_fp)
 626	mov	pc, lr
 627ENDPROC(no_fp)
 628
 629__und_usr_unknown:
 630	enable_irq
 631	mov	r0, sp
 632	adr	lr, BSYM(ret_from_exception)
 633	b	do_undefinstr
 634ENDPROC(__und_usr_unknown)
 635
 636	.align	5
 637__pabt_usr:
 638	usr_entry
 639	mov	r2, sp				@ regs
 640	pabt_helper
 641 UNWIND(.fnend		)
 642	/* fall through */
 643/*
 644 * This is the return code to user mode for abort handlers
 645 */
 646ENTRY(ret_from_exception)
 647 UNWIND(.fnstart	)
 648 UNWIND(.cantunwind	)
 649	get_thread_info tsk
 650	mov	why, #0
 651	b	ret_to_user
 652 UNWIND(.fnend		)
 653ENDPROC(__pabt_usr)
 654ENDPROC(ret_from_exception)
 655
 656/*
 657 * Register switch for ARMv3 and ARMv4 processors
 658 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 659 * previous and next are guaranteed not to be the same.
 660 */
 661ENTRY(__switch_to)
 662 UNWIND(.fnstart	)
 663 UNWIND(.cantunwind	)
 664	add	ip, r1, #TI_CPU_SAVE
 665	ldr	r3, [r2, #TI_TP_VALUE]
 666 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 667 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 668 THUMB(	str	sp, [ip], #4		   )
 669 THUMB(	str	lr, [ip], #4		   )
 670#ifdef CONFIG_CPU_USE_DOMAINS
 671	ldr	r6, [r2, #TI_CPU_DOMAIN]
 672#endif
 673	set_tls	r3, r4, r5
 674#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
 675	ldr	r7, [r2, #TI_TASK]
 676	ldr	r8, =__stack_chk_guard
 677	ldr	r7, [r7, #TSK_STACK_CANARY]
 678#endif
 679#ifdef CONFIG_CPU_USE_DOMAINS
 680	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
 681#endif
 682	mov	r5, r0
 683	add	r4, r2, #TI_CPU_SAVE
 684	ldr	r0, =thread_notify_head
 685	mov	r1, #THREAD_NOTIFY_SWITCH
 686	bl	atomic_notifier_call_chain
 687#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
 688	str	r7, [r8]
 689#endif
 690 THUMB(	mov	ip, r4			   )
 691	mov	r0, r5
 692 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 693 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 694 THUMB(	ldr	sp, [ip], #4		   )
 695 THUMB(	ldr	pc, [ip]		   )
 696 UNWIND(.fnend		)
 697ENDPROC(__switch_to)
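/*
 * __switch_to in outline, as a C sketch (cpu_context is the real
 * thread_info field; save_regs/load_regs are illustrative names):
 * save the callee-saved registers into prev's cpu_context, run the
 * THREAD_NOTIFY_SWITCH chain, then reload next's cpu_context --
 * restoring its saved PC completes the switch:
 *
 *	save_regs(&prev_ti->cpu_context);		// stmia ip!, {...}
 *	atomic_notifier_call_chain(&thread_notify_head,
 *				   THREAD_NOTIFY_SWITCH, next_ti);
 *	load_regs(&next_ti->cpu_context);		// ldmia r4, {..., pc}
 */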
 698
 699	__INIT
 700
 701/*
 702 * User helpers.
 703 *
 704 * Each segment is 32-byte aligned and will be moved to the top of the high
 705 * vector page.  New segments (if ever needed) must be added in front of
 706 * existing ones.  This mechanism should be used only for things that are
 707 * really small and justified, and not be abused freely.
 708 *
 709 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 710 */
 711 THUMB(	.arm	)
 712
 713	.macro	usr_ret, reg
 714#ifdef CONFIG_ARM_THUMB
 715	bx	\reg
 716#else
 717	mov	pc, \reg
 718#endif
 719	.endm
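/*
 * User space reaches these helpers by calling fixed addresses in the
 * vector page, as described in
 * Documentation/arm/kernel_user_helpers.txt.  E.g. for
 * __kuser_get_tls at 0xffff0fe0 (illustrative sketch):
 *
 *	typedef void * (*kuser_get_tls_t)(void);
 *	#define kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)
 *
 *	void *tls = kuser_get_tls();
 */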
 720
 721	.align	5
 722	.globl	__kuser_helper_start
 723__kuser_helper_start:
 724
 725/*
 726 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 727 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 728 */
 729
 730__kuser_cmpxchg64:				@ 0xffff0f60
 731
 732#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 733
 734	/*
 735	 * Poor you.  No fast solution possible...
 736	 * The kernel itself must perform the operation.
 737	 * A special ghost syscall is used for that (see traps.c).
 738	 */
 739	stmfd	sp!, {r7, lr}
 740	ldr	r7, 1f			@ it's 20 bits
 741	swi	__ARM_NR_cmpxchg64
 742	ldmfd	sp!, {r7, pc}
 7431:	.word	__ARM_NR_cmpxchg64
 744
 745#elif defined(CONFIG_CPU_32v6K)
 746
 747	stmfd	sp!, {r4, r5, r6, r7}
 748	ldrd	r4, r5, [r0]			@ load old val
 749	ldrd	r6, r7, [r1]			@ load new val
 750	smp_dmb	arm
 7511:	ldrexd	r0, r1, [r2]			@ load current val
 752	eors	r3, r0, r4			@ compare with oldval (1)
 753	eoreqs	r3, r1, r5			@ compare with oldval (2)
 754	strexdeq r3, r6, r7, [r2]		@ store newval if eq
 755	teqeq	r3, #1				@ success?
 756	beq	1b				@ if not, retry
 757	smp_dmb	arm
 758	rsbs	r0, r3, #0			@ set returned val and C flag
 759	ldmfd	sp!, {r4, r5, r6, r7}
 760	bx	lr
 761
 762#elif !defined(CONFIG_SMP)
 763
 764#ifdef CONFIG_MMU
 765
 766	/*
 767	 * The only thing that can break atomicity in this cmpxchg64
 768	 * implementation is either an IRQ or a data abort exception
 769	 * causing another process/thread to be scheduled in the middle of
 770	 * the critical sequence.  The same strategy as for cmpxchg is used.
 771	 */
 772	stmfd	sp!, {r4, r5, r6, lr}
 773	ldmia	r0, {r4, r5}			@ load old val
 774	ldmia	r1, {r6, lr}			@ load new val
 7751:	ldmia	r2, {r0, r1}			@ load current val
 776	eors	r3, r0, r4			@ compare with oldval (1)
 777	eoreqs	r3, r1, r5			@ compare with oldval (2)
 7782:	stmeqia	r2, {r6, lr}			@ store newval if eq
 779	rsbs	r0, r3, #0			@ set return val and C flag
 780	ldmfd	sp!, {r4, r5, r6, pc}
 781
 782	.text
 783kuser_cmpxchg64_fixup:
 784	@ Called from kuser_cmpxchg_fixup.
 785	@ r4 = address of interrupted insn (must be preserved).
 786	@ sp = saved regs. r7 and r8 are clobbered.
 787	@ 1b = first critical insn, 2b = last critical insn.
 788	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
 789	mov	r7, #0xffff0fff
 790	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
 791	subs	r8, r4, r7
 792	rsbcss	r8, r8, #(2b - 1b)
 793	strcs	r7, [sp, #S_PC]
 794#if __LINUX_ARM_ARCH__ < 6
 795	bcc	kuser_cmpxchg32_fixup
 796#endif
 797	mov	pc, lr
 798	.previous
 799
 800#else
 801#warning "NPTL on non MMU needs fixing"
 802	mov	r0, #-1
 803	adds	r0, r0, #0
 804	usr_ret	lr
 805#endif
 806
 807#else
 808#error "incoherent kernel configuration"
 809#endif
 810
 811	/* pad to next slot */
 812	.rept	(16 - (. - __kuser_cmpxchg64)/4)
 813	.word	0
 814	.endr
 815
 816	.align	5
 817
 818__kuser_memory_barrier:				@ 0xffff0fa0
 819	smp_dmb	arm
 820	usr_ret	lr
 821
 822	.align	5
 823
 824__kuser_cmpxchg:				@ 0xffff0fc0
 825
 826#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 827
 828	/*
 829	 * Poor you.  No fast solution possible...
 830	 * The kernel itself must perform the operation.
 831	 * A special ghost syscall is used for that (see traps.c).
 832	 */
 833	stmfd	sp!, {r7, lr}
 834	ldr	r7, 1f			@ it's 20 bits
 835	swi	__ARM_NR_cmpxchg
 836	ldmfd	sp!, {r7, pc}
 8371:	.word	__ARM_NR_cmpxchg
 838
 839#elif __LINUX_ARM_ARCH__ < 6
 840
 841#ifdef CONFIG_MMU
 842
 843	/*
 844	 * The only thing that can break atomicity in this cmpxchg
 845	 * implementation is either an IRQ or a data abort exception
 846	 * causing another process/thread to be scheduled in the middle
 847	 * of the critical sequence.  To prevent this, code is added to
 848	 * the IRQ and data abort exception handlers to set the pc back
 849	 * to the beginning of the critical section if it is found to be
 850	 * within that critical section (see kuser_cmpxchg_fixup).
 851	 */
 8521:	ldr	r3, [r2]			@ load current val
 853	subs	r3, r3, r0			@ compare with oldval
 8542:	streq	r1, [r2]			@ store newval if eq
 855	rsbs	r0, r3, #0			@ set return val and C flag
 856	usr_ret	lr
 857
 858	.text
 859kuser_cmpxchg32_fixup:
 860	@ Called from kuser_cmpxchg_check macro.
 861	@ r4 = address of interrupted insn (must be preserved).
 862	@ sp = saved regs. r7 and r8 are clobbered.
 863	@ 1b = first critical insn, 2b = last critical insn.
 864	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
 865	mov	r7, #0xffff0fff
 866	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
 867	subs	r8, r4, r7
 868	rsbcss	r8, r8, #(2b - 1b)
 869	strcs	r7, [sp, #S_PC]
 870	mov	pc, lr
 871	.previous
 872
 873#else
 874#warning "NPTL on non MMU needs fixing"
 875	mov	r0, #-1
 876	adds	r0, r0, #0
 877	usr_ret	lr
 878#endif
 879
 880#else
 881
 882	smp_dmb	arm
 8831:	ldrex	r3, [r2]
 884	subs	r3, r3, r0
 885	strexeq	r3, r1, [r2]
 886	teqeq	r3, #1
 887	beq	1b
 888	rsbs	r0, r3, #0
 889	/* beware -- each __kuser slot must be 8 instructions max */
 890	ALT_SMP(b	__kuser_memory_barrier)
 891	ALT_UP(usr_ret	lr)
 892
 893#endif
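/*
 * Semantics of the ldrex/strex version above, as a C sketch
 * (load_exclusive/store_exclusive are illustrative names): returns 0
 * with the C flag set on success, non-zero on failure.
 *
 *	do {
 *		cur = load_exclusive(ptr);		// ldrex
 *		delta = cur - oldval;			// subs
 *		if (delta)
 *			break;				// mismatch: fail
 *	} while (store_exclusive(newval, ptr));		// strexeq; retry
 *	return -delta;					// rsbs r0, r3, #0
 */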
 894
 895	.align	5
 896
 897__kuser_get_tls:				@ 0xffff0fe0
 898	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
 899	usr_ret	lr
 900	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
 901	.rep	4
 902	.word	0			@ 0xffff0ff0 software TLS value, then
 903	.endr				@ pad up to __kuser_helper_version
 904
 905__kuser_helper_version:				@ 0xffff0ffc
 906	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
 907
 908	.globl	__kuser_helper_end
 909__kuser_helper_end:
 910
 911 THUMB(	.thumb	)
 912
 913/*
 914 * Vector stubs.
 915 *
 916 * This code is copied to 0xffff0200 so we can use branches in the
 917 * vectors, rather than ldr's.  Note that this code must not
 918 * exceed 0x300 bytes.
 919 *
 920 * Common stub entry macro:
 921 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 922 *
 923 * SP points to a minimal amount of processor-private memory, the address
 924 * of which is copied into r0 for the mode specific abort handler.
 925 */
 926	.macro	vector_stub, name, mode, correction=0
 927	.align	5
 928
 929vector_\name:
 930	.if \correction
 931	sub	lr, lr, #\correction
 932	.endif
 933
 934	@
 935	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
 936	@ (parent CPSR)
 937	@
 938	stmia	sp, {r0, lr}		@ save r0, lr
 939	mrs	lr, spsr
 940	str	lr, [sp, #8]		@ save spsr
 941
 942	@
 943	@ Prepare for SVC32 mode.  IRQs remain disabled.
 944	@
 945	mrs	r0, cpsr
 946	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
 947	msr	spsr_cxsf, r0
 948
 949	@
 950	@ the branch table must immediately follow this code
 951	@
 952	and	lr, lr, #0x0f
 953 THUMB(	adr	r0, 1f			)
 954 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
 955	mov	r0, sp
 956 ARM(	ldr	lr, [pc, lr, lsl #2]	)
 957	movs	pc, lr			@ branch to handler in SVC mode
 958ENDPROC(vector_\name)
 959
 960	.align	2
 961	@ handler addresses follow this label
 9621:
 963	.endm
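/*
 * Each stub indexes the branch table that follows it with the mode
 * field of the interrupted context's PSR (sketch):
 *
 *	handler = table[spsr & 0xf];	// USR=0, FIQ=1, IRQ=2, SVC=3 ...
 *	handler();			// via "movs pc, lr", entering SVC
 */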
 964
 965	.globl	__stubs_start
 966__stubs_start:
 967/*
 968 * Interrupt dispatcher
 969 */
 970	vector_stub	irq, IRQ_MODE, 4
 971
 972	.long	__irq_usr			@  0  (USR_26 / USR_32)
 973	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
 974	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
 975	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
 976	.long	__irq_invalid			@  4
 977	.long	__irq_invalid			@  5
 978	.long	__irq_invalid			@  6
 979	.long	__irq_invalid			@  7
 980	.long	__irq_invalid			@  8
 981	.long	__irq_invalid			@  9
 982	.long	__irq_invalid			@  a
 983	.long	__irq_invalid			@  b
 984	.long	__irq_invalid			@  c
 985	.long	__irq_invalid			@  d
 986	.long	__irq_invalid			@  e
 987	.long	__irq_invalid			@  f
 988
 989/*
 990 * Data abort dispatcher
 991 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 992 */
 993	vector_stub	dabt, ABT_MODE, 8
 994
 995	.long	__dabt_usr			@  0  (USR_26 / USR_32)
 996	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
 997	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
 998	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
 999	.long	__dabt_invalid			@  4
1000	.long	__dabt_invalid			@  5
1001	.long	__dabt_invalid			@  6
1002	.long	__dabt_invalid			@  7
1003	.long	__dabt_invalid			@  8
1004	.long	__dabt_invalid			@  9
1005	.long	__dabt_invalid			@  a
1006	.long	__dabt_invalid			@  b
1007	.long	__dabt_invalid			@  c
1008	.long	__dabt_invalid			@  d
1009	.long	__dabt_invalid			@  e
1010	.long	__dabt_invalid			@  f
1011
1012/*
1013 * Prefetch abort dispatcher
1014 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
1015 */
1016	vector_stub	pabt, ABT_MODE, 4
1017
1018	.long	__pabt_usr			@  0 (USR_26 / USR_32)
1019	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
1020	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
1021	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
1022	.long	__pabt_invalid			@  4
1023	.long	__pabt_invalid			@  5
1024	.long	__pabt_invalid			@  6
1025	.long	__pabt_invalid			@  7
1026	.long	__pabt_invalid			@  8
1027	.long	__pabt_invalid			@  9
1028	.long	__pabt_invalid			@  a
1029	.long	__pabt_invalid			@  b
1030	.long	__pabt_invalid			@  c
1031	.long	__pabt_invalid			@  d
1032	.long	__pabt_invalid			@  e
1033	.long	__pabt_invalid			@  f
1034
1035/*
1036 * Undef instr entry dispatcher
1037 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
1038 */
1039	vector_stub	und, UND_MODE
1040
1041	.long	__und_usr			@  0 (USR_26 / USR_32)
1042	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
1043	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
1044	.long	__und_svc			@  3 (SVC_26 / SVC_32)
1045	.long	__und_invalid			@  4
1046	.long	__und_invalid			@  5
1047	.long	__und_invalid			@  6
1048	.long	__und_invalid			@  7
1049	.long	__und_invalid			@  8
1050	.long	__und_invalid			@  9
1051	.long	__und_invalid			@  a
1052	.long	__und_invalid			@  b
1053	.long	__und_invalid			@  c
1054	.long	__und_invalid			@  d
1055	.long	__und_invalid			@  e
1056	.long	__und_invalid			@  f
1057
1058	.align	5
1059
1060/*=============================================================================
1061 * Undefined FIQs
1062 *-----------------------------------------------------------------------------
1063 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
1064 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
1065 * Basically to switch modes, we *HAVE* to clobber one register...  brain
1066 * damage alert!  I don't think that we can execute any code in here in any
1067 * other mode than FIQ...  Ok you can switch to another mode, but you can't
1068 * get out of that mode without clobbering one register.
1069 */
1070vector_fiq:
1071	disable_fiq
1072	subs	pc, lr, #4
1073
1074/*=============================================================================
1075 * Address exception handler
1076 *-----------------------------------------------------------------------------
1077 * These aren't too critical.
1078 * (they're not supposed to happen, and won't happen in 32-bit data mode).
1079 */
1080
1081vector_addrexcptn:
1082	b	vector_addrexcptn
1083
1084/*
1085 * We group all the following data together to optimise
1086 * for CPUs with separate I & D caches.
1087 */
1088	.align	5
1089
1090.LCvswi:
1091	.word	vector_swi
1092
1093	.globl	__stubs_end
1094__stubs_end:
1095
1096	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
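@ The W(b) branches below are PC-relative, so they survive the copy
@ only because vectors and stubs keep their relative distance: the
@ vectors are copied to 0xffff0000 and the stubs to 0xffff0200, and
@ for any stub S,
@
@	S + stubs_offset == __vectors_start + 0x200 + (S - __stubs_start)
@
@ which is exactly where S will sit relative to the copied vectors.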
1097
1098	.globl	__vectors_start
1099__vectors_start:
1100 ARM(	swi	SYS_ERROR0	)
1101 THUMB(	svc	#0		)
1102 THUMB(	nop			)
1103	W(b)	vector_und + stubs_offset
1104	W(ldr)	pc, .LCvswi + stubs_offset
1105	W(b)	vector_pabt + stubs_offset
1106	W(b)	vector_dabt + stubs_offset
1107	W(b)	vector_addrexcptn + stubs_offset
1108	W(b)	vector_irq + stubs_offset
1109	W(b)	vector_fiq + stubs_offset
1110
1111	.globl	__vectors_end
1112__vectors_end:
1113
1114	.data
1115
1116	.globl	cr_alignment
1117	.globl	cr_no_alignment
1118cr_alignment:
1119	.space	4
1120cr_no_alignment:
1121	.space	4
1122
1123#ifdef CONFIG_MULTI_IRQ_HANDLER
1124	.globl	handle_arch_irq
1125handle_arch_irq:
1126	.space	4
1127#endif
v6.2
   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/*
   3 *  linux/arch/arm/kernel/entry-armv.S
   4 *
   5 *  Copyright (C) 1996,1997,1998 Russell King.
   6 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
   7 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
   8 *
   9 *  Low-level vector interface routines
  10 *
  11 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
  12 *  that causes it to save wrong values...  Be aware!
  13 */
  14
  15#include <linux/init.h>
  16
  17#include <asm/assembler.h>
  18#include <asm/memory.h>
  19#include <asm/glue-df.h>
  20#include <asm/glue-pf.h>
  21#include <asm/vfpmacros.h>
  22#include <asm/thread_notify.h>
  23#include <asm/unwind.h>
  24#include <asm/unistd.h>
  25#include <asm/tls.h>
  26#include <asm/system_info.h>
  27#include <asm/uaccess-asm.h>
  28
  29#include "entry-header.S"
  30#include <asm/probes.h>
  31
  32/*
  33 * Interrupt handling.
  34 */
  35	.macro	irq_handler, from_user:req
  36	mov	r1, sp
  37	ldr_this_cpu r2, irq_stack_ptr, r2, r3
  38	.if	\from_user == 0
  39	@
  40	@ If we took the interrupt while running in the kernel, we may already
  41	@ be using the IRQ stack, so revert to the original value in that case.
  42	@
  43	subs	r3, r2, r1		@ SP above bottom of IRQ stack?
  44	rsbscs	r3, r3, #THREAD_SIZE	@ ... and below the top?
  45#ifdef CONFIG_VMAP_STACK
  46	ldr_va	r3, high_memory, cc	@ End of the linear region
  47	cmpcc	r3, r1			@ Stack pointer was below it?
  48#endif
  49	bcc	0f			@ If not, switch to the IRQ stack
  50	mov	r0, r1
  51	bl	generic_handle_arch_irq
  52	b	1f
  530:
  54	.endif
  55
  56	mov_l	r0, generic_handle_arch_irq
  57	bl	call_with_stack
  581:
  59	.endm
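/*
 * In C terms (sketch), the v6.2 macro above amounts to the following,
 * where on_irq_stack stands for the bounds check done above (the
 * name is illustrative):
 *
 *	if (!from_user && on_irq_stack(sp))
 *		generic_handle_arch_irq(regs);	// already on IRQ stack
 *	else
 *		call_with_stack(generic_handle_arch_irq, regs,
 *				irq_stack_ptr);	// switch stacks first
 */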
  60
  61	.macro	pabt_helper
  62	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
  63#ifdef MULTI_PABORT
  64	ldr_va	ip, processor, offset=PROCESSOR_PABT_FUNC
  65	bl_r	ip
  66#else
  67	bl	CPU_PABORT_HANDLER
  68#endif
  69	.endm
  70
  71	.macro	dabt_helper
  72
  73	@
  74	@ Call the processor-specific abort handler:
  75	@
  76	@  r2 - pt_regs
  77	@  r4 - aborted context pc
  78	@  r5 - aborted context psr
  79	@
  80	@ The abort handler must return the aborted address in r0, and
  81	@ the fault status register in r1.  r9 must be preserved.
  82	@
  83#ifdef MULTI_DABORT
  84	ldr_va	ip, processor, offset=PROCESSOR_DABT_FUNC
  85	bl_r	ip
  86#else
  87	bl	CPU_DABORT_HANDLER
  88#endif
  89	.endm
  90
  91	.section	.entry.text,"ax",%progbits
  92
  93/*
  94 * Invalid mode handlers
  95 */
  96	.macro	inv_entry, reason
  97	sub	sp, sp, #PT_REGS_SIZE
  98 ARM(	stmib	sp, {r1 - lr}		)
  99 THUMB(	stmia	sp, {r0 - r12}		)
 100 THUMB(	str	sp, [sp, #S_SP]		)
 101 THUMB(	str	lr, [sp, #S_LR]		)
 102	mov	r1, #\reason
 103	.endm
 104
 105__pabt_invalid:
 106	inv_entry BAD_PREFETCH
 107	b	common_invalid
 108ENDPROC(__pabt_invalid)
 109
 110__dabt_invalid:
 111	inv_entry BAD_DATA
 112	b	common_invalid
 113ENDPROC(__dabt_invalid)
 114
 115__irq_invalid:
 116	inv_entry BAD_IRQ
 117	b	common_invalid
 118ENDPROC(__irq_invalid)
 119
 120__und_invalid:
 121	inv_entry BAD_UNDEFINSTR
 122
 123	@
 124	@ XXX fall through to common_invalid
 125	@
 126
 127@
 128@ common_invalid - generic code for failed exception (re-entrant version of handlers)
 129@
 130common_invalid:
 131	zero_fp
 132
 133	ldmia	r0, {r4 - r6}
 134	add	r0, sp, #S_PC		@ here for interlock avoidance
 135	mov	r7, #-1			@  ""   ""    ""        ""
 136	str	r4, [sp]		@ save preserved r0
 137	stmia	r0, {r5 - r7}		@ lr_<exception>,
 138					@ cpsr_<exception>, "old_r0"
 139
 140	mov	r0, sp
 141	b	bad_mode
 142ENDPROC(__und_invalid)
 143
 144/*
 145 * SVC mode handlers
 146 */
 147
 148#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
 149#define SPFIX(code...) code
 150#else
 151#define SPFIX(code...)
 152#endif
 153
 154	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1, overflow_check=1
 155 UNWIND(.fnstart		)
 156	sub	sp, sp, #(SVC_REGS_SIZE + \stack_hole)
 157 THUMB(	add	sp, r1		)	@ get SP in a GPR without
 158 THUMB(	sub	r1, sp, r1	)	@ using a temp register
 159
 160	.if	\overflow_check
 161 UNWIND(.save	{r0 - pc}	)
 162	do_overflow_check (SVC_REGS_SIZE + \stack_hole)
 163	.endif
 164
 165#ifdef CONFIG_THUMB2_KERNEL
 166	tst	r1, #4			@ test stack pointer alignment
 167	sub	r1, sp, r1		@ restore original R1
 168	sub	sp, r1			@ restore original SP
 169#else
 170 SPFIX(	tst	sp, #4		)
 171#endif
 172 SPFIX(	subne	sp, sp, #4	)
 173
 174 ARM(	stmib	sp, {r1 - r12}	)
 175 THUMB(	stmia	sp, {r0 - r12}	)	@ No STMIB in Thumb-2
 176
 177	ldmia	r0, {r3 - r5}
 178	add	r7, sp, #S_SP		@ here for interlock avoidance
 179	mov	r6, #-1			@  ""  ""      ""       ""
 180	add	r2, sp, #(SVC_REGS_SIZE + \stack_hole)
 181 SPFIX(	addne	r2, r2, #4	)
 182	str	r3, [sp]		@ save the "real" r0 copied
 183					@ from the exception stack
 184
 185	mov	r3, lr
 186
 187	@
 188	@ We are now ready to fill in the remaining blanks on the stack:
 189	@
 190	@  r2 - sp_svc
 191	@  r3 - lr_svc
 192	@  r4 - lr_<exception>, already fixed up for correct return/restart
 193	@  r5 - spsr_<exception>
 194	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
 195	@
 196	stmia	r7, {r2 - r6}
 197
 198	get_thread_info tsk
 199	uaccess_entry tsk, r0, r1, r2, \uaccess
 200
 201	.if \trace
 202#ifdef CONFIG_TRACE_IRQFLAGS
 203	bl	trace_hardirqs_off
 204#endif
 205	.endif
 206	.endm
 207
 208	.align	5
 209__dabt_svc:
 210	svc_entry uaccess=0
 211	mov	r2, sp
 212	dabt_helper
 213 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
 214	svc_exit r5				@ return from exception
 215 UNWIND(.fnend		)
 216ENDPROC(__dabt_svc)
 217
 218	.align	5
 219__irq_svc:
 220	svc_entry
 221	irq_handler from_user=0
 222
 223#ifdef CONFIG_PREEMPTION
 224	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
 225	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
 226	teq	r8, #0				@ if preempt count != 0
 227	movne	r0, #0				@ force flags to 0
 228	tst	r0, #_TIF_NEED_RESCHED
 229	blne	svc_preempt
 230#endif
 231
 232	svc_exit r5, irq = 1			@ return from exception
 233 UNWIND(.fnend		)
 234ENDPROC(__irq_svc)
 235
 236	.ltorg
 237
 238#ifdef CONFIG_PREEMPTION
 239svc_preempt:
 240	mov	r8, lr
 2411:	bl	preempt_schedule_irq		@ irq en/disable is done inside
 242	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
 243	tst	r0, #_TIF_NEED_RESCHED
 244	reteq	r8				@ go again
 245	b	1b
 246#endif
 247
 248__und_fault:
 249	@ Correct the PC such that it is pointing at the instruction
 250	@ which caused the fault.  If the faulting instruction was ARM
 251	@ the PC will be pointing at the next instruction, and have to
 252	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
 253	@ pointing at the second half of the Thumb instruction.  We
 254	@ have to subtract 2.
 255	ldr	r2, [r0, #S_PC]
 256	sub	r2, r2, r1
 257	str	r2, [r0, #S_PC]
 258	b	do_undefinstr
 259ENDPROC(__und_fault)
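/*
 * I.e. (sketch): regs->ARM_pc -= correction, where the caller passes
 * correction = 4 for ARM mode and 2 for Thumb mode in r1.
 */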
 260
 261	.align	5
 262__und_svc:
 263#ifdef CONFIG_KPROBES
 264	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
 265	@ it obviously needs free stack space which then will belong to
 266	@ the saved context.
 267	svc_entry MAX_STACK_SIZE
 268#else
 269	svc_entry
 270#endif
 271
 272	mov	r1, #4				@ PC correction to apply
 273 THUMB(	tst	r5, #PSR_T_BIT		)	@ exception taken in Thumb mode?
 274 THUMB(	movne	r1, #2			)	@ if so, fix up PC correction
 275	mov	r0, sp				@ struct pt_regs *regs
 276	bl	__und_fault
 277
 278__und_svc_finish:
 279	get_thread_info tsk
 280	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
 281	svc_exit r5				@ return from exception
 282 UNWIND(.fnend		)
 283ENDPROC(__und_svc)
 284
 285	.align	5
 286__pabt_svc:
 287	svc_entry
 288	mov	r2, sp				@ regs
 289	pabt_helper
 290	svc_exit r5				@ return from exception
 291 UNWIND(.fnend		)
 292ENDPROC(__pabt_svc)
 293
 294	.align	5
 295__fiq_svc:
 296	svc_entry trace=0
 297	mov	r0, sp				@ struct pt_regs *regs
 298	bl	handle_fiq_as_nmi
 299	svc_exit_via_fiq
 300 UNWIND(.fnend		)
 301ENDPROC(__fiq_svc)
 302
 303/*
 304 * Abort mode handlers
 305 */
 306
 307@
 308@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
 309@ and reuses the same macros. However in abort mode we must also
 310@ save/restore lr_abt and spsr_abt to make nested aborts safe.
 311@
 312	.align 5
 313__fiq_abt:
 314	svc_entry trace=0
 315
 316 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 317 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 318 THUMB( msr	cpsr_c, r0 )
 319	mov	r1, lr		@ Save lr_abt
 320	mrs	r2, spsr	@ Save spsr_abt, abort is now safe
 321 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 322 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 323 THUMB( msr	cpsr_c, r0 )
 324	stmfd	sp!, {r1 - r2}
 325
 326	add	r0, sp, #8			@ struct pt_regs *regs
 327	bl	handle_fiq_as_nmi
 328
 329	ldmfd	sp!, {r1 - r2}
 330 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 331 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 332 THUMB( msr	cpsr_c, r0 )
 333	mov	lr, r1		@ Restore lr_abt, abort is unsafe
 334	msr	spsr_cxsf, r2	@ Restore spsr_abt
 335 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 336 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 337 THUMB( msr	cpsr_c, r0 )
 338
 339	svc_exit_via_fiq
 340 UNWIND(.fnend		)
 341ENDPROC(__fiq_abt)
 342
 343/*
 344 * User mode handlers
 345 *
 346 * EABI note: sp_svc is always 64-bit aligned here, so should PT_REGS_SIZE
 347 */
 348
 349#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
 350#error "sizeof(struct pt_regs) must be a multiple of 8"
 351#endif
 352
 353	.macro	usr_entry, trace=1, uaccess=1
 354 UNWIND(.fnstart	)
 355 UNWIND(.cantunwind	)	@ don't unwind the user space
 356	sub	sp, sp, #PT_REGS_SIZE
 357 ARM(	stmib	sp, {r1 - r12}	)
 358 THUMB(	stmia	sp, {r0 - r12}	)
 359
 360 ATRAP(	mrc	p15, 0, r7, c1, c0, 0)
 361 ATRAP(	ldr_va	r8, cr_alignment)
 362
 363	ldmia	r0, {r3 - r5}
 364	add	r0, sp, #S_PC		@ here for interlock avoidance
 365	mov	r6, #-1			@  ""  ""     ""        ""
 366
 367	str	r3, [sp]		@ save the "real" r0 copied
 368					@ from the exception stack
 369
 370	@
 371	@ We are now ready to fill in the remaining blanks on the stack:
 372	@
 373	@  r4 - lr_<exception>, already fixed up for correct return/restart
 374	@  r5 - spsr_<exception>
 375	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
 376	@
 377	@ Also, separately save sp_usr and lr_usr
 378	@
 379	stmia	r0, {r4 - r6}
 380 ARM(	stmdb	r0, {sp, lr}^			)
 381 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
 382
 383	.if \uaccess
 384	uaccess_disable ip
 385	.endif
 386
 387	@ Enable the alignment trap while in kernel mode
 388 ATRAP(	teq	r8, r7)
 389 ATRAP( mcrne	p15, 0, r8, c1, c0, 0)
 390
 391	reload_current r7, r8
 392
 393	@
 394	@ Clear FP to mark the first stack frame
 395	@
 396	zero_fp
 397
 398	.if	\trace
 399#ifdef CONFIG_TRACE_IRQFLAGS
 400	bl	trace_hardirqs_off
 401#endif
 402	ct_user_exit save = 0
 403	.endif
 404	.endm
 405
 406	.macro	kuser_cmpxchg_check
 407#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
 408#ifndef CONFIG_MMU
 409#warning "NPTL on non MMU needs fixing"
 410#else
 411	@ Make sure our user space atomic helper is restarted
 412	@ if it was interrupted in a critical region.  Here we
 413	@ perform a quick test inline since it should be false
 414	@ 99.9999% of the time.  The rest is done out of line.
 415	ldr	r0, =TASK_SIZE
 416	cmp	r4, r0
 417	blhs	kuser_cmpxchg64_fixup
 418#endif
 419#endif
 420	.endm
 421
 422	.align	5
 423__dabt_usr:
 424	usr_entry uaccess=0
 425	kuser_cmpxchg_check
 426	mov	r2, sp
 427	dabt_helper
 428	b	ret_from_exception
 429 UNWIND(.fnend		)
 430ENDPROC(__dabt_usr)
 431
 432	.align	5
 433__irq_usr:
 434	usr_entry
 435	kuser_cmpxchg_check
 436	irq_handler from_user=1
 437	get_thread_info tsk
 438	mov	why, #0
 439	b	ret_to_user_from_irq
 440 UNWIND(.fnend		)
 441ENDPROC(__irq_usr)
 442
 443	.ltorg
 444
 445	.align	5
 446__und_usr:
 447	usr_entry uaccess=0
 448
 449	mov	r2, r4
 450	mov	r3, r5
 451
 452	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
 453	@      faulting instruction depending on Thumb mode.
 454	@ r3 = regs->ARM_cpsr
 455	@
 456	@ The emulation code returns using r9 if it has emulated the
 457	@ instruction, or the more conventional lr if we are to treat
 458	@ this as a real undefined instruction
 459	@
 460	badr	r9, ret_from_exception
 461
 462	@ IRQs must be enabled before attempting to read the instruction from
 463	@ user space since that could cause a page/translation fault if the
 464	@ page table was modified by another CPU.
 465	enable_irq
 466
 467	tst	r3, #PSR_T_BIT			@ Thumb mode?
 468	bne	__und_usr_thumb
 469	sub	r4, r2, #4			@ ARM instr at LR - 4
 4701:	ldrt	r0, [r4]
 471 ARM_BE8(rev	r0, r0)				@ little endian instruction
 472
 473	uaccess_disable ip
 474
 475	@ r0 = 32-bit ARM instruction which caused the exception
 476	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
 477	@ r4 = PC value for the faulting instruction
 478	@ lr = 32-bit undefined instruction function
 479	badr	lr, __und_usr_fault_32
 480	b	call_fpe
 481
 482__und_usr_thumb:
 483	@ Thumb instruction
 484	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
 485#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
 486/*
 487 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 488 * can never be supported in a single kernel, this code is not applicable at
 489 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 490 * made about .arch directives.
 491 */
 492#if __LINUX_ARM_ARCH__ < 7
 493/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
 494	ldr_va	r5, cpu_architecture
 495	cmp	r5, #CPU_ARCH_ARMv7
 496	blo	__und_usr_fault_16		@ 16bit undefined instruction
 497/*
 498 * The following code won't get run unless the running CPU really is v7, so
 499 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 500 * override the assembler target arch with the minimum required instead:
 501 */
 502	.arch	armv6t2
 503#endif
 5042:	ldrht	r5, [r4]
 505ARM_BE8(rev16	r5, r5)				@ little endian instruction
 506	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
 507	blo	__und_usr_fault_16_pan		@ 16bit undefined instruction
 5083:	ldrht	r0, [r2]
 509ARM_BE8(rev16	r0, r0)				@ little endian instruction
 510	uaccess_disable ip
 511	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
 512	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
 513	orr	r0, r0, r5, lsl #16
 514	badr	lr, __und_usr_fault_32
 515	@ r0 = the two 16-bit Thumb instructions which caused the exception
 516	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
 517	@ r4 = PC value for the first 16-bit Thumb instruction
 518	@ lr = 32bit undefined instruction function
 519
 520#if __LINUX_ARM_ARCH__ < 7
 521/* If the target arch was overridden, change it back: */
 522#ifdef CONFIG_CPU_32v6K
 523	.arch	armv6k
 524#else
 525	.arch	armv6
 526#endif
 527#endif /* __LINUX_ARM_ARCH__ < 7 */
 528#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
 529	b	__und_usr_fault_16
 530#endif
 531 UNWIND(.fnend)
 532ENDPROC(__und_usr)
 533
 534/*
 535 * The out of line fixup for the ldrt instructions above.
 536 */
 537	.pushsection .text.fixup, "ax"
 538	.align	2
 5394:	str     r4, [sp, #S_PC]			@ retry current instruction
 540	ret	r9
 541	.popsection
 542	.pushsection __ex_table,"a"
 543	.long	1b, 4b
 544#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
 545	.long	2b, 4b
 546	.long	3b, 4b
 547#endif
 548	.popsection
 549
 550/*
 551 * Check whether the instruction is a co-processor instruction.
 552 * If yes, we need to call the relevant co-processor handler.
 553 *
 554 * Note that we don't do a full check here for the co-processor
 555 * instructions; all instructions with bit 27 set are well
 556 * defined.  The only instructions that should fault are the
 557 * co-processor instructions.  However, we have to watch out
 558 * for the ARM6/ARM7 SWI bug.
 559 *
 560 * NEON is a special case that has to be handled here. Not all
 561 * NEON instructions are co-processor instructions, so we have
 562 * to make a special case of checking for them. Plus, there's
 563 * five groups of them, so we have a table of mask/opcode pairs
 564 * to check against, and if any match then we branch off into the
 565 * NEON handler code.
 566 *
 567 * Emulators may wish to make use of the following registers:
 568 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 569 *  r2  = PC value to resume execution after successful emulation
 570 *  r9  = normal "successful" return address
 571 *  r10 = this thread's thread_info structure
 572 *  lr  = unrecognised instruction return address
 573 * IRQs enabled, FIQs enabled.
 574 */
 575	@
 576	@ Fall-through from Thumb-2 __und_usr
 577	@
 578#ifdef CONFIG_NEON
 579	get_thread_info r10			@ get current thread
 580	adr	r6, .LCneon_thumb_opcodes
 581	b	2f
 582#endif
 583call_fpe:
 584	get_thread_info r10			@ get current thread
 585#ifdef CONFIG_NEON
 586	adr	r6, .LCneon_arm_opcodes
 5872:	ldr	r5, [r6], #4			@ mask value
 588	ldr	r7, [r6], #4			@ opcode bits matching in mask
 589	cmp	r5, #0				@ end mask?
 590	beq	1f
 591	and	r8, r0, r5
 592	cmp	r8, r7				@ NEON instruction?
 593	bne	2b
 594	mov	r7, #1
 595	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
 596	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
 597	b	do_vfp				@ let VFP handler handle this
 5981:
 599#endif
 600	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
 601	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
 602	reteq	lr
 603	and	r8, r0, #0x00000f00		@ mask out CP number
 604	mov	r7, #1
 605	add	r6, r10, r8, lsr #8		@ add used_cp[] array offset first
 606	strb	r7, [r6, #TI_USED_CP]		@ set appropriate used_cp[]
 607#ifdef CONFIG_IWMMXT
 608	@ Test if we need to give access to iWMMXt coprocessors
 609	ldr	r5, [r10, #TI_FLAGS]
 610	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
 611	movscs	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
 612	bcs	iwmmxt_task_enable
 613#endif
 614 ARM(	add	pc, pc, r8, lsr #6	)
 615 THUMB(	lsr	r8, r8, #6		)
 616 THUMB(	add	pc, r8			)
 617	nop
 618
 619	ret.w	lr				@ CP#0
 620	W(b)	do_fpe				@ CP#1 (FPE)
 621	W(b)	do_fpe				@ CP#2 (FPE)
 622	ret.w	lr				@ CP#3
 623	ret.w	lr				@ CP#4
 624	ret.w	lr				@ CP#5
 625	ret.w	lr				@ CP#6
 626	ret.w	lr				@ CP#7
 627	ret.w	lr				@ CP#8
 628	ret.w	lr				@ CP#9
 629#ifdef CONFIG_VFP
 630	W(b)	do_vfp				@ CP#10 (VFP)
 631	W(b)	do_vfp				@ CP#11 (VFP)
 632#else
 633	ret.w	lr				@ CP#10 (VFP)
 634	ret.w	lr				@ CP#11 (VFP)
 635#endif
 636	ret.w	lr				@ CP#12
 637	ret.w	lr				@ CP#13
 638	ret.w	lr				@ CP#14 (Debug)
 639	ret.w	lr				@ CP#15 (Control)
 640
 641#ifdef CONFIG_NEON
 642	.align	6
 643
 644.LCneon_arm_opcodes:
 645	.word	0xfe000000			@ mask
 646	.word	0xf2000000			@ opcode
 647
 648	.word	0xff100000			@ mask
 649	.word	0xf4000000			@ opcode
 650
 651	.word	0x00000000			@ mask
 652	.word	0x00000000			@ opcode
 653
 654.LCneon_thumb_opcodes:
 655	.word	0xef000000			@ mask
 656	.word	0xef000000			@ opcode
 657
 658	.word	0xff100000			@ mask
 659	.word	0xf9000000			@ opcode
 660
 661	.word	0x00000000			@ mask
 662	.word	0x00000000			@ opcode
 663#endif
 664
 665do_fpe:
 666	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
 667	ldr_va	pc, fp_enter, tmp=r4		@ Call FP module USR entry point
 668
 669/*
 670 * The FP module is called with these registers set:
 671 *  r0  = instruction
 672 *  r2  = PC+4
 673 *  r9  = normal "successful" return address
 674 *  r10 = FP workspace
 675 *  lr  = unrecognised FP instruction return address
 676 */
 677
 678	.pushsection .data
 679	.align	2
 680ENTRY(fp_enter)
 681	.word	no_fp
 682	.popsection
 683
 684ENTRY(no_fp)
 685	ret	lr
 686ENDPROC(no_fp)
 687
 688__und_usr_fault_32:
 689	mov	r1, #4
 690	b	1f
 691__und_usr_fault_16_pan:
 692	uaccess_disable ip
 693__und_usr_fault_16:
 694	mov	r1, #2
 6951:	mov	r0, sp
 696	badr	lr, ret_from_exception
 697	b	__und_fault
 698ENDPROC(__und_usr_fault_32)
 699ENDPROC(__und_usr_fault_16)
 700
 701	.align	5
 702__pabt_usr:
 703	usr_entry
 704	mov	r2, sp				@ regs
 705	pabt_helper
 706 UNWIND(.fnend		)
 707	/* fall through */
 708/*
 709 * This is the return code to user mode for abort handlers
 710 */
 711ENTRY(ret_from_exception)
 712 UNWIND(.fnstart	)
 713 UNWIND(.cantunwind	)
 714	get_thread_info tsk
 715	mov	why, #0
 716	b	ret_to_user
 717 UNWIND(.fnend		)
 718ENDPROC(__pabt_usr)
 719ENDPROC(ret_from_exception)
 720
 721	.align	5
 722__fiq_usr:
 723	usr_entry trace=0
 724	kuser_cmpxchg_check
 725	mov	r0, sp				@ struct pt_regs *regs
 726	bl	handle_fiq_as_nmi
 727	get_thread_info tsk
 728	restore_user_regs fast = 0, offset = 0
 729 UNWIND(.fnend		)
 730ENDPROC(__fiq_usr)
 731
 732/*
 733 * Register switch for ARMv3 and ARMv4 processors
 734 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 735 * previous and next are guaranteed not to be the same.
 736 */
 737ENTRY(__switch_to)
 738 UNWIND(.fnstart	)
 739 UNWIND(.cantunwind	)
 740	add	ip, r1, #TI_CPU_SAVE
 741 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 742 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 743 THUMB(	str	sp, [ip], #4		   )
 744 THUMB(	str	lr, [ip], #4		   )
 745	ldr	r4, [r2, #TI_TP_VALUE]
 746	ldr	r5, [r2, #TI_TP_VALUE + 4]
 747#ifdef CONFIG_CPU_USE_DOMAINS
 748	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
 749	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
 750	ldr	r6, [r2, #TI_CPU_DOMAIN]
 751#endif
 752	switch_tls r1, r4, r5, r3, r7
 753#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
 754    !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 755	ldr	r8, =__stack_chk_guard
 756	.if (TSK_STACK_CANARY > IMM12_MASK)
 757	add	r9, r2, #TSK_STACK_CANARY & ~IMM12_MASK
 758	ldr	r9, [r9, #TSK_STACK_CANARY & IMM12_MASK]
 759	.else
 760	ldr	r9, [r2, #TSK_STACK_CANARY & IMM12_MASK]
 761	.endif
 762#endif
 763	mov	r7, r2				@ Preserve 'next'
 764#ifdef CONFIG_CPU_USE_DOMAINS
 765	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
 766#endif
 767	mov	r5, r0
 768	add	r4, r2, #TI_CPU_SAVE
 769	ldr	r0, =thread_notify_head
 770	mov	r1, #THREAD_NOTIFY_SWITCH
 771	bl	atomic_notifier_call_chain
 772#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
 773    !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 774	str	r9, [r8]
 775#endif
 776	mov	r0, r5
 777#if !defined(CONFIG_THUMB2_KERNEL) && !defined(CONFIG_VMAP_STACK)
 778	set_current r7, r8
 779	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
 780#else
 781	mov	r1, r7
 782	ldmia	r4, {r4 - sl, fp, ip, lr}	@ Load all regs saved previously
 783#ifdef CONFIG_VMAP_STACK
 784	@
 785	@ Do a dummy read from the new stack while running from the old one so
 786	@ that we can rely on do_translation_fault() to fix up any stale PMD
 787	@ entries covering the vmalloc region.
 788	@
 789	ldr	r2, [ip]
 790#endif
 791
 792	@ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what
 793	@ effectuates the task switch, as that is what causes the observable
 794	@ values of current and current_thread_info to change. When
 795	@ CONFIG_THREAD_INFO_IN_TASK=y, setting current (and therefore
 796	@ current_thread_info) is done explicitly, and the update of SP just
 797	@ switches us to another stack, with few other side effects. In order
 798	@ to prevent this distinction from causing any inconsistencies, let's
 799	@ keep the 'set_current' call as close as we can to the update of SP.
 800	set_current r1, r2
 801	mov	sp, ip
 802	ret	lr
 803#endif
 804 UNWIND(.fnend		)
 805ENDPROC(__switch_to)
 806
 807#ifdef CONFIG_VMAP_STACK
 808	.text
 809	.align	2
 810__bad_stack:
 811	@
 812	@ We've just detected an overflow. We need to load the address of this
 813	@ CPU's overflow stack into the stack pointer register. We have only one
 814	@ scratch register so let's use a sequence of ADDs including one
 815	@ involving the PC, and decorate them with PC-relative group
 816	@ relocations. As these are ARM only, switch to ARM mode first.
 817	@
 818	@ We enter here with IP clobbered and its value stashed on the mode
 819	@ stack.
 820	@
 821THUMB(	bx	pc		)
 822THUMB(	nop			)
 823THUMB(	.arm			)
 824	ldr_this_cpu_armv6 ip, overflow_stack_ptr
 825
 826	str	sp, [ip, #-4]!			@ Preserve original SP value
 827	mov	sp, ip				@ Switch to overflow stack
 828	pop	{ip}				@ Original SP in IP
 829
 830#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
 831	mov	ip, ip				@ mov expected by unwinder
 832	push	{fp, ip, lr, pc}		@ GCC flavor frame record
 833#else
 834	str	ip, [sp, #-8]!			@ store original SP
 835	push	{fpreg, lr}			@ Clang flavor frame record
 836#endif
 837UNWIND( ldr	ip, [r0, #4]	)		@ load exception LR
 838UNWIND( str	ip, [sp, #12]	)		@ store in the frame record
 839	ldr	ip, [r0, #12]			@ reload IP
 840
 841	@ Store the original GPRs to the new stack.
 842	svc_entry uaccess=0, overflow_check=0
 843
 844UNWIND( .save   {sp, pc}	)
 845UNWIND( .save   {fpreg, lr}	)
 846UNWIND( .setfp  fpreg, sp	)
 847
 848	ldr	fpreg, [sp, #S_SP]		@ Add our frame record
 849						@ to the linked list
 850#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
 851	ldr	r1, [fp, #4]			@ reload SP at entry
 852	add	fp, fp, #12
 853#else
 854	ldr	r1, [fpreg, #8]
 855#endif
 856	str	r1, [sp, #S_SP]			@ store in pt_regs
 857
 858	@ Stash the regs for handle_bad_stack
 859	mov	r0, sp
 860
 861	@ Time to die
 862	bl	handle_bad_stack
 863	nop
 864UNWIND( .fnend			)
 865ENDPROC(__bad_stack)
 866#endif
 867
 868	__INIT
 869
 870/*
 871 * User helpers.
 872 *
 873 * Each segment is 32-byte aligned and will be moved to the top of the high
 874 * vector page.  New segments (if ever needed) must be added in front of
 875 * existing ones.  This mechanism should be used only for things that are
 876 * really small and justified, and not be abused freely.
 877 *
 878 * See Documentation/arm/kernel_user_helpers.rst for formal definitions.
 879 */
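	/*
	 * User space reaches these helpers by calling fixed addresses in
	 * the vector page, after checking the version word documented in
	 * the file referenced above. A sketch of that check (the macro
	 * name follows the documentation; fail() is an illustrative
	 * placeholder):
	 *
	 *	#define __kuser_helper_version (*(int32_t *)0xffff0ffc)
	 *
	 *	if (__kuser_helper_version < 3)
	 *		fail();	// e.g. no memory barrier helper yet
	 */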
 880 THUMB(	.arm	)
 881
 882	.macro	usr_ret, reg
 883#ifdef CONFIG_ARM_THUMB
 884	bx	\reg
 885#else
 886	ret	\reg
 887#endif
 888	.endm
 889
 890	.macro	kuser_pad, sym, size
 891	.if	(. - \sym) & 3
 892	.rept	4 - (. - \sym) & 3
 893	.byte	0
 894	.endr
 895	.endif
 896	.rept	(\size - (. - \sym)) / 4
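	@ 0xe7fddef1 is a permanently undefined (UDF-class) encoding, so
	@ a stray jump into the padding faults instead of falling through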
 897	.word	0xe7fddef1
 898	.endr
 899	.endm
 900
 901#ifdef CONFIG_KUSER_HELPERS
 902	.align	5
 903	.globl	__kuser_helper_start
 904__kuser_helper_start:
 905
 906/*
 907 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 908 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 909 */
 910
 911__kuser_cmpxchg64:				@ 0xffff0f60
 912
 913#if defined(CONFIG_CPU_32v6K)
 914
 915	stmfd	sp!, {r4, r5, r6, r7}
 916	ldrd	r4, r5, [r0]			@ load old val
 917	ldrd	r6, r7, [r1]			@ load new val
 918	smp_dmb	arm
 9191:	ldrexd	r0, r1, [r2]			@ load current val
 920	eors	r3, r0, r4			@ compare with oldval (1)
 921	eorseq	r3, r1, r5			@ compare with oldval (2)
 922	strexdeq r3, r6, r7, [r2]		@ store newval if eq
 923	teqeq	r3, #1				@ success?
 924	beq	1b				@ if not, retry
 925	smp_dmb	arm
 926	rsbs	r0, r3, #0			@ set returned val and C flag
 927	ldmfd	sp!, {r4, r5, r6, r7}
 928	usr_ret	lr
 929
 930#elif !defined(CONFIG_SMP)
 931
 932#ifdef CONFIG_MMU
 933
 934	/*
 935	 * The only thing that can break atomicity in this cmpxchg64
 936	 * implementation is either an IRQ or a data abort exception
 937	 * causing another process/thread to be scheduled in the middle of
 938	 * the critical sequence.  The same strategy as for cmpxchg is used.
 939	 */
 940	stmfd	sp!, {r4, r5, r6, lr}
 941	ldmia	r0, {r4, r5}			@ load old val
 942	ldmia	r1, {r6, lr}			@ load new val
 9431:	ldmia	r2, {r0, r1}			@ load current val
 944	eors	r3, r0, r4			@ compare with oldval (1)
 945	eorseq	r3, r1, r5			@ compare with oldval (2)
 9462:	stmiaeq	r2, {r6, lr}			@ store newval if eq
 947	rsbs	r0, r3, #0			@ set return val and C flag
 948	ldmfd	sp!, {r4, r5, r6, pc}
 949
 950	.text
 951kuser_cmpxchg64_fixup:
 952	@ Called from kuser_cmpxchg_check macro.
 953	@ r4 = address of interrupted insn (must be preserved).
 954	@ sp = saved regs. r7 and r8 are clobbered.
 955	@ 1b = first critical insn, 2b = last critical insn.
 956	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
 957	mov	r7, #0xffff0fff
 958	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
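	@ r7 now holds the user-space address of 1b, i.e.
	@ 0xffff0f60 + (1b - __kuser_cmpxchg64); the mov/sub pair is used
	@ because the combined constant is not a valid ARM immediate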
 959	subs	r8, r4, r7
 960	rsbscs	r8, r8, #(2b - 1b)
 961	strcs	r7, [sp, #S_PC]
 962#if __LINUX_ARM_ARCH__ < 6
 963	bcc	kuser_cmpxchg32_fixup
 964#endif
 965	ret	lr
 966	.previous
 967
 968#else
 969#warning "NPTL on non-MMU needs fixing"
 970	mov	r0, #-1
 971	adds	r0, r0, #0
 972	usr_ret	lr
 973#endif
 974
 975#else
 976#error "incoherent kernel configuration"
 977#endif
 978
 979	kuser_pad __kuser_cmpxchg64, 64
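	/*
	 * A user-space sketch of this helper, following
	 * Documentation/arm/kernel_user_helpers.rst (typedef and function
	 * names are illustrative):
	 *
	 *	typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
	 *					  const int64_t *newval,
	 *					  volatile int64_t *ptr);
	 *	#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
	 *
	 *	int64_t atomic_add64(volatile int64_t *ptr, int64_t val)
	 *	{
	 *		int64_t old, new;
	 *
	 *		do {
	 *			old = *ptr;
	 *			new = old + val;
	 *		} while (__kuser_cmpxchg64(&old, &new, ptr));
	 *
	 *		return new;
	 *	}
	 *
	 * The helper returns zero only if *ptr was updated, so looping
	 * until it succeeds yields an atomic read-modify-write.
	 */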
 980
 981__kuser_memory_barrier:				@ 0xffff0fa0
 982	smp_dmb	arm
 983	usr_ret	lr
 984
 985	kuser_pad __kuser_memory_barrier, 32
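	/*
	 * User-space binding for the barrier helper (a sketch; the
	 * typedef name follows the kuser helpers documentation):
	 *
	 *	typedef void (__kuser_dmb_t)(void);
	 *	#define __kuser_dmb (*(__kuser_dmb_t *)0xffff0fa0)
	 *
	 *	__kuser_dmb();		// full memory barrier
	 */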
 986
 987__kuser_cmpxchg:				@ 0xffff0fc0
 988
 989#if __LINUX_ARM_ARCH__ < 6
 990
 991#ifdef CONFIG_MMU
 992
 993	/*
 994	 * The only thing that can break atomicity in this cmpxchg
 995	 * implementation is either an IRQ or a data abort exception
 996	 * causing another process/thread to be scheduled in the middle
 997	 * of the critical sequence.  To prevent this, code is added to
 998	 * the IRQ and data abort exception handlers to set the pc back
 999	 * to the beginning of the critical section if it is found to be
1000	 * within that critical section (see kuser_cmpxchg_fixup).
1001	 */
10021:	ldr	r3, [r2]			@ load current val
1003	subs	r3, r3, r0			@ compare with oldval
10042:	streq	r1, [r2]			@ store newval if eq
1005	rsbs	r0, r3, #0			@ set return val and C flag
1006	usr_ret	lr
1007
1008	.text
1009kuser_cmpxchg32_fixup:
1010	@ Called from kuser_cmpxchg_check macro.
1011	@ r4 = address of interrupted insn (must be preserved).
1012	@ sp = saved regs. r7 and r8 are clobbered.
1013	@ 1b = first critical insn, 2b = last critical insn.
1014	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
1015	mov	r7, #0xffff0fff
1016	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
1017	subs	r8, r4, r7
1018	rsbscs	r8, r8, #(2b - 1b)
1019	strcs	r7, [sp, #S_PC]
1020	ret	lr
1021	.previous
1022
1023#else
1024#warning "NPTL on non-MMU needs fixing"
1025	mov	r0, #-1
1026	adds	r0, r0, #0
1027	usr_ret	lr
1028#endif
1029
1030#else
1031
1032	smp_dmb	arm
10331:	ldrex	r3, [r2]
1034	subs	r3, r3, r0
1035	strexeq	r3, r1, [r2]
1036	teqeq	r3, #1
1037	beq	1b
1038	rsbs	r0, r3, #0
1039	/* beware -- each __kuser slot must be 8 instructions max */
1040	ALT_SMP(b	__kuser_memory_barrier)
1041	ALT_UP(usr_ret	lr)
1042
1043#endif
1044
1045	kuser_pad __kuser_cmpxchg, 32
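	/*
	 * User-space sketch for the 32-bit helper, again following
	 * Documentation/arm/kernel_user_helpers.rst (names illustrative):
	 *
	 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
	 *					volatile int *ptr);
	 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
	 *
	 *	int atomic_add(volatile int *ptr, int val)
	 *	{
	 *		int old, new;
	 *
	 *		do {
	 *			old = *ptr;
	 *			new = old + val;
	 *		} while (__kuser_cmpxchg(old, new, ptr));
	 *
	 *		return new;
	 *	}
	 */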
1046
1047__kuser_get_tls:				@ 0xffff0fe0
1048	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
1049	usr_ret	lr
1050	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
1051	kuser_pad __kuser_get_tls, 16
1052	.rep	3
1053	.word	0			@ 0xffff0ff0 software TLS value, then
1054	.endr				@ pad up to __kuser_helper_version
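	/*
	 * User-space sketch (names illustrative):
	 *
	 *	typedef void * (__kuser_get_tls_t)(void);
	 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
	 *
	 *	void *tls = __kuser_get_tls();
	 */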
1055
1056__kuser_helper_version:				@ 0xffff0ffc
1057	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
1058
1059	.globl	__kuser_helper_end
1060__kuser_helper_end:
1061
1062#endif
1063
1064 THUMB(	.thumb	)
1065
1066/*
1067 * Vector stubs.
1068 *
1069 * This code is copied to 0xffff1000 so we can use branches in the
1070 * vectors, rather than ldr's.  Note that this code must not exceed
1071 * one page in size.
1072 *
1073 * Common stub entry macro:
1074 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
1075 *
1076 * SP points to a minimal amount of processor-private memory, the address
1077 * of which is copied into r0 for the mode specific abort handler.
1078 */
1079	.macro	vector_stub, name, mode, correction=0
1080	.align	5
1081#ifdef CONFIG_HARDEN_BRANCH_HISTORY
1082vector_bhb_bpiall_\name:
1083	mcr	p15, 0, r0, c7, c5, 6	@ BPIALL
1084	@ isb not needed due to "movs pc, lr" in the vector stub
1085	@ which gives a "context synchronisation".
1086#endif
1087
1088vector_\name:
1089	.if \correction
1090	sub	lr, lr, #\correction
1091	.endif
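	@ the correction undoes this exception's architectural lr offset
	@ (e.g. 4 for IRQ, 8 for data aborts, per the invocations below)
	@ so that lr now holds the parent PC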
1092
1093	@ Save r0, lr_<exception> (parent PC)
1094	stmia	sp, {r0, lr}		@ save r0, lr
1095
1096	@ Save spsr_<exception> (parent CPSR)
1097.Lvec_\name:
1098	mrs	lr, spsr
1099	str	lr, [sp, #8]		@ save spsr
1100
1101	@
1102	@ Prepare for SVC32 mode.  IRQs remain disabled.
1103	@
1104	mrs	r0, cpsr
1105	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
1106	msr	spsr_cxsf, r0
1107
1108	@
1109	@ the branch table must immediately follow this code
1110	@
1111	and	lr, lr, #0x0f
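	@ the low nibble of the parent mode selects the table slot, e.g.
	@ USR_32 (0x10) -> 0, FIQ_32 (0x11) -> 1, IRQ_32 (0x12) -> 2,
	@ SVC_32 (0x13) -> 3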
1112 THUMB(	adr	r0, 1f			)
1113 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
1114	mov	r0, sp
1115 ARM(	ldr	lr, [pc, lr, lsl #2]	)
1116	movs	pc, lr			@ branch to handler in SVC mode
1117ENDPROC(vector_\name)
1118
1119#ifdef CONFIG_HARDEN_BRANCH_HISTORY
1120	.subsection 1
1121	.align 5
1122vector_bhb_loop8_\name:
1123	.if \correction
1124	sub	lr, lr, #\correction
1125	.endif
1126
1127	@ Save r0, lr_<exception> (parent PC)
1128	stmia	sp, {r0, lr}
1129
1130	@ bhb workaround
1131	mov	r0, #8
11323:	W(b)	. + 4
1133	subs	r0, r0, #1
1134	bne	3b
1135	dsb	nsh
1136	@ isb not needed due to "movs pc, lr" in the vector stub
1137	@ which gives a "context synchronisation".
1138	b	.Lvec_\name
1139ENDPROC(vector_bhb_loop8_\name)
1140	.previous
1141#endif
1142
1143	.align	2
1144	@ handler addresses follow this label
11451:
1146	.endm
1147
1148	.section .stubs, "ax", %progbits
1149	@ These need to remain at the start of the section so that
1150	@ they are in range of the 'SWI' entries in the vector tables
1151	@ located 4k down.
1152.L__vector_swi:
1153	.word	vector_swi
1154#ifdef CONFIG_HARDEN_BRANCH_HISTORY
1155.L__vector_bhb_loop8_swi:
1156	.word	vector_bhb_loop8_swi
1157.L__vector_bhb_bpiall_swi:
1158	.word	vector_bhb_bpiall_swi
1159#endif
1160
1161vector_rst:
1162 ARM(	swi	SYS_ERROR0	)
1163 THUMB(	svc	#0		)
1164 THUMB(	nop			)
1165	b	vector_und
1166
1167/*
1168 * Interrupt dispatcher
1169 */
1170	vector_stub	irq, IRQ_MODE, 4
1171
1172	.long	__irq_usr			@  0  (USR_26 / USR_32)
1173	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
1174	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
1175	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
1176	.long	__irq_invalid			@  4
1177	.long	__irq_invalid			@  5
1178	.long	__irq_invalid			@  6
1179	.long	__irq_invalid			@  7
1180	.long	__irq_invalid			@  8
1181	.long	__irq_invalid			@  9
1182	.long	__irq_invalid			@  a
1183	.long	__irq_invalid			@  b
1184	.long	__irq_invalid			@  c
1185	.long	__irq_invalid			@  d
1186	.long	__irq_invalid			@  e
1187	.long	__irq_invalid			@  f
1188
1189/*
1190 * Data abort dispatcher
1191 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
1192 */
1193	vector_stub	dabt, ABT_MODE, 8
1194
1195	.long	__dabt_usr			@  0  (USR_26 / USR_32)
1196	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
1197	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
1198	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
1199	.long	__dabt_invalid			@  4
1200	.long	__dabt_invalid			@  5
1201	.long	__dabt_invalid			@  6
1202	.long	__dabt_invalid			@  7
1203	.long	__dabt_invalid			@  8
1204	.long	__dabt_invalid			@  9
1205	.long	__dabt_invalid			@  a
1206	.long	__dabt_invalid			@  b
1207	.long	__dabt_invalid			@  c
1208	.long	__dabt_invalid			@  d
1209	.long	__dabt_invalid			@  e
1210	.long	__dabt_invalid			@  f
1211
1212/*
1213 * Prefetch abort dispatcher
1214 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
1215 */
1216	vector_stub	pabt, ABT_MODE, 4
1217
1218	.long	__pabt_usr			@  0 (USR_26 / USR_32)
1219	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
1220	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
1221	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
1222	.long	__pabt_invalid			@  4
1223	.long	__pabt_invalid			@  5
1224	.long	__pabt_invalid			@  6
1225	.long	__pabt_invalid			@  7
1226	.long	__pabt_invalid			@  8
1227	.long	__pabt_invalid			@  9
1228	.long	__pabt_invalid			@  a
1229	.long	__pabt_invalid			@  b
1230	.long	__pabt_invalid			@  c
1231	.long	__pabt_invalid			@  d
1232	.long	__pabt_invalid			@  e
1233	.long	__pabt_invalid			@  f
1234
1235/*
1236 * Undef instr entry dispatcher
1237 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
1238 */
1239	vector_stub	und, UND_MODE
1240
1241	.long	__und_usr			@  0 (USR_26 / USR_32)
1242	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
1243	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
1244	.long	__und_svc			@  3 (SVC_26 / SVC_32)
1245	.long	__und_invalid			@  4
1246	.long	__und_invalid			@  5
1247	.long	__und_invalid			@  6
1248	.long	__und_invalid			@  7
1249	.long	__und_invalid			@  8
1250	.long	__und_invalid			@  9
1251	.long	__und_invalid			@  a
1252	.long	__und_invalid			@  b
1253	.long	__und_invalid			@  c
1254	.long	__und_invalid			@  d
1255	.long	__und_invalid			@  e
1256	.long	__und_invalid			@  f
1257
1258	.align	5
1259
1260/*=============================================================================
1261 * Address exception handler
1262 *-----------------------------------------------------------------------------
1263 * These aren't too critical: address exceptions are not supposed to
1264 * happen, and cannot happen in 32-bit data mode.
1265 */
1266
1267vector_addrexcptn:
1268	b	vector_addrexcptn
1269
1270/*=============================================================================
1271 * FIQ "NMI" handler
1272 *-----------------------------------------------------------------------------
1273 * Handle a FIQ using the SVC stack, allowing the FIQ to act like an
1274 * NMI does on x86 systems. This must be the last vector stub, so let's
1275 * place it in its own subsection.
1276 */
1277	.subsection 2
1278	vector_stub	fiq, FIQ_MODE, 4
1279
1280	.long	__fiq_usr			@  0  (USR_26 / USR_32)
1281	.long	__fiq_svc			@  1  (FIQ_26 / FIQ_32)
1282	.long	__fiq_svc			@  2  (IRQ_26 / IRQ_32)
1283	.long	__fiq_svc			@  3  (SVC_26 / SVC_32)
1284	.long	__fiq_svc			@  4
1285	.long	__fiq_svc			@  5
1286	.long	__fiq_svc			@  6
1287	.long	__fiq_abt			@  7
1288	.long	__fiq_svc			@  8
1289	.long	__fiq_svc			@  9
1290	.long	__fiq_svc			@  a
1291	.long	__fiq_svc			@  b
1292	.long	__fiq_svc			@  c
1293	.long	__fiq_svc			@  d
1294	.long	__fiq_svc			@  e
1295	.long	__fiq_svc			@  f
1296
1297	.globl	vector_fiq
1298
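	@ The eight hardware entries below are installed at the vector base
	@ (0xffff0000 when high vectors are enabled); each branches to its
	@ stub one page up at 0xffff1000, as noted above.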
1299	.section .vectors, "ax", %progbits
1300	W(b)	vector_rst
1301	W(b)	vector_und
1302ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_swi		)
1303THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_swi		)
1304	W(ldr)	pc, .
1305	W(b)	vector_pabt
1306	W(b)	vector_dabt
1307	W(b)	vector_addrexcptn
1308	W(b)	vector_irq
1309	W(b)	vector_fiq
1310
1311#ifdef CONFIG_HARDEN_BRANCH_HISTORY
1312	.section .vectors.bhb.loop8, "ax", %progbits
1313	W(b)	vector_rst
1314	W(b)	vector_bhb_loop8_und
1315ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi	)
1316THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_loop8_swi	)
1317	W(ldr)	pc, .
1318	W(b)	vector_bhb_loop8_pabt
1319	W(b)	vector_bhb_loop8_dabt
1320	W(b)	vector_addrexcptn
1321	W(b)	vector_bhb_loop8_irq
1322	W(b)	vector_bhb_loop8_fiq
1323
1324	.section .vectors.bhb.bpiall, "ax", %progbits
1325	W(b)	vector_rst
1326	W(b)	vector_bhb_bpiall_und
1327ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi	)
1328THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_bpiall_swi	)
1329	W(ldr)	pc, .
1330	W(b)	vector_bhb_bpiall_pabt
1331	W(b)	vector_bhb_bpiall_dabt
1332	W(b)	vector_addrexcptn
1333	W(b)	vector_bhb_bpiall_irq
1334	W(b)	vector_bhb_bpiall_fiq
1335#endif
1336
1337	.data
1338	.align	2
1339
1340	.globl	cr_alignment
1341cr_alignment:
1342	.space	4