v4.6
   1/*
   2 *  linux/arch/arm/kernel/entry-armv.S
   3 *
   4 *  Copyright (C) 1996,1997,1998 Russell King.
   5 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
   6 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License version 2 as
  10 * published by the Free Software Foundation.
  11 *
  12 *  Low-level vector interface routines
  13 *
  14 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
  15 *  that causes it to save wrong values...  Be aware!
  16 */
  17
  18#include <linux/init.h>
  19
  20#include <asm/assembler.h>
  21#include <asm/memory.h>
  22#include <asm/glue-df.h>
  23#include <asm/glue-pf.h>
  24#include <asm/vfpmacros.h>
  25#ifndef CONFIG_MULTI_IRQ_HANDLER
  26#include <mach/entry-macro.S>
  27#endif
  28#include <asm/thread_notify.h>
  29#include <asm/unwind.h>
  30#include <asm/unistd.h>
  31#include <asm/tls.h>
  32#include <asm/system_info.h>
  33
  34#include "entry-header.S"
  35#include <asm/entry-macro-multi.S>
  36#include <asm/probes.h>
  37
  38/*
  39 * Interrupt handling.
  40 */
  41	.macro	irq_handler
  42#ifdef CONFIG_MULTI_IRQ_HANDLER
  43	ldr	r1, =handle_arch_irq
  44	mov	r0, sp
  45	badr	lr, 9997f
  46	ldr	pc, [r1]
  47#else
  48	arch_irq_handler_default
  49#endif
  509997:
  51	.endm
  52
  53	.macro	pabt_helper
  54	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
  55#ifdef MULTI_PABORT
  56	ldr	ip, .LCprocfns
  57	mov	lr, pc
  58	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
  59#else
  60	bl	CPU_PABORT_HANDLER
  61#endif
  62	.endm
  63
  64	.macro	dabt_helper
  65
  66	@
  67	@ Call the processor-specific abort handler:
  68	@
  69	@  r2 - pt_regs
  70	@  r4 - aborted context pc
  71	@  r5 - aborted context psr
  72	@
  73	@ The abort handler must return the aborted address in r0, and
  74	@ the fault status register in r1.  r9 must be preserved.
  75	@
  76#ifdef MULTI_DABORT
  77	ldr	ip, .LCprocfns
  78	mov	lr, pc
  79	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
  80#else
  81	bl	CPU_DABORT_HANDLER
  82#endif
  83	.endm
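/*
 * Illustrative sketch (not part of this file): one possible shape of a
 * CPU_DABORT_HANDLER satisfying the contract above, modeled loosely on
 * the ARMv7 handler in arch/arm/mm/abort-ev7.S.  It reads the data
 * fault status/address registers from CP15 and hands off to the generic
 * C code; errata workarounds and uaccess handling are omitted here.
 */
#if 0
ENTRY(example_early_abort)
	mrc	p15, 0, r1, c5, c0, 0	@ r1 = DFSR (fault status)
	mrc	p15, 0, r0, c6, c0, 0	@ r0 = DFAR (fault address)
	b	do_DataAbort		@ r2 still holds struct pt_regs *
ENDPROC(example_early_abort)
#endif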
  84
  85#ifdef CONFIG_KPROBES
  86	.section	.kprobes.text,"ax",%progbits
  87#else
  88	.text
  89#endif
  90
  91/*
  92 * Invalid mode handlers
  93 */
  94	.macro	inv_entry, reason
  95	sub	sp, sp, #S_FRAME_SIZE
  96 ARM(	stmib	sp, {r1 - lr}		)
  97 THUMB(	stmia	sp, {r0 - r12}		)
  98 THUMB(	str	sp, [sp, #S_SP]		)
  99 THUMB(	str	lr, [sp, #S_LR]		)
 100	mov	r1, #\reason
 101	.endm
 102
 103__pabt_invalid:
 104	inv_entry BAD_PREFETCH
 105	b	common_invalid
 106ENDPROC(__pabt_invalid)
 107
 108__dabt_invalid:
 109	inv_entry BAD_DATA
 110	b	common_invalid
 111ENDPROC(__dabt_invalid)
 112
 113__irq_invalid:
 114	inv_entry BAD_IRQ
 115	b	common_invalid
 116ENDPROC(__irq_invalid)
 117
 118__und_invalid:
 119	inv_entry BAD_UNDEFINSTR
 120
 121	@
 122	@ XXX fall through to common_invalid
 123	@
 124
 125@
 126@ common_invalid - generic code for failed exception (re-entrant version of handlers)
 127@
 128common_invalid:
 129	zero_fp
 130
 131	ldmia	r0, {r4 - r6}
 132	add	r0, sp, #S_PC		@ here for interlock avoidance
 133	mov	r7, #-1			@  ""   ""    ""        ""
 134	str	r4, [sp]		@ save preserved r0
 135	stmia	r0, {r5 - r7}		@ lr_<exception>,
 136					@ cpsr_<exception>, "old_r0"
 137
 138	mov	r0, sp
 139	b	bad_mode
 140ENDPROC(__und_invalid)
 141
 142/*
 143 * SVC mode handlers
 144 */
 145
 146#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
 147#define SPFIX(code...) code
 148#else
 149#define SPFIX(code...)
 150#endif
 151
 152	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
 153 UNWIND(.fnstart		)
 154 UNWIND(.save {r0 - pc}		)
 155	sub	sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
 156#ifdef CONFIG_THUMB2_KERNEL
 157 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 158 SPFIX(	mov	r0, sp		)
 159 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 160 SPFIX(	ldr	r0, [sp]	)	@ restored
 161#else
 162 SPFIX(	tst	sp, #4		)
 163#endif
 164 SPFIX(	subeq	sp, sp, #4	)
 165	stmia	sp, {r1 - r12}
 166
 167	ldmia	r0, {r3 - r5}
 168	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
 169	mov	r6, #-1			@  ""  ""      ""       ""
 170	add	r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
 171 SPFIX(	addeq	r2, r2, #4	)
 172	str	r3, [sp, #-4]!		@ save the "real" r0 copied
 173					@ from the exception stack
 174
 175	mov	r3, lr
 176
 177	@
 178	@ We are now ready to fill in the remaining blanks on the stack:
 179	@
 180	@  r2 - sp_svc
 181	@  r3 - lr_svc
 182	@  r4 - lr_<exception>, already fixed up for correct return/restart
 183	@  r5 - spsr_<exception>
 184	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
 185	@
 186	stmia	r7, {r2 - r6}
 187
 188	uaccess_save r0
 189	.if \uaccess
 190	uaccess_disable r0
 191	.endif
 192
 193	.if \trace
 194#ifdef CONFIG_TRACE_IRQFLAGS
 195	bl	trace_hardirqs_off
 196#endif
 197	.endif
 198	.endm
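/*
 * Sketch of the pt_regs frame that svc_entry leaves on the SVC stack
 * (field offsets per asm/ptrace.h; S_FRAME_SIZE = sizeof(struct pt_regs)):
 *
 *	sp + S_OLD_R0	orig_r0 (written as -1 via r6)
 *	sp + S_PSR	spsr_<exception>
 *	sp + S_PC	lr_<exception>, the return/restart address
 *	sp + S_LR	lr_svc
 *	sp + S_SP	sp_svc
 *	sp + S_R1..S_IP	r1 - r12
 *	sp + S_R0	the "real" r0 copied from the exception stack
 */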
 199
 200	.align	5
 201__dabt_svc:
 202	svc_entry uaccess=0
 203	mov	r2, sp
 204	dabt_helper
 205 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
 206	svc_exit r5				@ return from exception
 207 UNWIND(.fnend		)
 208ENDPROC(__dabt_svc)
 209
 210	.align	5
 211__irq_svc:
 212	svc_entry
 213	irq_handler
 214
 215#ifdef CONFIG_PREEMPT
 216	get_thread_info tsk
 217	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
 218	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
 219	teq	r8, #0				@ if preempt count != 0
 220	movne	r0, #0				@ force flags to 0
 221	tst	r0, #_TIF_NEED_RESCHED
 222	blne	svc_preempt
 223#endif
 224
 225	svc_exit r5, irq = 1			@ return from exception
 226 UNWIND(.fnend		)
 227ENDPROC(__irq_svc)
 228
 229	.ltorg
 230
 231#ifdef CONFIG_PREEMPT
 232svc_preempt:
 233	mov	r8, lr
 2341:	bl	preempt_schedule_irq		@ irq en/disable is done inside
  235	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
 236	tst	r0, #_TIF_NEED_RESCHED
 237	reteq	r8				@ go again
 238	b	1b
 239#endif
 240
 241__und_fault:
 242	@ Correct the PC such that it is pointing at the instruction
 243	@ which caused the fault.  If the faulting instruction was ARM
 244	@ the PC will be pointing at the next instruction, and have to
 245	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
 246	@ pointing at the second half of the Thumb instruction.  We
 247	@ have to subtract 2.
 248	ldr	r2, [r0, #S_PC]
 249	sub	r2, r2, r1
 250	str	r2, [r0, #S_PC]
 251	b	do_undefinstr
 252ENDPROC(__und_fault)
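/*
 * Worked example: if a 32-bit ARM instruction at 0x10000 faults, the
 * saved regs->ARM_pc holds 0x10004 on entry, so the caller passes
 * r1 = 4 and the code above rewinds it to 0x10000.  For a faulting
 * Thumb instruction the saved PC is only 2 bytes ahead, hence r1 = 2.
 */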
 253
 254	.align	5
 255__und_svc:
 256#ifdef CONFIG_KPROBES
 257	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
 258	@ it obviously needs free stack space which then will belong to
 259	@ the saved context.
 260	svc_entry MAX_STACK_SIZE
 261#else
 262	svc_entry
 263#endif
 264	@
 265	@ call emulation code, which returns using r9 if it has emulated
 266	@ the instruction, or the more conventional lr if we are to treat
 267	@ this as a real undefined instruction
 268	@
 269	@  r0 - instruction
 270	@
 271#ifndef CONFIG_THUMB2_KERNEL
 272	ldr	r0, [r4, #-4]
 273#else
 274	mov	r1, #2
 275	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
  276	cmp	r0, #0xe800			@ 32-bit instruction if first half >= 0xe800
 277	blo	__und_svc_fault
 278	ldrh	r9, [r4]			@ bottom 16 bits
 279	add	r4, r4, #2
 280	str	r4, [sp, #S_PC]
 281	orr	r0, r9, r0, lsl #16
 282#endif
 283	badr	r9, __und_svc_finish
 284	mov	r2, r4
 285	bl	call_fpe
 286
 287	mov	r1, #4				@ PC correction to apply
 288__und_svc_fault:
 289	mov	r0, sp				@ struct pt_regs *regs
 290	bl	__und_fault
 291
 292__und_svc_finish:
 293	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
 294	svc_exit r5				@ return from exception
 295 UNWIND(.fnend		)
 296ENDPROC(__und_svc)
 297
 298	.align	5
 299__pabt_svc:
 300	svc_entry
 301	mov	r2, sp				@ regs
 302	pabt_helper
 303	svc_exit r5				@ return from exception
 304 UNWIND(.fnend		)
 305ENDPROC(__pabt_svc)
 306
 307	.align	5
 308__fiq_svc:
 309	svc_entry trace=0
 310	mov	r0, sp				@ struct pt_regs *regs
 311	bl	handle_fiq_as_nmi
 312	svc_exit_via_fiq
 313 UNWIND(.fnend		)
 314ENDPROC(__fiq_svc)
 315
 316	.align	5
 317.LCcralign:
 318	.word	cr_alignment
 319#ifdef MULTI_DABORT
 320.LCprocfns:
 321	.word	processor
 322#endif
 323.LCfp:
 324	.word	fp_enter
 325
 326/*
 327 * Abort mode handlers
 328 */
 329
 330@
 331@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
 332@ and reuses the same macros. However in abort mode we must also
 333@ save/restore lr_abt and spsr_abt to make nested aborts safe.
 334@
 335	.align 5
 336__fiq_abt:
 337	svc_entry trace=0
 338
 339 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 340 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 341 THUMB( msr	cpsr_c, r0 )
 342	mov	r1, lr		@ Save lr_abt
 343	mrs	r2, spsr	@ Save spsr_abt, abort is now safe
 344 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 345 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 346 THUMB( msr	cpsr_c, r0 )
 347	stmfd	sp!, {r1 - r2}
 348
 349	add	r0, sp, #8			@ struct pt_regs *regs
 350	bl	handle_fiq_as_nmi
 351
 352	ldmfd	sp!, {r1 - r2}
 353 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 354 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 355 THUMB( msr	cpsr_c, r0 )
 356	mov	lr, r1		@ Restore lr_abt, abort is unsafe
 357	msr	spsr_cxsf, r2	@ Restore spsr_abt
 358 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 359 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 360 THUMB( msr	cpsr_c, r0 )
 361
 362	svc_exit_via_fiq
 363 UNWIND(.fnend		)
 364ENDPROC(__fiq_abt)
 365
 366/*
 367 * User mode handlers
 368 *
  369 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE should be too
 370 */
 371
 372#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
 373#error "sizeof(struct pt_regs) must be a multiple of 8"
 374#endif
 375
 376	.macro	usr_entry, trace=1, uaccess=1
 377 UNWIND(.fnstart	)
 378 UNWIND(.cantunwind	)	@ don't unwind the user space
 379	sub	sp, sp, #S_FRAME_SIZE
 380 ARM(	stmib	sp, {r1 - r12}	)
 381 THUMB(	stmia	sp, {r0 - r12}	)
 382
 383 ATRAP(	mrc	p15, 0, r7, c1, c0, 0)
 384 ATRAP(	ldr	r8, .LCcralign)
 385
 386	ldmia	r0, {r3 - r5}
 387	add	r0, sp, #S_PC		@ here for interlock avoidance
 388	mov	r6, #-1			@  ""  ""     ""        ""
 389
 390	str	r3, [sp]		@ save the "real" r0 copied
 391					@ from the exception stack
 392
 393 ATRAP(	ldr	r8, [r8, #0])
 394
 395	@
 396	@ We are now ready to fill in the remaining blanks on the stack:
 397	@
 398	@  r4 - lr_<exception>, already fixed up for correct return/restart
 399	@  r5 - spsr_<exception>
 400	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
 401	@
 402	@ Also, separately save sp_usr and lr_usr
 403	@
 404	stmia	r0, {r4 - r6}
 405 ARM(	stmdb	r0, {sp, lr}^			)
 406 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
 407
 408	.if \uaccess
 409	uaccess_disable ip
 410	.endif
 411
 412	@ Enable the alignment trap while in kernel mode
 413 ATRAP(	teq	r8, r7)
 414 ATRAP( mcrne	p15, 0, r8, c1, c0, 0)
 415
 416	@
 417	@ Clear FP to mark the first stack frame
 418	@
 419	zero_fp
 420
 421	.if	\trace
 422#ifdef CONFIG_TRACE_IRQFLAGS
 423	bl	trace_hardirqs_off
 424#endif
 425	ct_user_exit save = 0
 426	.endif
 427	.endm
 428
 429	.macro	kuser_cmpxchg_check
 430#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
 431#ifndef CONFIG_MMU
 432#warning "NPTL on non MMU needs fixing"
 433#else
 434	@ Make sure our user space atomic helper is restarted
 435	@ if it was interrupted in a critical region.  Here we
 436	@ perform a quick test inline since it should be false
 437	@ 99.9999% of the time.  The rest is done out of line.
 438	cmp	r4, #TASK_SIZE
 439	blhs	kuser_cmpxchg64_fixup
 440#endif
 441#endif
 442	.endm
 443
 444	.align	5
 445__dabt_usr:
 446	usr_entry uaccess=0
 447	kuser_cmpxchg_check
 448	mov	r2, sp
 449	dabt_helper
 450	b	ret_from_exception
 451 UNWIND(.fnend		)
 452ENDPROC(__dabt_usr)
 453
 454	.align	5
 455__irq_usr:
 456	usr_entry
 457	kuser_cmpxchg_check
 458	irq_handler
 459	get_thread_info tsk
 460	mov	why, #0
 461	b	ret_to_user_from_irq
 462 UNWIND(.fnend		)
 463ENDPROC(__irq_usr)
 464
 465	.ltorg
 466
 467	.align	5
 468__und_usr:
 469	usr_entry uaccess=0
 470
 471	mov	r2, r4
 472	mov	r3, r5
 473
 474	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
 475	@      faulting instruction depending on Thumb mode.
 476	@ r3 = regs->ARM_cpsr
 477	@
 478	@ The emulation code returns using r9 if it has emulated the
 479	@ instruction, or the more conventional lr if we are to treat
 480	@ this as a real undefined instruction
 481	@
 482	badr	r9, ret_from_exception
 483
 484	@ IRQs must be enabled before attempting to read the instruction from
 485	@ user space since that could cause a page/translation fault if the
 486	@ page table was modified by another CPU.
 487	enable_irq
 488
 489	tst	r3, #PSR_T_BIT			@ Thumb mode?
 490	bne	__und_usr_thumb
 491	sub	r4, r2, #4			@ ARM instr at LR - 4
 4921:	ldrt	r0, [r4]
 493 ARM_BE8(rev	r0, r0)				@ little endian instruction
 494
 495	uaccess_disable ip
 496
 497	@ r0 = 32-bit ARM instruction which caused the exception
 498	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
 499	@ r4 = PC value for the faulting instruction
 500	@ lr = 32-bit undefined instruction function
 501	badr	lr, __und_usr_fault_32
 502	b	call_fpe
 503
 504__und_usr_thumb:
 505	@ Thumb instruction
 506	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
 507#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
 508/*
 509 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 510 * can never be supported in a single kernel, this code is not applicable at
 511 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 512 * made about .arch directives.
 513 */
 514#if __LINUX_ARM_ARCH__ < 7
 515/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
 516#define NEED_CPU_ARCHITECTURE
 517	ldr	r5, .LCcpu_architecture
 518	ldr	r5, [r5]
 519	cmp	r5, #CPU_ARCH_ARMv7
 520	blo	__und_usr_fault_16		@ 16bit undefined instruction
 521/*
 522 * The following code won't get run unless the running CPU really is v7, so
 523 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 524 * override the assembler target arch with the minimum required instead:
 525 */
 526	.arch	armv6t2
 527#endif
 5282:	ldrht	r5, [r4]
 529ARM_BE8(rev16	r5, r5)				@ little endian instruction
  530	cmp	r5, #0xe800			@ 32-bit instruction if first half >= 0xe800
 531	blo	__und_usr_fault_16_pan		@ 16bit undefined instruction
 5323:	ldrht	r0, [r2]
 533ARM_BE8(rev16	r0, r0)				@ little endian instruction
 534	uaccess_disable ip
 535	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
 536	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
 537	orr	r0, r0, r5, lsl #16
 538	badr	lr, __und_usr_fault_32
 539	@ r0 = the two 16-bit Thumb instructions which caused the exception
 540	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
 541	@ r4 = PC value for the first 16-bit Thumb instruction
 542	@ lr = 32bit undefined instruction function
 543
 544#if __LINUX_ARM_ARCH__ < 7
 545/* If the target arch was overridden, change it back: */
 546#ifdef CONFIG_CPU_32v6K
 547	.arch	armv6k
 548#else
 549	.arch	armv6
 550#endif
 551#endif /* __LINUX_ARM_ARCH__ < 7 */
 552#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
 553	b	__und_usr_fault_16
 554#endif
 555 UNWIND(.fnend)
 556ENDPROC(__und_usr)
 557
 558/*
 559 * The out of line fixup for the ldrt instructions above.
 560 */
 561	.pushsection .text.fixup, "ax"
 562	.align	2
 5634:	str     r4, [sp, #S_PC]			@ retry current instruction
 564	ret	r9
 565	.popsection
 566	.pushsection __ex_table,"a"
 567	.long	1b, 4b
 568#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
 569	.long	2b, 4b
 570	.long	3b, 4b
 571#endif
 572	.popsection
 573
 574/*
 575 * Check whether the instruction is a co-processor instruction.
 576 * If yes, we need to call the relevant co-processor handler.
 577 *
 578 * Note that we don't do a full check here for the co-processor
 579 * instructions; all instructions with bit 27 set are well
 580 * defined.  The only instructions that should fault are the
 581 * co-processor instructions.  However, we have to watch out
 582 * for the ARM6/ARM7 SWI bug.
 583 *
 584 * NEON is a special case that has to be handled here. Not all
 585 * NEON instructions are co-processor instructions, so we have
 586 * to make a special case of checking for them. Plus, there's
 587 * five groups of them, so we have a table of mask/opcode pairs
 588 * to check against, and if any match then we branch off into the
 589 * NEON handler code.
 590 *
 591 * Emulators may wish to make use of the following registers:
 592 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 593 *  r2  = PC value to resume execution after successful emulation
 594 *  r9  = normal "successful" return address
  595 *  r10 = this thread's thread_info structure
 596 *  lr  = unrecognised instruction return address
 597 * IRQs enabled, FIQs enabled.
 598 */
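/*
 * Decode note (illustrative): the bit tests below keep only encodings
 * with bits 27:26 == 0b11, i.e. the coprocessor space (CDP, LDC/STC,
 * MCR/MRC); the coprocessor number sits in bits 11:8.  For example,
 *	mrc	p15, 0, r0, c13, c0, 3
 * encodes as 0xee1d0f70: bits 27 and 26 are both set, and
 * (insn & 0x00000f00) >> 8 = 15, so it would dispatch to the CP#15
 * slot of the branch table.
 */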
 599	@
 600	@ Fall-through from Thumb-2 __und_usr
 601	@
 602#ifdef CONFIG_NEON
 603	get_thread_info r10			@ get current thread
 604	adr	r6, .LCneon_thumb_opcodes
 605	b	2f
 606#endif
 607call_fpe:
 608	get_thread_info r10			@ get current thread
 609#ifdef CONFIG_NEON
 610	adr	r6, .LCneon_arm_opcodes
 6112:	ldr	r5, [r6], #4			@ mask value
 612	ldr	r7, [r6], #4			@ opcode bits matching in mask
 613	cmp	r5, #0				@ end mask?
 614	beq	1f
 615	and	r8, r0, r5
 616	cmp	r8, r7				@ NEON instruction?
 617	bne	2b
 618	mov	r7, #1
 619	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
 620	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
 621	b	do_vfp				@ let VFP handler handle this
 6221:
 623#endif
 624	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
 625	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
 626	reteq	lr
 627	and	r8, r0, #0x00000f00		@ mask out CP number
 628 THUMB(	lsr	r8, r8, #8		)
 629	mov	r7, #1
 630	add	r6, r10, #TI_USED_CP
 631 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 632 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
 633#ifdef CONFIG_IWMMXT
 634	@ Test if we need to give access to iWMMXt coprocessors
 635	ldr	r5, [r10, #TI_FLAGS]
 636	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
 637	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
 638	bcs	iwmmxt_task_enable
 639#endif
 640 ARM(	add	pc, pc, r8, lsr #6	)
 641 THUMB(	lsl	r8, r8, #2		)
 642 THUMB(	add	pc, r8			)
 643	nop
 644
 645	ret.w	lr				@ CP#0
 646	W(b)	do_fpe				@ CP#1 (FPE)
 647	W(b)	do_fpe				@ CP#2 (FPE)
 648	ret.w	lr				@ CP#3
 649#ifdef CONFIG_CRUNCH
 650	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
 651	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
 652	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
 653#else
 654	ret.w	lr				@ CP#4
 655	ret.w	lr				@ CP#5
 656	ret.w	lr				@ CP#6
 657#endif
 658	ret.w	lr				@ CP#7
 659	ret.w	lr				@ CP#8
 660	ret.w	lr				@ CP#9
 661#ifdef CONFIG_VFP
 662	W(b)	do_vfp				@ CP#10 (VFP)
 663	W(b)	do_vfp				@ CP#11 (VFP)
 664#else
 665	ret.w	lr				@ CP#10 (VFP)
 666	ret.w	lr				@ CP#11 (VFP)
 667#endif
 668	ret.w	lr				@ CP#12
 669	ret.w	lr				@ CP#13
 670	ret.w	lr				@ CP#14 (Debug)
 671	ret.w	lr				@ CP#15 (Control)
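/*
 * Dispatch note: r8 holds CP# << 8, so "r8, lsr #6" is CP# * 4.  In ARM
 * state, reading pc in "add pc, pc, r8, lsr #6" yields the address of
 * that instruction + 8, which is the first slot of the table above (the
 * nop pads the intervening word), so execution lands on entry CP#.
 * E.g. CP#10 (VFP) reaches the W(b) do_vfp slot.
 */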
 672
 673#ifdef NEED_CPU_ARCHITECTURE
 674	.align	2
 675.LCcpu_architecture:
 676	.word	__cpu_architecture
 677#endif
 678
 679#ifdef CONFIG_NEON
 680	.align	6
 681
 682.LCneon_arm_opcodes:
 683	.word	0xfe000000			@ mask
 684	.word	0xf2000000			@ opcode
 685
 686	.word	0xff100000			@ mask
 687	.word	0xf4000000			@ opcode
 688
 689	.word	0x00000000			@ mask
 690	.word	0x00000000			@ opcode
 691
 692.LCneon_thumb_opcodes:
 693	.word	0xef000000			@ mask
 694	.word	0xef000000			@ opcode
 695
 696	.word	0xff100000			@ mask
 697	.word	0xf9000000			@ opcode
 698
 699	.word	0x00000000			@ mask
 700	.word	0x00000000			@ opcode
 701#endif
 702
 703do_fpe:
 704	ldr	r4, .LCfp
 705	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
 706	ldr	pc, [r4]			@ Call FP module USR entry point
 707
 708/*
 709 * The FP module is called with these registers set:
 710 *  r0  = instruction
 711 *  r2  = PC+4
 712 *  r9  = normal "successful" return address
 713 *  r10 = FP workspace
 714 *  lr  = unrecognised FP instruction return address
 715 */
 716
 717	.pushsection .data
 718ENTRY(fp_enter)
 719	.word	no_fp
 720	.popsection
 721
 722ENTRY(no_fp)
 723	ret	lr
 724ENDPROC(no_fp)
 725
 726__und_usr_fault_32:
 727	mov	r1, #4
 728	b	1f
 729__und_usr_fault_16_pan:
 730	uaccess_disable ip
 731__und_usr_fault_16:
 732	mov	r1, #2
 7331:	mov	r0, sp
 734	badr	lr, ret_from_exception
 735	b	__und_fault
 736ENDPROC(__und_usr_fault_32)
 737ENDPROC(__und_usr_fault_16)
 738
 739	.align	5
 740__pabt_usr:
 741	usr_entry
 742	mov	r2, sp				@ regs
 743	pabt_helper
 744 UNWIND(.fnend		)
 745	/* fall through */
 746/*
 747 * This is the return code to user mode for abort handlers
 748 */
 749ENTRY(ret_from_exception)
 750 UNWIND(.fnstart	)
 751 UNWIND(.cantunwind	)
 752	get_thread_info tsk
 753	mov	why, #0
 754	b	ret_to_user
 755 UNWIND(.fnend		)
 756ENDPROC(__pabt_usr)
 757ENDPROC(ret_from_exception)
 758
 759	.align	5
 760__fiq_usr:
 761	usr_entry trace=0
 762	kuser_cmpxchg_check
 763	mov	r0, sp				@ struct pt_regs *regs
 764	bl	handle_fiq_as_nmi
 765	get_thread_info tsk
 766	restore_user_regs fast = 0, offset = 0
 767 UNWIND(.fnend		)
 768ENDPROC(__fiq_usr)
 769
 770/*
 771 * Register switch for ARMv3 and ARMv4 processors
 772 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 773 * previous and next are guaranteed not to be the same.
 774 */
 775ENTRY(__switch_to)
 776 UNWIND(.fnstart	)
 777 UNWIND(.cantunwind	)
 778	add	ip, r1, #TI_CPU_SAVE
 779 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 780 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 781 THUMB(	str	sp, [ip], #4		   )
 782 THUMB(	str	lr, [ip], #4		   )
 783	ldr	r4, [r2, #TI_TP_VALUE]
 784	ldr	r5, [r2, #TI_TP_VALUE + 4]
 785#ifdef CONFIG_CPU_USE_DOMAINS
 786	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
 787	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
 788	ldr	r6, [r2, #TI_CPU_DOMAIN]
 789#endif
 790	switch_tls r1, r4, r5, r3, r7
 791#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
 792	ldr	r7, [r2, #TI_TASK]
 793	ldr	r8, =__stack_chk_guard
 794	ldr	r7, [r7, #TSK_STACK_CANARY]
 795#endif
 796#ifdef CONFIG_CPU_USE_DOMAINS
 797	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
 798#endif
 799	mov	r5, r0
 800	add	r4, r2, #TI_CPU_SAVE
 801	ldr	r0, =thread_notify_head
 802	mov	r1, #THREAD_NOTIFY_SWITCH
 803	bl	atomic_notifier_call_chain
 804#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
 805	str	r7, [r8]
 806#endif
 807 THUMB(	mov	ip, r4			   )
 808	mov	r0, r5
 809 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 810 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 811 THUMB(	ldr	sp, [ip], #4		   )
 812 THUMB(	ldr	pc, [ip]		   )
 813 UNWIND(.fnend		)
 814ENDPROC(__switch_to)
 815
 816	__INIT
 817
 818/*
 819 * User helpers.
 820 *
 821 * Each segment is 32-byte aligned and will be moved to the top of the high
 822 * vector page.  New segments (if ever needed) must be added in front of
 823 * existing ones.  This mechanism should be used only for things that are
 824 * really small and justified, and not be abused freely.
 825 *
 826 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 827 */
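/*
 * Usage sketch (illustrative): user space calls these helpers at fixed
 * addresses near the top of the vector page, following the conventions
 * in Documentation/arm/kernel_user_helpers.txt.  A hypothetical caller
 * of the TLS helper, assuming ARM state and an ARMv5+ core with blx:
 */
#if 0
get_tls:
	stmfd	sp!, {lr}
	ldr	r3, =0xffff0fe0		@ __kuser_get_tls
	blx	r3			@ TLS value is returned in r0
	ldmfd	sp!, {pc}
#endif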
 828 THUMB(	.arm	)
 829
 830	.macro	usr_ret, reg
 831#ifdef CONFIG_ARM_THUMB
 832	bx	\reg
 833#else
 834	ret	\reg
 835#endif
 836	.endm
 837
 838	.macro	kuser_pad, sym, size
 839	.if	(. - \sym) & 3
 840	.rept	4 - (. - \sym) & 3
 841	.byte	0
 842	.endr
 843	.endif
 844	.rept	(\size - (. - \sym)) / 4
 845	.word	0xe7fddef1
 846	.endr
 847	.endm
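/*
 * Note on the pad value: 0xe7fddef1 is chosen to decode as an undefined
 * instruction in both ARM and Thumb, so a stray branch into helper
 * padding traps immediately instead of executing whatever follows.
 */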
 848
 849#ifdef CONFIG_KUSER_HELPERS
 850	.align	5
 851	.globl	__kuser_helper_start
 852__kuser_helper_start:
 853
 854/*
 855 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 856 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 857 */
 858
 859__kuser_cmpxchg64:				@ 0xffff0f60
 860
 861#if defined(CONFIG_CPU_32v6K)
 862
 863	stmfd	sp!, {r4, r5, r6, r7}
 864	ldrd	r4, r5, [r0]			@ load old val
 865	ldrd	r6, r7, [r1]			@ load new val
 866	smp_dmb	arm
 8671:	ldrexd	r0, r1, [r2]			@ load current val
 868	eors	r3, r0, r4			@ compare with oldval (1)
 869	eoreqs	r3, r1, r5			@ compare with oldval (2)
 870	strexdeq r3, r6, r7, [r2]		@ store newval if eq
 871	teqeq	r3, #1				@ success?
  872	beq	1b				@ if not, retry
 873	smp_dmb	arm
 874	rsbs	r0, r3, #0			@ set returned val and C flag
 875	ldmfd	sp!, {r4, r5, r6, r7}
 876	usr_ret	lr
 877
 878#elif !defined(CONFIG_SMP)
 879
 880#ifdef CONFIG_MMU
 881
 882	/*
 883	 * The only thing that can break atomicity in this cmpxchg64
 884	 * implementation is either an IRQ or a data abort exception
 885	 * causing another process/thread to be scheduled in the middle of
 886	 * the critical sequence.  The same strategy as for cmpxchg is used.
 887	 */
 888	stmfd	sp!, {r4, r5, r6, lr}
 889	ldmia	r0, {r4, r5}			@ load old val
 890	ldmia	r1, {r6, lr}			@ load new val
 8911:	ldmia	r2, {r0, r1}			@ load current val
 892	eors	r3, r0, r4			@ compare with oldval (1)
 893	eoreqs	r3, r1, r5			@ compare with oldval (2)
 8942:	stmeqia	r2, {r6, lr}			@ store newval if eq
 895	rsbs	r0, r3, #0			@ set return val and C flag
 896	ldmfd	sp!, {r4, r5, r6, pc}
 897
 898	.text
 899kuser_cmpxchg64_fixup:
 900	@ Called from kuser_cmpxchg_fixup.
 901	@ r4 = address of interrupted insn (must be preserved).
 902	@ sp = saved regs. r7 and r8 are clobbered.
 903	@ 1b = first critical insn, 2b = last critical insn.
 904	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
 905	mov	r7, #0xffff0fff
 906	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
 907	subs	r8, r4, r7
 908	rsbcss	r8, r8, #(2b - 1b)
 909	strcs	r7, [sp, #S_PC]
 910#if __LINUX_ARM_ARCH__ < 6
 911	bcc	kuser_cmpxchg32_fixup
 912#endif
 913	ret	lr
 914	.previous
 915
 916#else
 917#warning "NPTL on non MMU needs fixing"
 918	mov	r0, #-1
 919	adds	r0, r0, #0
 920	usr_ret	lr
 921#endif
 922
 923#else
 924#error "incoherent kernel configuration"
 925#endif
 926
 927	kuser_pad __kuser_cmpxchg64, 64
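/*
 * Calling sketch for __kuser_cmpxchg64 (illustrative, per
 * Documentation/arm/kernel_user_helpers.txt): r0 = pointer to the
 * expected 64-bit value, r1 = pointer to the new value, r2 = pointer
 * to the target.  The "mov lr, pc" call sequence also works on
 * pre-v5 cores.
 */
#if 0
	@ r0 = &expected, r1 = &desired, r2 = &target (int64_t pointers)
	ldr	ip, =0xffff0f60		@ __kuser_cmpxchg64
	mov	lr, pc			@ set return address
	mov	pc, ip			@ call the helper
	@ r0 == 0 and C set iff *target was replaced;
	@ r3, lr and the flags are clobbered
#endif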
 928
 929__kuser_memory_barrier:				@ 0xffff0fa0
 930	smp_dmb	arm
 931	usr_ret	lr
 932
 933	kuser_pad __kuser_memory_barrier, 32
 934
 935__kuser_cmpxchg:				@ 0xffff0fc0
 936
 937#if __LINUX_ARM_ARCH__ < 6
 938
 939#ifdef CONFIG_MMU
 940
 941	/*
 942	 * The only thing that can break atomicity in this cmpxchg
 943	 * implementation is either an IRQ or a data abort exception
 944	 * causing another process/thread to be scheduled in the middle
 945	 * of the critical sequence.  To prevent this, code is added to
 946	 * the IRQ and data abort exception handlers to set the pc back
 947	 * to the beginning of the critical section if it is found to be
 948	 * within that critical section (see kuser_cmpxchg_fixup).
 949	 */
 9501:	ldr	r3, [r2]			@ load current val
 951	subs	r3, r3, r0			@ compare with oldval
 9522:	streq	r1, [r2]			@ store newval if eq
 953	rsbs	r0, r3, #0			@ set return val and C flag
 954	usr_ret	lr
 955
 956	.text
 957kuser_cmpxchg32_fixup:
 958	@ Called from kuser_cmpxchg_check macro.
 959	@ r4 = address of interrupted insn (must be preserved).
 960	@ sp = saved regs. r7 and r8 are clobbered.
 961	@ 1b = first critical insn, 2b = last critical insn.
 962	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
 963	mov	r7, #0xffff0fff
 964	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
 965	subs	r8, r4, r7
 966	rsbcss	r8, r8, #(2b - 1b)
 967	strcs	r7, [sp, #S_PC]
 968	ret	lr
 969	.previous
 970
 971#else
 972#warning "NPTL on non MMU needs fixing"
 973	mov	r0, #-1
 974	adds	r0, r0, #0
 975	usr_ret	lr
 976#endif
 977
 978#else
 979
 980	smp_dmb	arm
 9811:	ldrex	r3, [r2]
 982	subs	r3, r3, r0
 983	strexeq	r3, r1, [r2]
 984	teqeq	r3, #1
 985	beq	1b
 986	rsbs	r0, r3, #0
 987	/* beware -- each __kuser slot must be 8 instructions max */
 988	ALT_SMP(b	__kuser_memory_barrier)
 989	ALT_UP(usr_ret	lr)
 990
 991#endif
 992
 993	kuser_pad __kuser_cmpxchg, 32
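/*
 * Usage sketch (illustrative): an atomic add built on __kuser_cmpxchg,
 * mirroring the C example in Documentation/arm/kernel_user_helpers.txt.
 * Takes a pointer in r0 and an increment in r1; assumes ARM state.  The
 * helper clobbers r3, ip and the flags, and reports success by
 * returning 0 with the C flag set.
 */
#if 0
atomic_add:
	stmfd	sp!, {r4, r5, lr}
	mov	r4, r0			@ r4 = ptr
	mov	r5, r1			@ r5 = increment
1:	ldr	r0, [r4]		@ oldval
	add	r1, r0, r5		@ newval = oldval + increment
	mov	r2, r4			@ ptr
	ldr	ip, =0xffff0fc0		@ __kuser_cmpxchg
	mov	lr, pc			@ call the helper
	mov	pc, ip
	bcc	1b			@ C clear: we raced, try again
	ldmfd	sp!, {r4, r5, pc}
#endif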
 994
 995__kuser_get_tls:				@ 0xffff0fe0
 996	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
 997	usr_ret	lr
 998	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
 999	kuser_pad __kuser_get_tls, 16
1000	.rep	3
1001	.word	0			@ 0xffff0ff0 software TLS value, then
1002	.endr				@ pad up to __kuser_helper_version
1003
1004__kuser_helper_version:				@ 0xffff0ffc
1005	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
1006
1007	.globl	__kuser_helper_end
1008__kuser_helper_end:
1009
1010#endif
1011
1012 THUMB(	.thumb	)
1013
1014/*
1015 * Vector stubs.
1016 *
1017 * This code is copied to 0xffff1000 so we can use branches in the
1018 * vectors, rather than ldr's.  Note that this code must not exceed
1019 * a page size.
1020 *
1021 * Common stub entry macro:
1022 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
1023 *
1024 * SP points to a minimal amount of processor-private memory, the address
1025 * of which is copied into r0 for the mode specific abort handler.
1026 */
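/*
 * Worked example: an IRQ taken from user mode enters vector_irq with
 * spsr mode bits 0b10000 (USR_32); "and lr, lr, #0x0f" yields 0, which
 * selects the first table entry, __irq_usr.  Taken from SVC mode
 * (0b10011) it yields 3, selecting __irq_svc; every other mode lands
 * on __irq_invalid.
 */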
1027	.macro	vector_stub, name, mode, correction=0
1028	.align	5
1029
1030vector_\name:
1031	.if \correction
1032	sub	lr, lr, #\correction
1033	.endif
1034
1035	@
1036	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
1037	@ (parent CPSR)
1038	@
1039	stmia	sp, {r0, lr}		@ save r0, lr
1040	mrs	lr, spsr
1041	str	lr, [sp, #8]		@ save spsr
1042
1043	@
1044	@ Prepare for SVC32 mode.  IRQs remain disabled.
1045	@
1046	mrs	r0, cpsr
1047	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
1048	msr	spsr_cxsf, r0
1049
1050	@
1051	@ the branch table must immediately follow this code
1052	@
1053	and	lr, lr, #0x0f
1054 THUMB(	adr	r0, 1f			)
1055 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
1056	mov	r0, sp
1057 ARM(	ldr	lr, [pc, lr, lsl #2]	)
1058	movs	pc, lr			@ branch to handler in SVC mode
1059ENDPROC(vector_\name)
1060
1061	.align	2
1062	@ handler addresses follow this label
10631:
1064	.endm
1065
1066	.section .stubs, "ax", %progbits
1067	@ This must be the first word
1068	.word	vector_swi
1069
1070vector_rst:
1071 ARM(	swi	SYS_ERROR0	)
1072 THUMB(	svc	#0		)
1073 THUMB(	nop			)
1074	b	vector_und
1075
1076/*
1077 * Interrupt dispatcher
1078 */
1079	vector_stub	irq, IRQ_MODE, 4
1080
1081	.long	__irq_usr			@  0  (USR_26 / USR_32)
1082	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
1083	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
1084	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
1085	.long	__irq_invalid			@  4
1086	.long	__irq_invalid			@  5
1087	.long	__irq_invalid			@  6
1088	.long	__irq_invalid			@  7
1089	.long	__irq_invalid			@  8
1090	.long	__irq_invalid			@  9
1091	.long	__irq_invalid			@  a
1092	.long	__irq_invalid			@  b
1093	.long	__irq_invalid			@  c
1094	.long	__irq_invalid			@  d
1095	.long	__irq_invalid			@  e
1096	.long	__irq_invalid			@  f
1097
1098/*
1099 * Data abort dispatcher
1100 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
1101 */
1102	vector_stub	dabt, ABT_MODE, 8
1103
1104	.long	__dabt_usr			@  0  (USR_26 / USR_32)
1105	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
1106	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
1107	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
1108	.long	__dabt_invalid			@  4
1109	.long	__dabt_invalid			@  5
1110	.long	__dabt_invalid			@  6
1111	.long	__dabt_invalid			@  7
1112	.long	__dabt_invalid			@  8
1113	.long	__dabt_invalid			@  9
1114	.long	__dabt_invalid			@  a
1115	.long	__dabt_invalid			@  b
1116	.long	__dabt_invalid			@  c
1117	.long	__dabt_invalid			@  d
1118	.long	__dabt_invalid			@  e
1119	.long	__dabt_invalid			@  f
1120
1121/*
1122 * Prefetch abort dispatcher
1123 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
1124 */
1125	vector_stub	pabt, ABT_MODE, 4
1126
1127	.long	__pabt_usr			@  0 (USR_26 / USR_32)
1128	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
1129	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
1130	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
1131	.long	__pabt_invalid			@  4
1132	.long	__pabt_invalid			@  5
1133	.long	__pabt_invalid			@  6
1134	.long	__pabt_invalid			@  7
1135	.long	__pabt_invalid			@  8
1136	.long	__pabt_invalid			@  9
1137	.long	__pabt_invalid			@  a
1138	.long	__pabt_invalid			@  b
1139	.long	__pabt_invalid			@  c
1140	.long	__pabt_invalid			@  d
1141	.long	__pabt_invalid			@  e
1142	.long	__pabt_invalid			@  f
1143
1144/*
1145 * Undef instr entry dispatcher
1146 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
1147 */
1148	vector_stub	und, UND_MODE
1149
1150	.long	__und_usr			@  0 (USR_26 / USR_32)
1151	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
1152	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
1153	.long	__und_svc			@  3 (SVC_26 / SVC_32)
1154	.long	__und_invalid			@  4
1155	.long	__und_invalid			@  5
1156	.long	__und_invalid			@  6
1157	.long	__und_invalid			@  7
1158	.long	__und_invalid			@  8
1159	.long	__und_invalid			@  9
1160	.long	__und_invalid			@  a
1161	.long	__und_invalid			@  b
1162	.long	__und_invalid			@  c
1163	.long	__und_invalid			@  d
1164	.long	__und_invalid			@  e
1165	.long	__und_invalid			@  f
1166
1167	.align	5
1168
1169/*=============================================================================
1170 * Address exception handler
1171 *-----------------------------------------------------------------------------
1172 * These aren't too critical.
1173 * (they're not supposed to happen, and won't happen in 32-bit data mode).
1174 */
1175
1176vector_addrexcptn:
1177	b	vector_addrexcptn
1178
1179/*=============================================================================
1180 * FIQ "NMI" handler
1181 *-----------------------------------------------------------------------------
 1182 * Handle a FIQ using the SVC stack, allowing the FIQ to act like an NMI
 1183 * on x86 systems.
1184 */
1185	vector_stub	fiq, FIQ_MODE, 4
1186
1187	.long	__fiq_usr			@  0  (USR_26 / USR_32)
1188	.long	__fiq_svc			@  1  (FIQ_26 / FIQ_32)
1189	.long	__fiq_svc			@  2  (IRQ_26 / IRQ_32)
1190	.long	__fiq_svc			@  3  (SVC_26 / SVC_32)
1191	.long	__fiq_svc			@  4
1192	.long	__fiq_svc			@  5
1193	.long	__fiq_svc			@  6
1194	.long	__fiq_abt			@  7
1195	.long	__fiq_svc			@  8
1196	.long	__fiq_svc			@  9
1197	.long	__fiq_svc			@  a
1198	.long	__fiq_svc			@  b
1199	.long	__fiq_svc			@  c
1200	.long	__fiq_svc			@  d
1201	.long	__fiq_svc			@  e
1202	.long	__fiq_svc			@  f
1203
1204	.globl	vector_fiq
1205
1206	.section .vectors, "ax", %progbits
1207.L__vectors_start:
1208	W(b)	vector_rst
1209	W(b)	vector_und
1210	W(ldr)	pc, .L__vectors_start + 0x1000
1211	W(b)	vector_pabt
1212	W(b)	vector_dabt
1213	W(b)	vector_addrexcptn
1214	W(b)	vector_irq
1215	W(b)	vector_fiq
1216
1217	.data
1218
1219	.globl	cr_alignment
1220cr_alignment:
1221	.space	4
1222
1223#ifdef CONFIG_MULTI_IRQ_HANDLER
1224	.globl	handle_arch_irq
1225handle_arch_irq:
1226	.space	4
1227#endif
v3.1
   1/*
   2 *  linux/arch/arm/kernel/entry-armv.S
   3 *
   4 *  Copyright (C) 1996,1997,1998 Russell King.
   5 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
   6 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License version 2 as
  10 * published by the Free Software Foundation.
  11 *
  12 *  Low-level vector interface routines
  13 *
  14 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
  15 *  that causes it to save wrong values...  Be aware!
  16 */
  17
  18#include <asm/memory.h>
  19#include <asm/glue-df.h>
  20#include <asm/glue-pf.h>
  21#include <asm/vfpmacros.h>
  22#include <mach/entry-macro.S>
  23#include <asm/thread_notify.h>
  24#include <asm/unwind.h>
  25#include <asm/unistd.h>
  26#include <asm/tls.h>
  27
  28#include "entry-header.S"
  29#include <asm/entry-macro-multi.S>
  30
  31/*
  32 * Interrupt handling.
  33 */
  34	.macro	irq_handler
  35#ifdef CONFIG_MULTI_IRQ_HANDLER
  36	ldr	r1, =handle_arch_irq
  37	mov	r0, sp
  38	ldr	r1, [r1]
  39	adr	lr, BSYM(9997f)
  40	teq	r1, #0
  41	movne	pc, r1
  42#endif
  43	arch_irq_handler_default
  449997:
  45	.endm
  46
  47	.macro	pabt_helper
  48	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
  49#ifdef MULTI_PABORT
  50	ldr	ip, .LCprocfns
  51	mov	lr, pc
  52	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
  53#else
  54	bl	CPU_PABORT_HANDLER
  55#endif
  56	.endm
  57
  58	.macro	dabt_helper
  59
  60	@
  61	@ Call the processor-specific abort handler:
  62	@
  63	@  r2 - pt_regs
  64	@  r4 - aborted context pc
  65	@  r5 - aborted context psr
  66	@
  67	@ The abort handler must return the aborted address in r0, and
  68	@ the fault status register in r1.  r9 must be preserved.
  69	@
  70#ifdef MULTI_DABORT
  71	ldr	ip, .LCprocfns
  72	mov	lr, pc
  73	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
  74#else
  75	bl	CPU_DABORT_HANDLER
  76#endif
  77	.endm
  78
  79#ifdef CONFIG_KPROBES
  80	.section	.kprobes.text,"ax",%progbits
  81#else
  82	.text
  83#endif
  84
  85/*
  86 * Invalid mode handlers
  87 */
  88	.macro	inv_entry, reason
  89	sub	sp, sp, #S_FRAME_SIZE
  90 ARM(	stmib	sp, {r1 - lr}		)
  91 THUMB(	stmia	sp, {r0 - r12}		)
  92 THUMB(	str	sp, [sp, #S_SP]		)
  93 THUMB(	str	lr, [sp, #S_LR]		)
  94	mov	r1, #\reason
  95	.endm
  96
  97__pabt_invalid:
  98	inv_entry BAD_PREFETCH
  99	b	common_invalid
 100ENDPROC(__pabt_invalid)
 101
 102__dabt_invalid:
 103	inv_entry BAD_DATA
 104	b	common_invalid
 105ENDPROC(__dabt_invalid)
 106
 107__irq_invalid:
 108	inv_entry BAD_IRQ
 109	b	common_invalid
 110ENDPROC(__irq_invalid)
 111
 112__und_invalid:
 113	inv_entry BAD_UNDEFINSTR
 114
 115	@
 116	@ XXX fall through to common_invalid
 117	@
 118
 119@
 120@ common_invalid - generic code for failed exception (re-entrant version of handlers)
 121@
 122common_invalid:
 123	zero_fp
 124
 125	ldmia	r0, {r4 - r6}
 126	add	r0, sp, #S_PC		@ here for interlock avoidance
 127	mov	r7, #-1			@  ""   ""    ""        ""
 128	str	r4, [sp]		@ save preserved r0
 129	stmia	r0, {r5 - r7}		@ lr_<exception>,
 130					@ cpsr_<exception>, "old_r0"
 131
 132	mov	r0, sp
 133	b	bad_mode
 134ENDPROC(__und_invalid)
 135
 136/*
 137 * SVC mode handlers
 138 */
 139
 140#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
 141#define SPFIX(code...) code
 142#else
 143#define SPFIX(code...)
 144#endif
 145
 146	.macro	svc_entry, stack_hole=0
 147 UNWIND(.fnstart		)
 148 UNWIND(.save {r0 - pc}		)
 149	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 150#ifdef CONFIG_THUMB2_KERNEL
 151 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 152 SPFIX(	mov	r0, sp		)
 153 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 154 SPFIX(	ldr	r0, [sp]	)	@ restored
 155#else
 156 SPFIX(	tst	sp, #4		)
 157#endif
 158 SPFIX(	subeq	sp, sp, #4	)
 159	stmia	sp, {r1 - r12}
 160
 161	ldmia	r0, {r3 - r5}
 162	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
 163	mov	r6, #-1			@  ""  ""      ""       ""
 164	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 165 SPFIX(	addeq	r2, r2, #4	)
 166	str	r3, [sp, #-4]!		@ save the "real" r0 copied
 167					@ from the exception stack
 168
 169	mov	r3, lr
 170
 171	@
 172	@ We are now ready to fill in the remaining blanks on the stack:
 173	@
 174	@  r2 - sp_svc
 175	@  r3 - lr_svc
 176	@  r4 - lr_<exception>, already fixed up for correct return/restart
 177	@  r5 - spsr_<exception>
 178	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
 179	@
 180	stmia	r7, {r2 - r6}
 181
 182#ifdef CONFIG_TRACE_IRQFLAGS
 183	bl	trace_hardirqs_off
 184#endif
 185	.endm
 186
 187	.align	5
 188__dabt_svc:
 189	svc_entry
 190	mov	r2, sp
 191	dabt_helper
 192
 193	@
 194	@ IRQs off again before pulling preserved data off the stack
 195	@
 196	disable_irq_notrace
 197
 198#ifdef CONFIG_TRACE_IRQFLAGS
 199	tst	r5, #PSR_I_BIT
 200	bleq	trace_hardirqs_on
 201	tst	r5, #PSR_I_BIT
 202	blne	trace_hardirqs_off
 203#endif
 204	svc_exit r5				@ return from exception
 205 UNWIND(.fnend		)
 206ENDPROC(__dabt_svc)
 207
 208	.align	5
 209__irq_svc:
 210	svc_entry
 211	irq_handler
 212
 213#ifdef CONFIG_PREEMPT
 214	get_thread_info tsk
 215	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
 216	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
 217	teq	r8, #0				@ if preempt count != 0
 218	movne	r0, #0				@ force flags to 0
 219	tst	r0, #_TIF_NEED_RESCHED
 220	blne	svc_preempt
 221#endif
 222
 223#ifdef CONFIG_TRACE_IRQFLAGS
 224	@ The parent context IRQs must have been enabled to get here in
 225	@ the first place, so there's no point checking the PSR I bit.
 226	bl	trace_hardirqs_on
 227#endif
 228	svc_exit r5				@ return from exception
 229 UNWIND(.fnend		)
 230ENDPROC(__irq_svc)
 231
 232	.ltorg
 233
 234#ifdef CONFIG_PREEMPT
 235svc_preempt:
 236	mov	r8, lr
 2371:	bl	preempt_schedule_irq		@ irq en/disable is done inside
  238	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
 239	tst	r0, #_TIF_NEED_RESCHED
 240	moveq	pc, r8				@ go again
 241	b	1b
 242#endif
 243
 244	.align	5
 245__und_svc:
 246#ifdef CONFIG_KPROBES
 247	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
 248	@ it obviously needs free stack space which then will belong to
 249	@ the saved context.
 250	svc_entry 64
 251#else
 252	svc_entry
 253#endif
 254	@
 255	@ call emulation code, which returns using r9 if it has emulated
 256	@ the instruction, or the more conventional lr if we are to treat
 257	@ this as a real undefined instruction
 258	@
 259	@  r0 - instruction
 260	@
 261#ifndef	CONFIG_THUMB2_KERNEL
 262	ldr	r0, [r4, #-4]
 263#else
 264	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
 265	and	r9, r0, #0xf800
 266	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
 267	ldrhhs	r9, [r4]			@ bottom 16 bits
 268	orrhs	r0, r9, r0, lsl #16
 269#endif
 270	adr	r9, BSYM(1f)
 271	mov	r2, r4
 272	bl	call_fpe
 273
 274	mov	r0, sp				@ struct pt_regs *regs
 275	bl	do_undefinstr
 276
 277	@
 278	@ IRQs off again before pulling preserved data off the stack
 279	@
 2801:	disable_irq_notrace
 281
 282	@
 283	@ restore SPSR and restart the instruction
 284	@
 285	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
 286#ifdef CONFIG_TRACE_IRQFLAGS
 287	tst	r5, #PSR_I_BIT
 288	bleq	trace_hardirqs_on
 289	tst	r5, #PSR_I_BIT
 290	blne	trace_hardirqs_off
 291#endif
 292	svc_exit r5				@ return from exception
 293 UNWIND(.fnend		)
 294ENDPROC(__und_svc)
 295
 296	.align	5
 297__pabt_svc:
 298	svc_entry
 299	mov	r2, sp				@ regs
 300	pabt_helper
 301
 302	@
 303	@ IRQs off again before pulling preserved data off the stack
 304	@
 305	disable_irq_notrace
 306
 307#ifdef CONFIG_TRACE_IRQFLAGS
 308	tst	r5, #PSR_I_BIT
 309	bleq	trace_hardirqs_on
 310	tst	r5, #PSR_I_BIT
 311	blne	trace_hardirqs_off
 312#endif
 313	svc_exit r5				@ return from exception
 314 UNWIND(.fnend		)
 315ENDPROC(__pabt_svc)
 316
 317	.align	5
 318.LCcralign:
 319	.word	cr_alignment
 320#ifdef MULTI_DABORT
 321.LCprocfns:
 322	.word	processor
 323#endif
 324.LCfp:
 325	.word	fp_enter
 326
 327/*
 328 * User mode handlers
 329 *
  330 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE should be too
 331 */
 332
 333#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
 334#error "sizeof(struct pt_regs) must be a multiple of 8"
 335#endif
 336
 337	.macro	usr_entry
 338 UNWIND(.fnstart	)
 339 UNWIND(.cantunwind	)	@ don't unwind the user space
 340	sub	sp, sp, #S_FRAME_SIZE
 341 ARM(	stmib	sp, {r1 - r12}	)
 342 THUMB(	stmia	sp, {r0 - r12}	)
 343
 344	ldmia	r0, {r3 - r5}
 345	add	r0, sp, #S_PC		@ here for interlock avoidance
 346	mov	r6, #-1			@  ""  ""     ""        ""
 347
 348	str	r3, [sp]		@ save the "real" r0 copied
 349					@ from the exception stack
 350
 351	@
 352	@ We are now ready to fill in the remaining blanks on the stack:
 353	@
 354	@  r4 - lr_<exception>, already fixed up for correct return/restart
 355	@  r5 - spsr_<exception>
 356	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
 357	@
 358	@ Also, separately save sp_usr and lr_usr
 359	@
 360	stmia	r0, {r4 - r6}
 361 ARM(	stmdb	r0, {sp, lr}^			)
 362 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
 363
 364	@
 365	@ Enable the alignment trap while in kernel mode
 366	@
 367	alignment_trap r0
 368
 369	@
 370	@ Clear FP to mark the first stack frame
 371	@
 372	zero_fp
 373
 374#ifdef CONFIG_IRQSOFF_TRACER
 375	bl	trace_hardirqs_off
 376#endif
 377	.endm
 378
 379	.macro	kuser_cmpxchg_check
 380#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 381#ifndef CONFIG_MMU
 382#warning "NPTL on non MMU needs fixing"
 383#else
 384	@ Make sure our user space atomic helper is restarted
 385	@ if it was interrupted in a critical region.  Here we
 386	@ perform a quick test inline since it should be false
 387	@ 99.9999% of the time.  The rest is done out of line.
 388	cmp	r4, #TASK_SIZE
 389	blhs	kuser_cmpxchg64_fixup
 390#endif
 391#endif
 392	.endm
 393
 394	.align	5
 395__dabt_usr:
 396	usr_entry
 397	kuser_cmpxchg_check
 398	mov	r2, sp
 399	dabt_helper
 400	b	ret_from_exception
 401 UNWIND(.fnend		)
 402ENDPROC(__dabt_usr)
 403
 404	.align	5
 405__irq_usr:
 406	usr_entry
 407	kuser_cmpxchg_check
 408	irq_handler
 409	get_thread_info tsk
 410	mov	why, #0
 411	b	ret_to_user_from_irq
 412 UNWIND(.fnend		)
 413ENDPROC(__irq_usr)
 414
 415	.ltorg
 416
 417	.align	5
 418__und_usr:
 419	usr_entry
 420
 421	mov	r2, r4
 422	mov	r3, r5
 423
 424	@
 425	@ fall through to the emulation code, which returns using r9 if
 426	@ it has emulated the instruction, or the more conventional lr
 427	@ if we are to treat this as a real undefined instruction
 428	@
 429	@  r0 - instruction
 430	@
 431	adr	r9, BSYM(ret_from_exception)
 432	adr	lr, BSYM(__und_usr_unknown)
 433	tst	r3, #PSR_T_BIT			@ Thumb mode?
 434	itet	eq				@ explicit IT needed for the 1f label
 435	subeq	r4, r2, #4			@ ARM instr at LR - 4
 436	subne	r4, r2, #2			@ Thumb instr at LR - 2
 4371:	ldreqt	r0, [r4]
 438#ifdef CONFIG_CPU_ENDIAN_BE8
 439	reveq	r0, r0				@ little endian instruction
 440#endif
 441	beq	call_fpe
 442	@ Thumb instruction
 443#if __LINUX_ARM_ARCH__ >= 7
 4442:
 445 ARM(	ldrht	r5, [r4], #2	)
 446 THUMB(	ldrht	r5, [r4]	)
 447 THUMB(	add	r4, r4, #2	)
 448	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
 449	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
 450	blo	__und_usr_unknown
 4513:	ldrht	r0, [r4]
 452	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
 453	orr	r0, r0, r5, lsl #16
 454#else
 455	b	__und_usr_unknown
 456#endif
 457 UNWIND(.fnend		)
 458ENDPROC(__und_usr)
 459
 460	@
 461	@ fallthrough to call_fpe
 462	@
 463
 464/*
 465 * The out of line fixup for the ldrt above.
 466 */
 467	.pushsection .fixup, "ax"
 4684:	mov	pc, r9
 469	.popsection
 470	.pushsection __ex_table,"a"
 471	.long	1b, 4b
 472#if __LINUX_ARM_ARCH__ >= 7
 473	.long	2b, 4b
 474	.long	3b, 4b
 475#endif
 476	.popsection
 477
 478/*
 479 * Check whether the instruction is a co-processor instruction.
 480 * If yes, we need to call the relevant co-processor handler.
 481 *
 482 * Note that we don't do a full check here for the co-processor
 483 * instructions; all instructions with bit 27 set are well
 484 * defined.  The only instructions that should fault are the
 485 * co-processor instructions.  However, we have to watch out
 486 * for the ARM6/ARM7 SWI bug.
 487 *
 488 * NEON is a special case that has to be handled here. Not all
 489 * NEON instructions are co-processor instructions, so we have
 490 * to make a special case of checking for them. Plus, there's
 491 * five groups of them, so we have a table of mask/opcode pairs
 492 * to check against, and if any match then we branch off into the
 493 * NEON handler code.
 494 *
 495 * Emulators may wish to make use of the following registers:
 496 *  r0  = instruction opcode.
 497 *  r2  = PC+4
 498 *  r9  = normal "successful" return address
  499 *  r10 = this thread's thread_info structure.
 500 *  lr  = unrecognised instruction return address
 501 */
 502	@
 503	@ Fall-through from Thumb-2 __und_usr
 504	@
 505#ifdef CONFIG_NEON
 506	adr	r6, .LCneon_thumb_opcodes
 507	b	2f
 508#endif
 509call_fpe:
 510#ifdef CONFIG_NEON
 511	adr	r6, .LCneon_arm_opcodes
 5122:
 513	ldr	r7, [r6], #4			@ mask value
 514	cmp	r7, #0				@ end mask?
 515	beq	1f
 516	and	r8, r0, r7
 517	ldr	r7, [r6], #4			@ opcode bits matching in mask
 518	cmp	r8, r7				@ NEON instruction?
 519	bne	2b
 520	get_thread_info r10
 521	mov	r7, #1
 522	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
 523	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
 524	b	do_vfp				@ let VFP handler handle this
 5251:
 526#endif
 527	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
 528	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
 529#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
 530	and	r8, r0, #0x0f000000		@ mask out op-code bits
 531	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
 532#endif
 533	moveq	pc, lr
 534	get_thread_info r10			@ get current thread
 535	and	r8, r0, #0x00000f00		@ mask out CP number
 536 THUMB(	lsr	r8, r8, #8		)
 537	mov	r7, #1
 538	add	r6, r10, #TI_USED_CP
 539 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 540 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
 541#ifdef CONFIG_IWMMXT
 542	@ Test if we need to give access to iWMMXt coprocessors
 543	ldr	r5, [r10, #TI_FLAGS]
 544	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
 545	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
 546	bcs	iwmmxt_task_enable
 547#endif
 548 ARM(	add	pc, pc, r8, lsr #6	)
 549 THUMB(	lsl	r8, r8, #2		)
 550 THUMB(	add	pc, r8			)
 551	nop
 552
 553	movw_pc	lr				@ CP#0
 554	W(b)	do_fpe				@ CP#1 (FPE)
 555	W(b)	do_fpe				@ CP#2 (FPE)
 556	movw_pc	lr				@ CP#3
 557#ifdef CONFIG_CRUNCH
 558	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
 559	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
 560	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
 561#else
 562	movw_pc	lr				@ CP#4
 563	movw_pc	lr				@ CP#5
 564	movw_pc	lr				@ CP#6
 565#endif
 566	movw_pc	lr				@ CP#7
 567	movw_pc	lr				@ CP#8
 568	movw_pc	lr				@ CP#9
 569#ifdef CONFIG_VFP
 570	W(b)	do_vfp				@ CP#10 (VFP)
 571	W(b)	do_vfp				@ CP#11 (VFP)
 572#else
 573	movw_pc	lr				@ CP#10 (VFP)
 574	movw_pc	lr				@ CP#11 (VFP)
 575#endif
 576	movw_pc	lr				@ CP#12
 577	movw_pc	lr				@ CP#13
 578	movw_pc	lr				@ CP#14 (Debug)
 579	movw_pc	lr				@ CP#15 (Control)
 580
 581#ifdef CONFIG_NEON
 582	.align	6
 583
 584.LCneon_arm_opcodes:
 585	.word	0xfe000000			@ mask
 586	.word	0xf2000000			@ opcode
 587
 588	.word	0xff100000			@ mask
 589	.word	0xf4000000			@ opcode
 590
 591	.word	0x00000000			@ mask
 592	.word	0x00000000			@ opcode
 593
 594.LCneon_thumb_opcodes:
 595	.word	0xef000000			@ mask
 596	.word	0xef000000			@ opcode
 597
 598	.word	0xff100000			@ mask
 599	.word	0xf9000000			@ opcode
 600
 601	.word	0x00000000			@ mask
 602	.word	0x00000000			@ opcode
 603#endif
 604
 605do_fpe:
 606	enable_irq
 607	ldr	r4, .LCfp
 608	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
 609	ldr	pc, [r4]			@ Call FP module USR entry point
 610
 611/*
 612 * The FP module is called with these registers set:
 613 *  r0  = instruction
 614 *  r2  = PC+4
 615 *  r9  = normal "successful" return address
 616 *  r10 = FP workspace
 617 *  lr  = unrecognised FP instruction return address
 618 */
 619
 620	.pushsection .data
 621ENTRY(fp_enter)
 622	.word	no_fp
 623	.popsection
 624
 625ENTRY(no_fp)
 626	mov	pc, lr
 627ENDPROC(no_fp)
 628
 629__und_usr_unknown:
 630	enable_irq
 631	mov	r0, sp
 632	adr	lr, BSYM(ret_from_exception)
 633	b	do_undefinstr
 634ENDPROC(__und_usr_unknown)
 635
 636	.align	5
 637__pabt_usr:
 638	usr_entry
 639	mov	r2, sp				@ regs
 640	pabt_helper
 641 UNWIND(.fnend		)
 642	/* fall through */
 643/*
 644 * This is the return code to user mode for abort handlers
 645 */
 646ENTRY(ret_from_exception)
 647 UNWIND(.fnstart	)
 648 UNWIND(.cantunwind	)
 649	get_thread_info tsk
 650	mov	why, #0
 651	b	ret_to_user
 652 UNWIND(.fnend		)
 653ENDPROC(__pabt_usr)
 654ENDPROC(ret_from_exception)
 655
 656/*
 657 * Register switch for ARMv3 and ARMv4 processors
 658 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 659 * previous and next are guaranteed not to be the same.
 660 */
 661ENTRY(__switch_to)
 662 UNWIND(.fnstart	)
 663 UNWIND(.cantunwind	)
 664	add	ip, r1, #TI_CPU_SAVE
 665	ldr	r3, [r2, #TI_TP_VALUE]
 666 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 667 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 668 THUMB(	str	sp, [ip], #4		   )
 669 THUMB(	str	lr, [ip], #4		   )
 670#ifdef CONFIG_CPU_USE_DOMAINS
 671	ldr	r6, [r2, #TI_CPU_DOMAIN]
 672#endif
 673	set_tls	r3, r4, r5
 674#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
 675	ldr	r7, [r2, #TI_TASK]
 676	ldr	r8, =__stack_chk_guard
 677	ldr	r7, [r7, #TSK_STACK_CANARY]
 678#endif
 679#ifdef CONFIG_CPU_USE_DOMAINS
 680	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
 681#endif
 682	mov	r5, r0
 683	add	r4, r2, #TI_CPU_SAVE
 684	ldr	r0, =thread_notify_head
 685	mov	r1, #THREAD_NOTIFY_SWITCH
 686	bl	atomic_notifier_call_chain
 687#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
 688	str	r7, [r8]
 689#endif
 690 THUMB(	mov	ip, r4			   )
 691	mov	r0, r5
 692 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 693 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 694 THUMB(	ldr	sp, [ip], #4		   )
 695 THUMB(	ldr	pc, [ip]		   )
 696 UNWIND(.fnend		)
 697ENDPROC(__switch_to)
 698
 699	__INIT
 700
 701/*
 702 * User helpers.
 703 *
 704 * Each segment is 32-byte aligned and will be moved to the top of the high
 705 * vector page.  New segments (if ever needed) must be added in front of
 706 * existing ones.  This mechanism should be used only for things that are
 707 * really small and justified, and not be abused freely.
 708 *
 709 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 710 */
 711 THUMB(	.arm	)
 712
 713	.macro	usr_ret, reg
 714#ifdef CONFIG_ARM_THUMB
 715	bx	\reg
 716#else
 717	mov	pc, \reg
 718#endif
 719	.endm
 720
 721	.align	5
 722	.globl	__kuser_helper_start
 723__kuser_helper_start:
 724
 725/*
 726 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 727 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 728 */
 729
 730__kuser_cmpxchg64:				@ 0xffff0f60
 731
 732#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 733
 734	/*
 735	 * Poor you.  No fast solution possible...
 736	 * The kernel itself must perform the operation.
 737	 * A special ghost syscall is used for that (see traps.c).
 738	 */
 739	stmfd	sp!, {r7, lr}
 740	ldr	r7, 1f			@ it's 20 bits
 741	swi	__ARM_NR_cmpxchg64
 742	ldmfd	sp!, {r7, pc}
 7431:	.word	__ARM_NR_cmpxchg64
 744
 745#elif defined(CONFIG_CPU_32v6K)
 746
 747	stmfd	sp!, {r4, r5, r6, r7}
 748	ldrd	r4, r5, [r0]			@ load old val
 749	ldrd	r6, r7, [r1]			@ load new val
 750	smp_dmb	arm
 7511:	ldrexd	r0, r1, [r2]			@ load current val
 752	eors	r3, r0, r4			@ compare with oldval (1)
 753	eoreqs	r3, r1, r5			@ compare with oldval (2)
 754	strexdeq r3, r6, r7, [r2]		@ store newval if eq
 755	teqeq	r3, #1				@ success?
  756	beq	1b				@ if not, retry
 757	smp_dmb	arm
 758	rsbs	r0, r3, #0			@ set returned val and C flag
 759	ldmfd	sp!, {r4, r5, r6, r7}
 760	bx	lr
 761
 762#elif !defined(CONFIG_SMP)
 763
 764#ifdef CONFIG_MMU
 765
 766	/*
 767	 * The only thing that can break atomicity in this cmpxchg64
 768	 * implementation is either an IRQ or a data abort exception
 769	 * causing another process/thread to be scheduled in the middle of
 770	 * the critical sequence.  The same strategy as for cmpxchg is used.
 771	 */
 772	stmfd	sp!, {r4, r5, r6, lr}
 773	ldmia	r0, {r4, r5}			@ load old val
 774	ldmia	r1, {r6, lr}			@ load new val
 7751:	ldmia	r2, {r0, r1}			@ load current val
 776	eors	r3, r0, r4			@ compare with oldval (1)
 777	eoreqs	r3, r1, r5			@ compare with oldval (2)
 7782:	stmeqia	r2, {r6, lr}			@ store newval if eq
 779	rsbs	r0, r3, #0			@ set return val and C flag
 780	ldmfd	sp!, {r4, r5, r6, pc}
 781
 782	.text
 783kuser_cmpxchg64_fixup:
 784	@ Called from the kuser_cmpxchg_check macro.
 785	@ r4 = address of interrupted insn (must be preserved).
 786	@ sp = saved regs. r7 and r8 are clobbered.
 787	@ 1b = first critical insn, 2b = last critical insn.
 788	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
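	@ The mov/sub pair below materializes the run-time (vector page)
	@ address of 1b: it is not encodable as a single immediate, so we
	@ start from 0xffff0fff and subtract the small difference.  The
	@ subs/rsbcss pair is the usual unsigned interval test: r4 lies
	@ within [1b, 2b] exactly when (r4 - 1b) <= (2b - 1b).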
 789	mov	r7, #0xffff0fff
 790	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
 791	subs	r8, r4, r7
 792	rsbcss	r8, r8, #(2b - 1b)
 793	strcs	r7, [sp, #S_PC]
 794#if __LINUX_ARM_ARCH__ < 6
 795	bcc	kuser_cmpxchg32_fixup
 796#endif
 797	mov	pc, lr
 798	.previous
 799
 800#else
 801#warning "NPTL on non MMU needs fixing"
 802	mov	r0, #-1
 803	adds	r0, r0, #0
 804	usr_ret	lr
 805#endif
 806
 807#else
 808#error "incoherent kernel configuration"
 809#endif
 810
 811	/* pad to next slot */
 812	.rept	(16 - (. - __kuser_cmpxchg64)/4)
 813	.word	0
 814	.endr
 815
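/*
 * Calling sketch for the helper above (illustrative; requires helper
 * version >= 5, see Documentation/arm/kernel_user_helpers.txt):
 *
 *	typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
 *					  const int64_t *newval,
 *					  volatile int64_t *ptr);
 *	#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
 *
 * It returns 0 (with C set) if *ptr was atomically changed from *oldval
 * to *newval, non-zero otherwise; callers must re-read the old value
 * and retry on failure.
 */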
 816	.align	5
 817
 818__kuser_memory_barrier:				@ 0xffff0fa0
 819	smp_dmb	arm
 820	usr_ret	lr
 821
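/*
 * Usage sketch (illustrative; requires helper version >= 3):
 *
 *	typedef void (__kuser_memory_barrier_t)(void);
 *	#define __kuser_memory_barrier (*(__kuser_memory_barrier_t *)0xffff0fa0)
 *
 *	__kuser_memory_barrier();
 *
 * This spares userspace from knowing which barrier instruction the
 * current CPU requires.
 */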
 822	.align	5
 823
 824__kuser_cmpxchg:				@ 0xffff0fc0
 825
 826#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 827
 828	/*
 829	 * Poor you.  No fast solution possible...
 830	 * The kernel itself must perform the operation.
 831	 * A special ghost syscall is used for that (see traps.c).
 832	 */
 833	stmfd	sp!, {r7, lr}
 834	ldr	r7, 1f			@ 20-bit number, too wide for an immediate
 835	swi	__ARM_NR_cmpxchg
 836	ldmfd	sp!, {r7, pc}
 8371:	.word	__ARM_NR_cmpxchg
 838
 839#elif __LINUX_ARM_ARCH__ < 6
 840
 841#ifdef CONFIG_MMU
 842
 843	/*
 844	 * The only thing that can break atomicity in this cmpxchg
 845	 * implementation is either an IRQ or a data abort exception
 846	 * causing another process/thread to be scheduled in the middle
 847	 * of the critical sequence.  To prevent this, code is added to
 848	 * the IRQ and data abort exception handlers to set the pc back
 849	 * to the beginning of the critical section if it is found to be
 850	 * within that critical section (see kuser_cmpxchg_fixup).
 851	 */
 8521:	ldr	r3, [r2]			@ load current val
 853	subs	r3, r3, r0			@ compare with oldval
 8542:	streq	r1, [r2]			@ store newval if eq
 855	rsbs	r0, r3, #0			@ set return val and C flag
 856	usr_ret	lr
 857
 858	.text
 859kuser_cmpxchg32_fixup:
 860	@ Called from kuser_cmpxchg_check macro.
 861	@ r4 = address of interrupted insn (must be preserved).
 862	@ sp = saved regs. r7 and r8 are clobbered.
 863	@ 1b = first critical insn, 2b = last critical insn.
 864	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
 865	mov	r7, #0xffff0fff
 866	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
 867	subs	r8, r4, r7
 868	rsbcss	r8, r8, #(2b - 1b)
 869	strcs	r7, [sp, #S_PC]
 870	mov	pc, lr
 871	.previous
 872
 873#else
 874#warning "NPTL on non MMU needs fixing"
 875	mov	r0, #-1
 876	adds	r0, r0, #0
 877	usr_ret	lr
 878#endif
 879
 880#else
 881
 882	smp_dmb	arm
 8831:	ldrex	r3, [r2]
 884	subs	r3, r3, r0
 885	strexeq	r3, r1, [r2]
 886	teqeq	r3, #1
 887	beq	1b
 888	rsbs	r0, r3, #0
 889	/* beware -- each __kuser slot must be 8 instructions max */
 890	ALT_SMP(b	__kuser_memory_barrier)
 891	ALT_UP(usr_ret	lr)
 892
 893#endif
 894
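/*
 * Usage sketch for __kuser_cmpxchg (illustrative; requires helper
 * version >= 2), essentially the atomic-add example from
 * Documentation/arm/kernel_user_helpers.txt:
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *					volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 *	int atomic_add(volatile int *ptr, int val)
 *	{
 *		int old, new;
 *		do {
 *			old = *ptr;
 *			new = old + val;
 *		} while (__kuser_cmpxchg(old, new, ptr));
 *		return new;
 *	}
 *
 * A zero return (C flag set above) means the new value was stored.
 */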
 895	.align	5
 896
 897__kuser_get_tls:				@ 0xffff0fe0
 898	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
 899	usr_ret	lr
 900	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
 901	.rep	4
 902	.word	0			@ 0xffff0ff0 software TLS value, then
 903	.endr				@ pad up to __kuser_helper_version
 904
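/*
 * Usage sketch (illustrative; requires helper version >= 1):
 *
 *	typedef void * (__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 *	void *tls = __kuser_get_tls();
 *
 * The ldr above fetches the word at 0xffff0ff0 (pc reads 8 bytes ahead,
 * hence the 16 - 8); on CPUs with a hardware TLS register, init code
 * copies the mrc at 0xffff0fe8 over the ldr instead.
 */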
 905__kuser_helper_version:				@ 0xffff0ffc
 906	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
 907
 908	.globl	__kuser_helper_end
 909__kuser_helper_end:
 910
 911 THUMB(	.thumb	)
 912
 913/*
 914 * Vector stubs.
 915 *
 916 * This code is copied to 0xffff0200 so we can use branches in the
 917 * vectors, rather than ldr's.  Note that this code must not
 918 * exceed 0x300 bytes.
 919 *
 920 * Common stub entry macro:
 921 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 922 *
 923 * SP points to a minimal amount of processor-private memory, the address
 924 * of which is copied into r0 for the mode-specific abort handler.
 925 */
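/*
 * Worked example of the dispatch below (illustrative): the low 4 bits
 * of the saved spsr index the branch table following each stub.
 * USR_32 mode is 0x10, so 0x10 & 0x0f == 0 selects entry 0; SVC_32 is
 * 0x13, selecting entry 3.  All other indices lead to the __*_invalid
 * handlers.
 */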
 926	.macro	vector_stub, name, mode, correction=0
 927	.align	5
 928
 929vector_\name:
 930	.if \correction
 931	sub	lr, lr, #\correction
 932	.endif
 933
 934	@
 935	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
 936	@ (parent CPSR)
 937	@
 938	stmia	sp, {r0, lr}		@ save r0, lr
 939	mrs	lr, spsr
 940	str	lr, [sp, #8]		@ save spsr
 941
 942	@
 943	@ Prepare for SVC32 mode.  IRQs remain disabled.
 944	@
 945	mrs	r0, cpsr
 946	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
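					@ (\mode ^ SVC_MODE) flips just the
					@ mode bits from \mode to SVC_MODE;
					@ PSR_ISETSTATE sets the Thumb bit
					@ for Thumb-2 kernels (exceptions
					@ are always taken in ARM state)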
 947	msr	spsr_cxsf, r0
 948
 949	@
 950	@ the branch table must immediately follow this code
 951	@
 952	and	lr, lr, #0x0f
 953 THUMB(	adr	r0, 1f			)
 954 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
 955	mov	r0, sp
 956 ARM(	ldr	lr, [pc, lr, lsl #2]	)
 957	movs	pc, lr			@ branch to handler in SVC mode
 958ENDPROC(vector_\name)
 959
 960	.align	2
 961	@ handler addresses follow this label
 9621:
 963	.endm
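	@ The correction parameter accounts for how far lr_<exception>
	@ points past the instruction to restart (an architectural
	@ property): 4 for IRQ and prefetch abort, 8 for data abort, and
	@ 0 for undef, where lr already points at the following insn.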
 964
 965	.globl	__stubs_start
 966__stubs_start:
 967/*
 968 * Interrupt dispatcher
 969 */
 970	vector_stub	irq, IRQ_MODE, 4
 971
 972	.long	__irq_usr			@  0  (USR_26 / USR_32)
 973	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
 974	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
 975	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
 976	.long	__irq_invalid			@  4
 977	.long	__irq_invalid			@  5
 978	.long	__irq_invalid			@  6
 979	.long	__irq_invalid			@  7
 980	.long	__irq_invalid			@  8
 981	.long	__irq_invalid			@  9
 982	.long	__irq_invalid			@  a
 983	.long	__irq_invalid			@  b
 984	.long	__irq_invalid			@  c
 985	.long	__irq_invalid			@  d
 986	.long	__irq_invalid			@  e
 987	.long	__irq_invalid			@  f
 988
 989/*
 990 * Data abort dispatcher
 991 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 992 */
 993	vector_stub	dabt, ABT_MODE, 8
 994
 995	.long	__dabt_usr			@  0  (USR_26 / USR_32)
 996	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
 997	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
 998	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
 999	.long	__dabt_invalid			@  4
1000	.long	__dabt_invalid			@  5
1001	.long	__dabt_invalid			@  6
1002	.long	__dabt_invalid			@  7
1003	.long	__dabt_invalid			@  8
1004	.long	__dabt_invalid			@  9
1005	.long	__dabt_invalid			@  a
1006	.long	__dabt_invalid			@  b
1007	.long	__dabt_invalid			@  c
1008	.long	__dabt_invalid			@  d
1009	.long	__dabt_invalid			@  e
1010	.long	__dabt_invalid			@  f
1011
1012/*
1013 * Prefetch abort dispatcher
1014 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
1015 */
1016	vector_stub	pabt, ABT_MODE, 4
1017
1018	.long	__pabt_usr			@  0 (USR_26 / USR_32)
1019	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
1020	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
1021	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
1022	.long	__pabt_invalid			@  4
1023	.long	__pabt_invalid			@  5
1024	.long	__pabt_invalid			@  6
1025	.long	__pabt_invalid			@  7
1026	.long	__pabt_invalid			@  8
1027	.long	__pabt_invalid			@  9
1028	.long	__pabt_invalid			@  a
1029	.long	__pabt_invalid			@  b
1030	.long	__pabt_invalid			@  c
1031	.long	__pabt_invalid			@  d
1032	.long	__pabt_invalid			@  e
1033	.long	__pabt_invalid			@  f
1034
1035/*
1036 * Undef instr entry dispatcher
1037 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
1038 */
1039	vector_stub	und, UND_MODE
1040
1041	.long	__und_usr			@  0 (USR_26 / USR_32)
1042	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
1043	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
1044	.long	__und_svc			@  3 (SVC_26 / SVC_32)
1045	.long	__und_invalid			@  4
1046	.long	__und_invalid			@  5
1047	.long	__und_invalid			@  6
1048	.long	__und_invalid			@  7
1049	.long	__und_invalid			@  8
1050	.long	__und_invalid			@  9
1051	.long	__und_invalid			@  a
1052	.long	__und_invalid			@  b
1053	.long	__und_invalid			@  c
1054	.long	__und_invalid			@  d
1055	.long	__und_invalid			@  e
1056	.long	__und_invalid			@  f
1057
1058	.align	5
1059
1060/*=============================================================================
1061 * Undefined FIQs
1062 *-----------------------------------------------------------------------------
1063 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
1064 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
1065 * Basically, to switch modes we *HAVE* to clobber one register...  brain
1066 * damage alert!  We can't execute any useful code here in any mode other
1067 * than FIQ, and although we can switch to another mode, we can't get
1068 * back out of it without clobbering one register.
1069 */
1070vector_fiq:
1071	disable_fiq
1072	subs	pc, lr, #4
1073
1074/*=============================================================================
1075 * Address exception handler
1076 *-----------------------------------------------------------------------------
1077 * These aren't too critical: they're not supposed to happen, and they
1078 * won't happen in 32-bit data mode.
1079 */
1080
1081vector_addrexcptn:
1082	b	vector_addrexcptn
1083
1084/*
1085 * We group all the following data together to optimise
1086 * for CPUs with separate I & D caches.
1087 */
1088	.align	5
1089
1090.LCvswi:
1091	.word	vector_swi
1092
1093	.globl	__stubs_end
1094__stubs_end:
1095
1096	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
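/*
 * Why stubs_offset works (illustrative arithmetic): both blocks run
 * from copies, the vectors at 0xffff0000 and the stubs at 0xffff0200.
 * A pc-relative branch encodes (target - pc), so a branch assembled at
 * __vectors_start + v to "vector_X + stubs_offset" (writing s for
 * vector_X's offset from __stubs_start) encodes
 *
 *	(__stubs_start + s) + stubs_offset - (__vectors_start + v)
 *		= (0x200 + s) - v
 *
 * which is exactly the displacement from the copied vector at
 * 0xffff0000 + v to the copied stub at 0xffff0200 + s.
 */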
1097
1098	.globl	__vectors_start
1099__vectors_start:
1100 ARM(	swi	SYS_ERROR0	)
1101 THUMB(	svc	#0		)
1102 THUMB(	nop			)
1103	W(b)	vector_und + stubs_offset
1104	W(ldr)	pc, .LCvswi + stubs_offset
1105	W(b)	vector_pabt + stubs_offset
1106	W(b)	vector_dabt + stubs_offset
1107	W(b)	vector_addrexcptn + stubs_offset
1108	W(b)	vector_irq + stubs_offset
1109	W(b)	vector_fiq + stubs_offset
1110
1111	.globl	__vectors_end
1112__vectors_end:
1113
1114	.data
1115
1116	.globl	cr_alignment
1117	.globl	cr_no_alignment
1118cr_alignment:
1119	.space	4
1120cr_no_alignment:
1121	.space	4
1122
1123#ifdef CONFIG_MULTI_IRQ_HANDLER
1124	.globl	handle_arch_irq
1125handle_arch_irq:
1126	.space	4
1127#endif