arch/powerpc/kernel/entry_32.S (Linux v4.17)
   1/*
   2 *  PowerPC version
   3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
   4 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
   5 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
   6 *  Adapted for Power Macintosh by Paul Mackerras.
   7 *  Low-level exception handlers and MMU support
   8 *  rewritten by Paul Mackerras.
   9 *    Copyright (C) 1996 Paul Mackerras.
  10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
  11 *
  12 *  This file contains the system call entry code, context switch
  13 *  code, and exception/interrupt return code for PowerPC.
  14 *
  15 *  This program is free software; you can redistribute it and/or
  16 *  modify it under the terms of the GNU General Public License
  17 *  as published by the Free Software Foundation; either version
  18 *  2 of the License, or (at your option) any later version.
  19 *
  20 */
  21
  22#include <linux/errno.h>
  23#include <linux/err.h>
  24#include <linux/sys.h>
  25#include <linux/threads.h>
  26#include <asm/reg.h>
  27#include <asm/page.h>
  28#include <asm/mmu.h>
  29#include <asm/cputable.h>
  30#include <asm/thread_info.h>
  31#include <asm/ppc_asm.h>
  32#include <asm/asm-offsets.h>
  33#include <asm/unistd.h>
  34#include <asm/ptrace.h>
  35#include <asm/export.h>
  36
  37/*
   38 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
  39 */
  40#if MSR_KERNEL >= 0x10000
  41#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
  42#else
  43#define LOAD_MSR_KERNEL(r, x)	li r,(x)
  44#endif
  45
  46/*
   47 * Align to 4k so that all functions modifying srr0/srr1 fit into a
   48 * single page and no TLB miss can occur between the modification of
   49 * srr0/srr1 and the associated rfi.
  50 */
  51	.align	12
  52
  53#ifdef CONFIG_BOOKE
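/*
 * Book-E entry points for the machine check, debug and critical
 * exception levels.  mcheck_transfer_to_handler also saves DSRR0/DSRR1
 * and debug_transfer_to_handler saves CSRR0/CSRR1 (the save/restore
 * registers of the levels they may have interrupted); both fall through
 * to crit_transfer_to_handler, which saves the MMU registers and
 * SRR0/SRR1 and adjusts the kernel stack limit before joining the
 * common transfer_to_handler path.
 */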
  54	.globl	mcheck_transfer_to_handler
  55mcheck_transfer_to_handler:
  56	mfspr	r0,SPRN_DSRR0
  57	stw	r0,_DSRR0(r11)
  58	mfspr	r0,SPRN_DSRR1
  59	stw	r0,_DSRR1(r11)
  60	/* fall through */
  61
  62	.globl	debug_transfer_to_handler
  63debug_transfer_to_handler:
  64	mfspr	r0,SPRN_CSRR0
  65	stw	r0,_CSRR0(r11)
  66	mfspr	r0,SPRN_CSRR1
  67	stw	r0,_CSRR1(r11)
  68	/* fall through */
  69
  70	.globl	crit_transfer_to_handler
  71crit_transfer_to_handler:
  72#ifdef CONFIG_PPC_BOOK3E_MMU
  73	mfspr	r0,SPRN_MAS0
  74	stw	r0,MAS0(r11)
  75	mfspr	r0,SPRN_MAS1
  76	stw	r0,MAS1(r11)
  77	mfspr	r0,SPRN_MAS2
  78	stw	r0,MAS2(r11)
  79	mfspr	r0,SPRN_MAS3
  80	stw	r0,MAS3(r11)
  81	mfspr	r0,SPRN_MAS6
  82	stw	r0,MAS6(r11)
  83#ifdef CONFIG_PHYS_64BIT
  84	mfspr	r0,SPRN_MAS7
  85	stw	r0,MAS7(r11)
  86#endif /* CONFIG_PHYS_64BIT */
  87#endif /* CONFIG_PPC_BOOK3E_MMU */
  88#ifdef CONFIG_44x
  89	mfspr	r0,SPRN_MMUCR
  90	stw	r0,MMUCR(r11)
  91#endif
  92	mfspr	r0,SPRN_SRR0
  93	stw	r0,_SRR0(r11)
  94	mfspr	r0,SPRN_SRR1
  95	stw	r0,_SRR1(r11)
  96
   97	/* Save the old stack limit and set a new limit that protects
   98	 * the thread_info struct at the base of the current kernel
   99	 * stack.
  100	 */
 101	mfspr	r8,SPRN_SPRG_THREAD
 102	lwz	r0,KSP_LIMIT(r8)
 103	stw	r0,SAVED_KSP_LIMIT(r11)
 104	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
 105	stw	r0,KSP_LIMIT(r8)
 106	/* fall through */
 107#endif
 108
 109#ifdef CONFIG_40x
 110	.globl	crit_transfer_to_handler
 111crit_transfer_to_handler:
 112	lwz	r0,crit_r10@l(0)
 113	stw	r0,GPR10(r11)
 114	lwz	r0,crit_r11@l(0)
 115	stw	r0,GPR11(r11)
 116	mfspr	r0,SPRN_SRR0
 117	stw	r0,crit_srr0@l(0)
 118	mfspr	r0,SPRN_SRR1
 119	stw	r0,crit_srr1@l(0)
 120
  121	/* Save the old stack limit and set a new limit that protects
  122	 * the thread_info struct at the base of the current kernel
  123	 * stack.
  124	 */
 125	mfspr	r8,SPRN_SPRG_THREAD
 126	lwz	r0,KSP_LIMIT(r8)
 127	stw	r0,saved_ksp_limit@l(0)
 128	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
 129	stw	r0,KSP_LIMIT(r8)
 130	/* fall through */
 131#endif
 132
 133/*
 134 * This code finishes saving the registers to the exception frame
 135 * and jumps to the appropriate handler for the exception, turning
 136 * on address translation.
 137 * Note that we rely on the caller having set cr0.eq iff the exception
 138 * occurred in kernel mode (i.e. MSR:PR = 0).
 139 */
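/*
 * On entry r11 points to the exception frame, r12 holds the interrupted
 * NIP, r9 the interrupted MSR and r10 the MSR value the handler should
 * run with; the two words following the caller's branch hold the
 * handler address and the address to return to when the handler is done.
 */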
 140	.globl	transfer_to_handler_full
 141transfer_to_handler_full:
 142	SAVE_NVGPRS(r11)
 143	/* fall through */
 144
 145	.globl	transfer_to_handler
 146transfer_to_handler:
 147	stw	r2,GPR2(r11)
 148	stw	r12,_NIP(r11)
 149	stw	r9,_MSR(r11)
 150	andi.	r2,r9,MSR_PR
 151	mfctr	r12
 152	mfspr	r2,SPRN_XER
 153	stw	r12,_CTR(r11)
 154	stw	r2,_XER(r11)
 155	mfspr	r12,SPRN_SPRG_THREAD
 156	addi	r2,r12,-THREAD
 157	tovirt(r2,r2)			/* set r2 to current */
 158	beq	2f			/* if from user, fix up THREAD.regs */
 159	addi	r11,r1,STACK_FRAME_OVERHEAD
 160	stw	r11,PT_REGS(r12)
 161#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
 162	/* Check to see if the dbcr0 register is set up to debug.  Use the
 163	   internal debug mode bit to do this. */
 164	lwz	r12,THREAD_DBCR0(r12)
 165	andis.	r12,r12,DBCR0_IDM@h
 166	beq+	3f
 167	/* From user and task is ptraced - load up global dbcr0 */
 168	li	r12,-1			/* clear all pending debug events */
 169	mtspr	SPRN_DBSR,r12
 170	lis	r11,global_dbcr0@ha
 171	tophys(r11,r11)
 172	addi	r11,r11,global_dbcr0@l
 173#ifdef CONFIG_SMP
 174	CURRENT_THREAD_INFO(r9, r1)
 175	lwz	r9,TI_CPU(r9)
 176	slwi	r9,r9,3
 177	add	r11,r11,r9
 178#endif
 179	lwz	r12,0(r11)
 180	mtspr	SPRN_DBCR0,r12
 181	lwz	r12,4(r11)
 182	addi	r12,r12,-1
 183	stw	r12,4(r11)
 184#endif
 185#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 186	CURRENT_THREAD_INFO(r9, r1)
 187	tophys(r9, r9)
 188	ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
 189#endif
 190
 191	b	3f
 192
 1932:	/* if from kernel, check interrupted DOZE/NAP mode and
 194         * check for stack overflow
 195         */
 196	lwz	r9,KSP_LIMIT(r12)
 197	cmplw	r1,r9			/* if r1 <= ksp_limit */
 198	ble-	stack_ovf		/* then the kernel stack overflowed */
 1995:
 200#if defined(CONFIG_6xx) || defined(CONFIG_E500)
 201	CURRENT_THREAD_INFO(r9, r1)
 202	tophys(r9,r9)			/* check local flags */
 203	lwz	r12,TI_LOCAL_FLAGS(r9)
 204	mtcrf	0x01,r12
 205	bt-	31-TLF_NAPPING,4f
 206	bt-	31-TLF_SLEEPING,7f
 207#endif /* CONFIG_6xx || CONFIG_E500 */
 208	.globl transfer_to_handler_cont
 209transfer_to_handler_cont:
 2103:
 211	mflr	r9
 212	lwz	r11,0(r9)		/* virtual address of handler */
 213	lwz	r9,4(r9)		/* where to go when done */
 214#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 215	mtspr	SPRN_NRI, r0
 216#endif
 217#ifdef CONFIG_TRACE_IRQFLAGS
 218	lis	r12,reenable_mmu@h
 219	ori	r12,r12,reenable_mmu@l
 220	mtspr	SPRN_SRR0,r12
 221	mtspr	SPRN_SRR1,r10
 222	SYNC
 223	RFI
 224reenable_mmu:				/* re-enable mmu so we can */
 225	mfmsr	r10
 226	lwz	r12,_MSR(r1)
 227	xor	r10,r10,r12
 228	andi.	r10,r10,MSR_EE		/* Did EE change? */
 229	beq	1f
 230
 231	/*
  232	 * trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
  233	 * If we came from user mode there is only one stack frame on the
  234	 * stack, and accessing CALLER_ADDR1 would cause an oops, so we
  235	 * create a dummy stack frame to keep trace_hardirqs_off happy.
 236	 *
 237	 * This is handy because we also need to save a bunch of GPRs,
 238	 * r3 can be different from GPR3(r1) at this point, r9 and r11
  239	 * contain the old MSR and handler address respectively,
 240	 * r4 & r5 can contain page fault arguments that need to be passed
 241	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
 242	 * they aren't useful past this point (aren't syscall arguments),
 243	 * the rest is restored from the exception frame.
 244	 */
 245	stwu	r1,-32(r1)
 246	stw	r9,8(r1)
 247	stw	r11,12(r1)
 248	stw	r3,16(r1)
 249	stw	r4,20(r1)
 250	stw	r5,24(r1)
 251	bl	trace_hardirqs_off
 252	lwz	r5,24(r1)
 253	lwz	r4,20(r1)
 254	lwz	r3,16(r1)
 255	lwz	r11,12(r1)
 256	lwz	r9,8(r1)
 257	addi	r1,r1,32
 258	lwz	r0,GPR0(r1)
 259	lwz	r6,GPR6(r1)
 260	lwz	r7,GPR7(r1)
 261	lwz	r8,GPR8(r1)
 2621:	mtctr	r11
 263	mtlr	r9
 264	bctr				/* jump to handler */
 265#else /* CONFIG_TRACE_IRQFLAGS */
 266	mtspr	SPRN_SRR0,r11
 267	mtspr	SPRN_SRR1,r10
 268	mtlr	r9
 269	SYNC
 270	RFI				/* jump to handler, enable MMU */
 271#endif /* CONFIG_TRACE_IRQFLAGS */
 272
 273#if defined (CONFIG_6xx) || defined(CONFIG_E500)
 2744:	rlwinm	r12,r12,0,~_TLF_NAPPING
 275	stw	r12,TI_LOCAL_FLAGS(r9)
 276	b	power_save_ppc32_restore
 277
 2787:	rlwinm	r12,r12,0,~_TLF_SLEEPING
 279	stw	r12,TI_LOCAL_FLAGS(r9)
 280	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
 281	rlwinm	r9,r9,0,~MSR_EE
 282	lwz	r12,_LINK(r11)		/* and return to address in LR */
 283	b	fast_exception_return
 284#endif
 285
 286/*
 287 * On kernel stack overflow, load up an initial stack pointer
 288 * and call StackOverflow(regs), which should not return.
 289 */
 290stack_ovf:
 291	/* sometimes we use a statically-allocated stack, which is OK. */
 292	lis	r12,_end@h
 293	ori	r12,r12,_end@l
 294	cmplw	r1,r12
 295	ble	5b			/* r1 <= &_end is OK */
 296	SAVE_NVGPRS(r11)
 297	addi	r3,r1,STACK_FRAME_OVERHEAD
 298	lis	r1,init_thread_union@ha
 299	addi	r1,r1,init_thread_union@l
 300	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
 301	lis	r9,StackOverflow@ha
 302	addi	r9,r9,StackOverflow@l
 303	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 304#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 305	mtspr	SPRN_NRI, r0
 306#endif
 307	mtspr	SPRN_SRR0,r9
 308	mtspr	SPRN_SRR1,r10
 309	SYNC
 310	RFI
 311
 312/*
 313 * Handle a system call.
 314 */
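/*
 * On entry r0 holds the syscall number and r3-r8 hold the arguments.
 * The result is returned in r3; errors are reported by setting CR0.SO
 * and returning the positive errno value in r3.
 */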
 315	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
 316	.stabs	"entry_32.S",N_SO,0,0,0f
 3170:
 318
 319_GLOBAL(DoSyscall)
 320	stw	r3,ORIG_GPR3(r1)
 321	li	r12,0
 322	stw	r12,RESULT(r1)
 323	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
 324	rlwinm	r11,r11,0,4,2
 325	stw	r11,_CCR(r1)
 326#ifdef CONFIG_TRACE_IRQFLAGS
  327	/* Returning from a syscall can (and generally will) hard-enable
  328	 * interrupts. You aren't supposed to call a syscall with
  329	 * interrupts disabled in the first place. However, to ensure
  330	 * that we get it right vs. lockdep if it happens, we force a
  331	 * hard enable here, with the appropriate tracing, if we see
  332	 * that we have been called with interrupts off.
 333	 */
 334	mfmsr	r11
 335	andi.	r12,r11,MSR_EE
 336	bne+	1f
 337	/* We came in with interrupts disabled, we enable them now */
 338	bl	trace_hardirqs_on
 339	mfmsr	r11
 340	lwz	r0,GPR0(r1)
 341	lwz	r3,GPR3(r1)
 342	lwz	r4,GPR4(r1)
 343	ori	r11,r11,MSR_EE
 344	lwz	r5,GPR5(r1)
 345	lwz	r6,GPR6(r1)
 346	lwz	r7,GPR7(r1)
 347	lwz	r8,GPR8(r1)
 348	mtmsr	r11
 3491:
 350#endif /* CONFIG_TRACE_IRQFLAGS */
 351	CURRENT_THREAD_INFO(r10, r1)
 352	lwz	r11,TI_FLAGS(r10)
 353	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
 354	bne-	syscall_dotrace
 355syscall_dotrace_cont:
 356	cmplwi	0,r0,NR_syscalls
 357	lis	r10,sys_call_table@h
 358	ori	r10,r10,sys_call_table@l
 359	slwi	r0,r0,2
 360	bge-	66f
 361	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
 362	mtlr	r10
 363	addi	r9,r1,STACK_FRAME_OVERHEAD
 364	PPC440EP_ERR42
 365	blrl			/* Call handler */
 366	.globl	ret_from_syscall
 367ret_from_syscall:
 368	mr	r6,r3
 369	CURRENT_THREAD_INFO(r12, r1)
 370	/* disable interrupts so current_thread_info()->flags can't change */
 371	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
 372	/* Note: We don't bother telling lockdep about it */
 373	SYNC
 374	MTMSRD(r10)
 375	lwz	r9,TI_FLAGS(r12)
 376	li	r8,-MAX_ERRNO
 377	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 378	bne-	syscall_exit_work
 379	cmplw	0,r3,r8
 380	blt+	syscall_exit_cont
 381	lwz	r11,_CCR(r1)			/* Load CR */
 382	neg	r3,r3
 383	oris	r11,r11,0x1000	/* Set SO bit in CR */
 384	stw	r11,_CCR(r1)
 385syscall_exit_cont:
 386	lwz	r8,_MSR(r1)
 387#ifdef CONFIG_TRACE_IRQFLAGS
  388	/* If we are going to return from the syscall with interrupts
  389	 * off, we trace that here. It shouldn't happen, but we want
  390	 * to catch it if it does.
 391	 */
 392	andi.	r10,r8,MSR_EE
 393	bne+	1f
 394	stw	r3,GPR3(r1)
 395	bl      trace_hardirqs_off
 396	lwz	r3,GPR3(r1)
 3971:
 398#endif /* CONFIG_TRACE_IRQFLAGS */
 399#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 400	/* If the process has its own DBCR0 value, load it up.  The internal
 401	   debug mode bit tells us that dbcr0 should be loaded. */
 402	lwz	r0,THREAD+THREAD_DBCR0(r2)
 403	andis.	r10,r0,DBCR0_IDM@h
 404	bnel-	load_dbcr0
 405#endif
 406#ifdef CONFIG_44x
 407BEGIN_MMU_FTR_SECTION
 408	lis	r4,icache_44x_need_flush@ha
 409	lwz	r5,icache_44x_need_flush@l(r4)
 410	cmplwi	cr0,r5,0
 411	bne-	2f
 4121:
 413END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
 414#endif /* CONFIG_44x */
 415BEGIN_FTR_SECTION
 416	lwarx	r7,0,r1
 417END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 418	stwcx.	r0,0,r1			/* to clear the reservation */
 419#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 420	andi.	r4,r8,MSR_PR
 421	beq	3f
 422	CURRENT_THREAD_INFO(r4, r1)
 423	ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
 4243:
 425#endif
 426	lwz	r4,_LINK(r1)
 427	lwz	r5,_CCR(r1)
 428	mtlr	r4
 429	mtcr	r5
 430	lwz	r7,_NIP(r1)
 431	lwz	r2,GPR2(r1)
 432	lwz	r1,GPR1(r1)
 433#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 434	mtspr	SPRN_NRI, r0
 435#endif
 436	mtspr	SPRN_SRR0,r7
 437	mtspr	SPRN_SRR1,r8
 438	SYNC
 439	RFI
 440#ifdef CONFIG_44x
 4412:	li	r7,0
 442	iccci	r0,r0
 443	stw	r7,icache_44x_need_flush@l(r4)
 444	b	1b
 445#endif  /* CONFIG_44x */
 446
 44766:	li	r3,-ENOSYS
 448	b	ret_from_syscall
 449
 450	.globl	ret_from_fork
 451ret_from_fork:
 452	REST_NVGPRS(r1)
 453	bl	schedule_tail
 454	li	r3,0
 455	b	ret_from_syscall
 456
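/*
 * Entry point for a newly created kernel thread: copy_thread places the
 * function to call in r14 and its argument in r15 of the child's
 * register frame; call it, then exit through the syscall return path.
 */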
 457	.globl	ret_from_kernel_thread
 458ret_from_kernel_thread:
 459	REST_NVGPRS(r1)
 460	bl	schedule_tail
 461	mtlr	r14
 462	mr	r3,r15
 463	PPC440EP_ERR42
 464	blrl
 465	li	r3,0
 466	b	ret_from_syscall
 467
 468/* Traced system call support */
 469syscall_dotrace:
 470	SAVE_NVGPRS(r1)
 471	li	r0,0xc00
 472	stw	r0,_TRAP(r1)
 473	addi	r3,r1,STACK_FRAME_OVERHEAD
 474	bl	do_syscall_trace_enter
 475	/*
 476	 * Restore argument registers possibly just changed.
 477	 * We use the return value of do_syscall_trace_enter
 478	 * for call number to look up in the table (r0).
 479	 */
 480	mr	r0,r3
 481	lwz	r3,GPR3(r1)
 482	lwz	r4,GPR4(r1)
 483	lwz	r5,GPR5(r1)
 484	lwz	r6,GPR6(r1)
 485	lwz	r7,GPR7(r1)
 486	lwz	r8,GPR8(r1)
 487	REST_NVGPRS(r1)
 488
 489	cmplwi	r0,NR_syscalls
 490	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
 491	bge-	ret_from_syscall
 492	b	syscall_dotrace_cont
 493
 494syscall_exit_work:
 495	andi.	r0,r9,_TIF_RESTOREALL
 496	beq+	0f
 497	REST_NVGPRS(r1)
 498	b	2f
 4990:	cmplw	0,r3,r8
 500	blt+	1f
 501	andi.	r0,r9,_TIF_NOERROR
 502	bne-	1f
 503	lwz	r11,_CCR(r1)			/* Load CR */
 504	neg	r3,r3
 505	oris	r11,r11,0x1000	/* Set SO bit in CR */
 506	stw	r11,_CCR(r1)
 507
 5081:	stw	r6,RESULT(r1)	/* Save result */
 509	stw	r3,GPR3(r1)	/* Update return value */
 5102:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
 511	beq	4f
 512
 513	/* Clear per-syscall TIF flags if any are set.  */
 514
 515	li	r11,_TIF_PERSYSCALL_MASK
 516	addi	r12,r12,TI_FLAGS
 5173:	lwarx	r8,0,r12
 518	andc	r8,r8,r11
 519#ifdef CONFIG_IBM405_ERR77
 520	dcbt	0,r12
 521#endif
 522	stwcx.	r8,0,r12
 523	bne-	3b
 524	subi	r12,r12,TI_FLAGS
 525	
 5264:	/* Anything which requires enabling interrupts? */
 527	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
 528	beq	ret_from_except
 529
 530	/* Re-enable interrupts. There is no need to trace that with
 531	 * lockdep as we are supposed to have IRQs on at this point
 532	 */
 533	ori	r10,r10,MSR_EE
 534	SYNC
 535	MTMSRD(r10)
 536
 537	/* Save NVGPRS if they're not saved already */
 538	lwz	r4,_TRAP(r1)
 539	andi.	r4,r4,1
 540	beq	5f
 541	SAVE_NVGPRS(r1)
 542	li	r4,0xc00
 543	stw	r4,_TRAP(r1)
 5445:
 545	addi	r3,r1,STACK_FRAME_OVERHEAD
 546	bl	do_syscall_trace_leave
 547	b	ret_from_except_full
 548
 549/*
 550 * The fork/clone functions need to copy the full register set into
 551 * the child process. Therefore we need to save all the nonvolatile
 552 * registers (r13 - r31) before calling the C code.
 553 */
 554	.globl	ppc_fork
 555ppc_fork:
 556	SAVE_NVGPRS(r1)
 557	lwz	r0,_TRAP(r1)
 558	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 559	stw	r0,_TRAP(r1)		/* register set saved */
 560	b	sys_fork
 561
 562	.globl	ppc_vfork
 563ppc_vfork:
 564	SAVE_NVGPRS(r1)
 565	lwz	r0,_TRAP(r1)
 566	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 567	stw	r0,_TRAP(r1)		/* register set saved */
 568	b	sys_vfork
 569
 570	.globl	ppc_clone
 571ppc_clone:
 572	SAVE_NVGPRS(r1)
 573	lwz	r0,_TRAP(r1)
 574	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 575	stw	r0,_TRAP(r1)		/* register set saved */
 576	b	sys_clone
 577
 578	.globl	ppc_swapcontext
 579ppc_swapcontext:
 580	SAVE_NVGPRS(r1)
 581	lwz	r0,_TRAP(r1)
 582	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 583	stw	r0,_TRAP(r1)		/* register set saved */
 584	b	sys_swapcontext
 585
 586/*
 587 * Top-level page fault handling.
 588 * This is in assembler because if do_page_fault tells us that
 589 * it is a bad kernel page fault, we want to save the non-volatile
 590 * registers before calling bad_page_fault.
 591 */
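/*
 * On entry r4 holds the faulting address (DAR) and r5 the fault status
 * (DSISR or ESR, depending on the CPU family).
 */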
 592	.globl	handle_page_fault
 593handle_page_fault:
 594	stw	r4,_DAR(r1)
 595	addi	r3,r1,STACK_FRAME_OVERHEAD
 596#ifdef CONFIG_6xx
 597	andis.  r0,r5,DSISR_DABRMATCH@h
 598	bne-    handle_dabr_fault
 599#endif
 600	bl	do_page_fault
 601	cmpwi	r3,0
 602	beq+	ret_from_except
 603	SAVE_NVGPRS(r1)
 604	lwz	r0,_TRAP(r1)
 605	clrrwi	r0,r0,1
 606	stw	r0,_TRAP(r1)
 607	mr	r5,r3
 608	addi	r3,r1,STACK_FRAME_OVERHEAD
 609	lwz	r4,_DAR(r1)
 610	bl	bad_page_fault
 611	b	ret_from_except_full
 612
 613#ifdef CONFIG_6xx
 614	/* We have a data breakpoint exception - handle it */
 615handle_dabr_fault:
 616	SAVE_NVGPRS(r1)
 617	lwz	r0,_TRAP(r1)
 618	clrrwi	r0,r0,1
 619	stw	r0,_TRAP(r1)
 620	bl      do_break
 621	b	ret_from_except_full
 622#endif
 623
 624/*
 625 * This routine switches between two different tasks.  The process
 626 * state of one is saved on its kernel stack.  Then the state
 627 * of the other is restored from its kernel stack.  The memory
 628 * management hardware is updated to the second process's state.
 629 * Finally, we can return to the second process.
 630 * On entry, r3 points to the THREAD for the current task, r4
 631 * points to the THREAD for the new task.
 632 *
 633 * This routine is always called with interrupts disabled.
 634 *
 635 * Note: there are two ways to get to the "going out" portion
 636 * of this code; either by coming in via the entry (_switch)
 637 * or via "fork" which must set up an environment equivalent
  638 * to the "_switch" path.  If you change this, you'll have to
 639 * change the fork code also.
 640 *
 641 * The code which creates the new task context is in 'copy_thread'
  642 * in arch/powerpc/kernel/process.c
 643 */
 644_GLOBAL(_switch)
 645	stwu	r1,-INT_FRAME_SIZE(r1)
 646	mflr	r0
 647	stw	r0,INT_FRAME_SIZE+4(r1)
 648	/* r3-r12 are caller saved -- Cort */
 649	SAVE_NVGPRS(r1)
 650	stw	r0,_NIP(r1)	/* Return to switch caller */
 651	mfmsr	r11
 652	li	r0,MSR_FP	/* Disable floating-point */
 653#ifdef CONFIG_ALTIVEC
 654BEGIN_FTR_SECTION
 655	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
 656	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
 657	stw	r12,THREAD+THREAD_VRSAVE(r2)
 658END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 659#endif /* CONFIG_ALTIVEC */
 660#ifdef CONFIG_SPE
 661BEGIN_FTR_SECTION
 662	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
 663	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
 664	stw	r12,THREAD+THREAD_SPEFSCR(r2)
 665END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 666#endif /* CONFIG_SPE */
 667	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
 668	beq+	1f
 669	andc	r11,r11,r0
 670	MTMSRD(r11)
 671	isync
 6721:	stw	r11,_MSR(r1)
 673	mfcr	r10
 674	stw	r10,_CCR(r1)
 675	stw	r1,KSP(r3)	/* Set old stack pointer */
 676
 677#ifdef CONFIG_SMP
 678	/* We need a sync somewhere here to make sure that if the
 679	 * previous task gets rescheduled on another CPU, it sees all
 680	 * stores it has performed on this one.
 681	 */
 682	sync
 683#endif /* CONFIG_SMP */
 684
 685	tophys(r0,r4)
 686	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
 687	lwz	r1,KSP(r4)	/* Load new stack pointer */
 688
 689	/* save the old current 'last' for return value */
 690	mr	r3,r2
 691	addi	r2,r4,-THREAD	/* Update current */
 692
 693#ifdef CONFIG_ALTIVEC
 694BEGIN_FTR_SECTION
 695	lwz	r0,THREAD+THREAD_VRSAVE(r2)
 696	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
 697END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 698#endif /* CONFIG_ALTIVEC */
 699#ifdef CONFIG_SPE
 700BEGIN_FTR_SECTION
 701	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
 702	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
 703END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 704#endif /* CONFIG_SPE */
 705
 706	lwz	r0,_CCR(r1)
 707	mtcrf	0xFF,r0
 708	/* r3-r12 are destroyed -- Cort */
 709	REST_NVGPRS(r1)
 710
 711	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
 712	mtlr	r4
 713	addi	r1,r1,INT_FRAME_SIZE
 714	blr
 715
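/*
 * Fast path for returning from an exception without going through
 * ret_from_except: r11 points to the exception frame, r9 holds the
 * saved MSR and r12 the saved NIP; restore the remaining registers
 * from the frame and rfi back to the interrupted code.
 */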
 716	.globl	fast_exception_return
 717fast_exception_return:
 718#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 719	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
 720	beq	1f			/* if not, we've got problems */
 721#endif
 722
 7232:	REST_4GPRS(3, r11)
 724	lwz	r10,_CCR(r11)
 725	REST_GPR(1, r11)
 726	mtcr	r10
 727	lwz	r10,_LINK(r11)
 728	mtlr	r10
 729	REST_GPR(10, r11)
 730#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 731	mtspr	SPRN_NRI, r0
 732#endif
 733	mtspr	SPRN_SRR1,r9
 734	mtspr	SPRN_SRR0,r12
 735	REST_GPR(9, r11)
 736	REST_GPR(12, r11)
 737	lwz	r11,GPR11(r11)
 738	SYNC
 739	RFI
 740
 741#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 742/* check if the exception happened in a restartable section */
 7431:	lis	r3,exc_exit_restart_end@ha
 744	addi	r3,r3,exc_exit_restart_end@l
 745	cmplw	r12,r3
 746	bge	3f
 747	lis	r4,exc_exit_restart@ha
 748	addi	r4,r4,exc_exit_restart@l
 749	cmplw	r12,r4
 750	blt	3f
 751	lis	r3,fee_restarts@ha
 752	tophys(r3,r3)
 753	lwz	r5,fee_restarts@l(r3)
 754	addi	r5,r5,1
 755	stw	r5,fee_restarts@l(r3)
 756	mr	r12,r4		/* restart at exc_exit_restart */
 757	b	2b
 758
 759	.section .bss
 760	.align	2
 761fee_restarts:
 762	.space	4
 763	.previous
 764
 765/* aargh, a nonrecoverable interrupt, panic */
 766/* aargh, we don't know which trap this is */
 767/* but the 601 doesn't implement the RI bit, so assume it's OK */
 7683:
 769BEGIN_FTR_SECTION
 770	b	2b
 771END_FTR_SECTION_IFSET(CPU_FTR_601)
 772	li	r10,-1
 773	stw	r10,_TRAP(r11)
 774	addi	r3,r1,STACK_FRAME_OVERHEAD
 775	lis	r10,MSR_KERNEL@h
 776	ori	r10,r10,MSR_KERNEL@l
 777	bl	transfer_to_handler_full
 778	.long	nonrecoverable_exception
 779	.long	ret_from_except
 780#endif
 781
 782	.globl	ret_from_except_full
 783ret_from_except_full:
 784	REST_NVGPRS(r1)
 785	/* fall through */
 786
 787	.globl	ret_from_except
 788ret_from_except:
 789	/* Hard-disable interrupts so that current_thread_info()->flags
 790	 * can't change between when we test it and when we return
 791	 * from the interrupt. */
 792	/* Note: We don't bother telling lockdep about it */
 793	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 794	SYNC			/* Some chip revs have problems here... */
 795	MTMSRD(r10)		/* disable interrupts */
 796
 797	lwz	r3,_MSR(r1)	/* Returning to user mode? */
 798	andi.	r0,r3,MSR_PR
 799	beq	resume_kernel
 800
 801user_exc_return:		/* r10 contains MSR_KERNEL here */
 802	/* Check current_thread_info()->flags */
 803	CURRENT_THREAD_INFO(r9, r1)
 804	lwz	r9,TI_FLAGS(r9)
 805	andi.	r0,r9,_TIF_USER_WORK_MASK
 806	bne	do_work
 807
 808restore_user:
 809#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 810	/* Check whether this process has its own DBCR0 value.  The internal
 811	   debug mode bit tells us that dbcr0 should be loaded. */
 812	lwz	r0,THREAD+THREAD_DBCR0(r2)
 813	andis.	r10,r0,DBCR0_IDM@h
 814	bnel-	load_dbcr0
 815#endif
 816#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 817	CURRENT_THREAD_INFO(r9, r1)
 818	ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
 819#endif
 820
 821	b	restore
 822
 823/* N.B. the only way to get here is from the beq following ret_from_except. */
 824resume_kernel:
 825	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
 826	CURRENT_THREAD_INFO(r9, r1)
 827	lwz	r8,TI_FLAGS(r9)
 828	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
 829	beq+	1f
 830
 831	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
 832
 833	lwz	r3,GPR1(r1)
 834	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
 835	mr	r4,r1			/* src:  current exception frame */
 836	mr	r1,r3			/* Reroute the trampoline frame to r1 */
 837
 838	/* Copy from the original to the trampoline. */
 839	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
 840	li	r6,0			/* start offset: 0 */
 841	mtctr	r5
 8422:	lwzx	r0,r6,r4
 843	stwx	r0,r6,r3
 844	addi	r6,r6,4
 845	bdnz	2b
 846
 847	/* Do real store operation to complete stwu */
 848	lwz	r5,GPR1(r1)
 849	stw	r8,0(r5)
 850
 851	/* Clear _TIF_EMULATE_STACK_STORE flag */
 852	lis	r11,_TIF_EMULATE_STACK_STORE@h
 853	addi	r5,r9,TI_FLAGS
 8540:	lwarx	r8,0,r5
 855	andc	r8,r8,r11
 856#ifdef CONFIG_IBM405_ERR77
 857	dcbt	0,r5
 858#endif
 859	stwcx.	r8,0,r5
 860	bne-	0b
 8611:
 862
 863#ifdef CONFIG_PREEMPT
 864	/* check current_thread_info->preempt_count */
 865	lwz	r0,TI_PREEMPT(r9)
 866	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
 867	bne	restore
 868	andi.	r8,r8,_TIF_NEED_RESCHED
 869	beq+	restore
 870	lwz	r3,_MSR(r1)
 871	andi.	r0,r3,MSR_EE	/* interrupts off? */
 872	beq	restore		/* don't schedule if so */
 873#ifdef CONFIG_TRACE_IRQFLAGS
 874	/* Lockdep thinks irqs are enabled, we need to call
 875	 * preempt_schedule_irq with IRQs off, so we inform lockdep
 876	 * now that we -did- turn them off already
 877	 */
 878	bl	trace_hardirqs_off
 879#endif
 8801:	bl	preempt_schedule_irq
 881	CURRENT_THREAD_INFO(r9, r1)
 882	lwz	r3,TI_FLAGS(r9)
 883	andi.	r0,r3,_TIF_NEED_RESCHED
 884	bne-	1b
 885#ifdef CONFIG_TRACE_IRQFLAGS
 886	/* And now, to properly rebalance the above, we tell lockdep they
 887	 * are being turned back on, which will happen when we return
 888	 */
 889	bl	trace_hardirqs_on
 890#endif
 891#endif /* CONFIG_PREEMPT */
 892
 893	/* interrupts are hard-disabled at this point */
 894restore:
 895#ifdef CONFIG_44x
 896BEGIN_MMU_FTR_SECTION
 897	b	1f
 898END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
 899	lis	r4,icache_44x_need_flush@ha
 900	lwz	r5,icache_44x_need_flush@l(r4)
 901	cmplwi	cr0,r5,0
 902	beq+	1f
 903	li	r6,0
 904	iccci	r0,r0
 905	stw	r6,icache_44x_need_flush@l(r4)
 9061:
 907#endif  /* CONFIG_44x */
 908
 909	lwz	r9,_MSR(r1)
 910#ifdef CONFIG_TRACE_IRQFLAGS
  911	/* Lockdep doesn't know that IRQs are temporarily turned off in this
  912	 * assembly code while we peek at TI_FLAGS() and such. However, we do
  913	 * need to inform it if the exception turned interrupts off and we are
  914	 * about to turn them back on.
  915	 *
  916	 * The problem, sadly, is that we don't know whether the exception was
  917	 * one that turned interrupts off or not. So we always tell lockdep
  918	 * about turning them on here when we go back to wherever we came from
  919	 * with EE on, even if that means some redundant calls get tracked.
  920	 * Maybe later we could encode what the exception did somewhere, or
  921	 * test the exception type in the pt_regs, but that sounds like overkill.
 922	 */
 923	andi.	r10,r9,MSR_EE
 924	beq	1f
 925	/*
 926	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
 927	 * which is the stack frame here, we need to force a stack frame
 928	 * in case we came from user space.
 929	 */
 930	stwu	r1,-32(r1)
 931	mflr	r0
 932	stw	r0,4(r1)
 933	stwu	r1,-32(r1)
 934	bl	trace_hardirqs_on
 935	lwz	r1,0(r1)
 936	lwz	r1,0(r1)
 937	lwz	r9,_MSR(r1)
 9381:
 939#endif /* CONFIG_TRACE_IRQFLAGS */
 940
 941	lwz	r0,GPR0(r1)
 942	lwz	r2,GPR2(r1)
 943	REST_4GPRS(3, r1)
 944	REST_2GPRS(7, r1)
 945
 946	lwz	r10,_XER(r1)
 947	lwz	r11,_CTR(r1)
 948	mtspr	SPRN_XER,r10
 949	mtctr	r11
 950
 951	PPC405_ERR77(0,r1)
 952BEGIN_FTR_SECTION
 953	lwarx	r11,0,r1
 954END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 955	stwcx.	r0,0,r1			/* to clear the reservation */
 956
 957#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 958	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
 959	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
 960
 961	lwz	r10,_CCR(r1)
 962	lwz	r11,_LINK(r1)
 963	mtcrf	0xFF,r10
 964	mtlr	r11
 965
 966	/*
 967	 * Once we put values in SRR0 and SRR1, we are in a state
 968	 * where exceptions are not recoverable, since taking an
 969	 * exception will trash SRR0 and SRR1.  Therefore we clear the
 970	 * MSR:RI bit to indicate this.  If we do take an exception,
 971	 * we can't return to the point of the exception but we
 972	 * can restart the exception exit path at the label
 973	 * exc_exit_restart below.  -- paulus
 974	 */
 975	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
 976	SYNC
 977	MTMSRD(r10)		/* clear the RI bit */
 978	.globl exc_exit_restart
 979exc_exit_restart:
 980	lwz	r12,_NIP(r1)
 981#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 982	mtspr	SPRN_NRI, r0
 983#endif
 984	mtspr	SPRN_SRR0,r12
 985	mtspr	SPRN_SRR1,r9
 986	REST_4GPRS(9, r1)
 987	lwz	r1,GPR1(r1)
 988	.globl exc_exit_restart_end
 989exc_exit_restart_end:
 990	SYNC
 991	RFI
 992
 993#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
 994	/*
 995	 * This is a bit different on 4xx/Book-E because it doesn't have
 996	 * the RI bit in the MSR.
 997	 * The TLB miss handler checks if we have interrupted
 998	 * the exception exit path and restarts it if so
 999	 * (well maybe one day it will... :).
1000	 */
1001	lwz	r11,_LINK(r1)
1002	mtlr	r11
1003	lwz	r10,_CCR(r1)
1004	mtcrf	0xff,r10
1005	REST_2GPRS(9, r1)
1006	.globl exc_exit_restart
1007exc_exit_restart:
1008	lwz	r11,_NIP(r1)
1009	lwz	r12,_MSR(r1)
1010exc_exit_start:
1011	mtspr	SPRN_SRR0,r11
1012	mtspr	SPRN_SRR1,r12
1013	REST_2GPRS(11, r1)
1014	lwz	r1,GPR1(r1)
1015	.globl exc_exit_restart_end
1016exc_exit_restart_end:
1017	PPC405_ERR77_SYNC
1018	rfi
1019	b	.			/* prevent prefetch past rfi */
1020
1021/*
1022 * Returning from a critical interrupt in user mode doesn't need
1023 * to be any different from a normal exception.  For a critical
1024 * interrupt in the kernel, we just return (without checking for
1025 * preemption) since the interrupt may have happened at some crucial
1026 * place (e.g. inside the TLB miss handler), and because we will be
1027 * running with r1 pointing into critical_stack, not the current
1028 * process's kernel stack (and therefore current_thread_info() will
1029 * give the wrong answer).
1030 * We have to restore various SPRs that may have been in use at the
1031 * time of the critical interrupt.
1032 *
1033 */
1034#ifdef CONFIG_40x
1035#define PPC_40x_TURN_OFF_MSR_DR						    \
1036	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
1037	 * assume the instructions here are mapped by a pinned TLB entry */ \
1038	li	r10,MSR_IR;						    \
1039	mtmsr	r10;							    \
1040	isync;								    \
1041	tophys(r1, r1);
1042#else
1043#define PPC_40x_TURN_OFF_MSR_DR
1044#endif
1045
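/*
 * Common tail for returning from a critical, debug or machine check
 * level exception: restore the full register state from the exception
 * frame and return using the level-specific SRR pair and rfi variant.
 */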
1046#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
1047	REST_NVGPRS(r1);						\
1048	lwz	r3,_MSR(r1);						\
1049	andi.	r3,r3,MSR_PR;						\
1050	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
1051	bne	user_exc_return;					\
1052	lwz	r0,GPR0(r1);						\
1053	lwz	r2,GPR2(r1);						\
1054	REST_4GPRS(3, r1);						\
1055	REST_2GPRS(7, r1);						\
1056	lwz	r10,_XER(r1);						\
1057	lwz	r11,_CTR(r1);						\
1058	mtspr	SPRN_XER,r10;						\
1059	mtctr	r11;							\
1060	PPC405_ERR77(0,r1);						\
1061	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1062	lwz	r11,_LINK(r1);						\
1063	mtlr	r11;							\
1064	lwz	r10,_CCR(r1);						\
1065	mtcrf	0xff,r10;						\
1066	PPC_40x_TURN_OFF_MSR_DR;					\
1067	lwz	r9,_DEAR(r1);						\
1068	lwz	r10,_ESR(r1);						\
1069	mtspr	SPRN_DEAR,r9;						\
1070	mtspr	SPRN_ESR,r10;						\
1071	lwz	r11,_NIP(r1);						\
1072	lwz	r12,_MSR(r1);						\
1073	mtspr	exc_lvl_srr0,r11;					\
1074	mtspr	exc_lvl_srr1,r12;					\
1075	lwz	r9,GPR9(r1);						\
1076	lwz	r12,GPR12(r1);						\
1077	lwz	r10,GPR10(r1);						\
1078	lwz	r11,GPR11(r1);						\
1079	lwz	r1,GPR1(r1);						\
1080	PPC405_ERR77_SYNC;						\
1081	exc_lvl_rfi;							\
1082	b	.;		/* prevent prefetch past exc_lvl_rfi */
1083
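/* Reload a saved exception-level SRR pair from the frame into its SPRs. */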
1084#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1085	lwz	r9,_##exc_lvl_srr0(r1);					\
1086	lwz	r10,_##exc_lvl_srr1(r1);				\
1087	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1088	mtspr	SPRN_##exc_lvl_srr1,r10;
1089
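/*
 * Restore the MMU registers (MAS0-MAS7 on Book3E, MMUCR on 44x) saved by
 * crit_transfer_to_handler, since the exception may have interrupted
 * code (e.g. a TLB miss handler) that was using them.
 */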
1090#if defined(CONFIG_PPC_BOOK3E_MMU)
1091#ifdef CONFIG_PHYS_64BIT
1092#define	RESTORE_MAS7							\
1093	lwz	r11,MAS7(r1);						\
1094	mtspr	SPRN_MAS7,r11;
1095#else
1096#define	RESTORE_MAS7
1097#endif /* CONFIG_PHYS_64BIT */
1098#define RESTORE_MMU_REGS						\
1099	lwz	r9,MAS0(r1);						\
1100	lwz	r10,MAS1(r1);						\
1101	lwz	r11,MAS2(r1);						\
1102	mtspr	SPRN_MAS0,r9;						\
1103	lwz	r9,MAS3(r1);						\
1104	mtspr	SPRN_MAS1,r10;						\
1105	lwz	r10,MAS6(r1);						\
1106	mtspr	SPRN_MAS2,r11;						\
1107	mtspr	SPRN_MAS3,r9;						\
1108	mtspr	SPRN_MAS6,r10;						\
1109	RESTORE_MAS7;
1110#elif defined(CONFIG_44x)
1111#define RESTORE_MMU_REGS						\
1112	lwz	r9,MMUCR(r1);						\
1113	mtspr	SPRN_MMUCR,r9;
1114#else
1115#define RESTORE_MMU_REGS
1116#endif
1117
1118#ifdef CONFIG_40x
1119	.globl	ret_from_crit_exc
1120ret_from_crit_exc:
1121	mfspr	r9,SPRN_SPRG_THREAD
1122	lis	r10,saved_ksp_limit@ha;
1123	lwz	r10,saved_ksp_limit@l(r10);
1124	tovirt(r9,r9);
1125	stw	r10,KSP_LIMIT(r9)
1126	lis	r9,crit_srr0@ha;
1127	lwz	r9,crit_srr0@l(r9);
1128	lis	r10,crit_srr1@ha;
1129	lwz	r10,crit_srr1@l(r10);
1130	mtspr	SPRN_SRR0,r9;
1131	mtspr	SPRN_SRR1,r10;
1132	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1133#endif /* CONFIG_40x */
1134
1135#ifdef CONFIG_BOOKE
1136	.globl	ret_from_crit_exc
1137ret_from_crit_exc:
1138	mfspr	r9,SPRN_SPRG_THREAD
1139	lwz	r10,SAVED_KSP_LIMIT(r1)
1140	stw	r10,KSP_LIMIT(r9)
1141	RESTORE_xSRR(SRR0,SRR1);
1142	RESTORE_MMU_REGS;
1143	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1144
1145	.globl	ret_from_debug_exc
1146ret_from_debug_exc:
1147	mfspr	r9,SPRN_SPRG_THREAD
1148	lwz	r10,SAVED_KSP_LIMIT(r1)
1149	stw	r10,KSP_LIMIT(r9)
1150	lwz	r9,THREAD_INFO-THREAD(r9)
1151	CURRENT_THREAD_INFO(r10, r1)
1152	lwz	r10,TI_PREEMPT(r10)
1153	stw	r10,TI_PREEMPT(r9)
1154	RESTORE_xSRR(SRR0,SRR1);
1155	RESTORE_xSRR(CSRR0,CSRR1);
1156	RESTORE_MMU_REGS;
1157	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1158
1159	.globl	ret_from_mcheck_exc
1160ret_from_mcheck_exc:
1161	mfspr	r9,SPRN_SPRG_THREAD
1162	lwz	r10,SAVED_KSP_LIMIT(r1)
1163	stw	r10,KSP_LIMIT(r9)
1164	RESTORE_xSRR(SRR0,SRR1);
1165	RESTORE_xSRR(CSRR0,CSRR1);
1166	RESTORE_xSRR(DSRR0,DSRR1);
1167	RESTORE_MMU_REGS;
1168	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1169#endif /* CONFIG_BOOKE */
1170
1171/*
 1172 * Load the DBCR0 value for a task that is being ptraced, after
 1173 * first saving the current DBCR0 into the global_dbcr0 area.
 1174 * Note that r0 holds the dbcr0 value to set on entry.
1175 */
1176load_dbcr0:
1177	mfmsr	r10		/* first disable debug exceptions */
1178	rlwinm	r10,r10,0,~MSR_DE
1179	mtmsr	r10
1180	isync
1181	mfspr	r10,SPRN_DBCR0
1182	lis	r11,global_dbcr0@ha
1183	addi	r11,r11,global_dbcr0@l
1184#ifdef CONFIG_SMP
1185	CURRENT_THREAD_INFO(r9, r1)
1186	lwz	r9,TI_CPU(r9)
1187	slwi	r9,r9,3
1188	add	r11,r11,r9
1189#endif
1190	stw	r10,0(r11)
1191	mtspr	SPRN_DBCR0,r0
1192	lwz	r10,4(r11)
1193	addi	r10,r10,1
1194	stw	r10,4(r11)
1195	li	r11,-1
1196	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1197	blr
1198
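/*
 * Per-CPU save area used above: one word for the saved DBCR0 value and
 * one word for a use count (8 bytes per CPU).
 */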
1199	.section .bss
1200	.align	4
1201global_dbcr0:
1202	.space	8*NR_CPUS
1203	.previous
1204#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1205
1206do_work:			/* r10 contains MSR_KERNEL here */
1207	andi.	r0,r9,_TIF_NEED_RESCHED
1208	beq	do_user_signal
1209
1210do_resched:			/* r10 contains MSR_KERNEL here */
1211	/* Note: We don't need to inform lockdep that we are enabling
1212	 * interrupts here. As far as it knows, they are already enabled
1213	 */
1214	ori	r10,r10,MSR_EE
1215	SYNC
1216	MTMSRD(r10)		/* hard-enable interrupts */
1217	bl	schedule
1218recheck:
 1219	/* Note: And we don't tell it that we are disabling them
 1220	 * again either. Those disable/enable cycles used to peek at
 1221	 * TI_FLAGS aren't advertised.
1222	 */
1223	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1224	SYNC
1225	MTMSRD(r10)		/* disable interrupts */
1226	CURRENT_THREAD_INFO(r9, r1)
1227	lwz	r9,TI_FLAGS(r9)
1228	andi.	r0,r9,_TIF_NEED_RESCHED
1229	bne-	do_resched
1230	andi.	r0,r9,_TIF_USER_WORK_MASK
1231	beq	restore_user
1232do_user_signal:			/* r10 contains MSR_KERNEL here */
1233	ori	r10,r10,MSR_EE
1234	SYNC
1235	MTMSRD(r10)		/* hard-enable interrupts */
1236	/* save r13-r31 in the exception frame, if not already done */
1237	lwz	r3,_TRAP(r1)
1238	andi.	r0,r3,1
1239	beq	2f
1240	SAVE_NVGPRS(r1)
1241	rlwinm	r3,r3,0,0,30
1242	stw	r3,_TRAP(r1)
12432:	addi	r3,r1,STACK_FRAME_OVERHEAD
1244	mr	r4,r9
1245	bl	do_notify_resume
1246	REST_NVGPRS(r1)
1247	b	recheck
1248
1249/*
1250 * We come here when we are at the end of handling an exception
1251 * that occurred at a place where taking an exception will lose
1252 * state information, such as the contents of SRR0 and SRR1.
1253 */
1254nonrecoverable:
1255	lis	r10,exc_exit_restart_end@ha
1256	addi	r10,r10,exc_exit_restart_end@l
1257	cmplw	r12,r10
1258	bge	3f
1259	lis	r11,exc_exit_restart@ha
1260	addi	r11,r11,exc_exit_restart@l
1261	cmplw	r12,r11
1262	blt	3f
1263	lis	r10,ee_restarts@ha
1264	lwz	r12,ee_restarts@l(r10)
1265	addi	r12,r12,1
1266	stw	r12,ee_restarts@l(r10)
1267	mr	r12,r11		/* restart at exc_exit_restart */
1268	blr
12693:	/* OK, we can't recover, kill this process */
1270	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1271BEGIN_FTR_SECTION
1272	blr
1273END_FTR_SECTION_IFSET(CPU_FTR_601)
1274	lwz	r3,_TRAP(r1)
1275	andi.	r0,r3,1
1276	beq	4f
1277	SAVE_NVGPRS(r1)
1278	rlwinm	r3,r3,0,0,30
1279	stw	r3,_TRAP(r1)
12804:	addi	r3,r1,STACK_FRAME_OVERHEAD
1281	bl	nonrecoverable_exception
1282	/* shouldn't return */
1283	b	4b
1284
1285	.section .bss
1286	.align	2
1287ee_restarts:
1288	.space	4
1289	.previous
1290
1291/*
1292 * PROM code for specific machines follows.  Put it
1293 * here so it's easy to add arch-specific sections later.
1294 * -- Cort
1295 */
1296#ifdef CONFIG_PPC_RTAS
1297/*
1298 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1299 * called with the MMU off.
1300 */
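/*
 * The RTAS argument block address passed in r3 is handed to RTAS
 * unchanged.  We branch to the RTAS entry point with the MMU disabled
 * (SRR1 has IR and DR cleared), keep the physical address of our stack
 * frame in SPRN_SPRG_RTAS while RTAS runs, and come back at the
 * physical address of the label 1: below.
 */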
1301_GLOBAL(enter_rtas)
1302	stwu	r1,-INT_FRAME_SIZE(r1)
1303	mflr	r0
1304	stw	r0,INT_FRAME_SIZE+4(r1)
1305	LOAD_REG_ADDR(r4, rtas)
1306	lis	r6,1f@ha	/* physical return address for rtas */
1307	addi	r6,r6,1f@l
1308	tophys(r6,r6)
1309	tophys(r7,r1)
1310	lwz	r8,RTASENTRY(r4)
1311	lwz	r4,RTASBASE(r4)
1312	mfmsr	r9
1313	stw	r9,8(r1)
1314	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1315	SYNC			/* disable interrupts so SRR0/1 */
1316	MTMSRD(r0)		/* don't get trashed */
1317	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1318	mtlr	r6
1319	mtspr	SPRN_SPRG_RTAS,r7
1320	mtspr	SPRN_SRR0,r8
1321	mtspr	SPRN_SRR1,r9
1322	RFI
13231:	tophys(r9,r1)
1324	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1325	lwz	r9,8(r9)	/* original msr value */
 
1326	addi	r1,r1,INT_FRAME_SIZE
1327	li	r0,0
1328	mtspr	SPRN_SPRG_RTAS,r0
1329	mtspr	SPRN_SRR0,r8
1330	mtspr	SPRN_SRR1,r9
1331	RFI			/* return to caller */
1332
1333	.globl	machine_check_in_rtas
1334machine_check_in_rtas:
1335	twi	31,0,0
1336	/* XXX load up BATs and panic */
1337
1338#endif /* CONFIG_PPC_RTAS */
v4.10.11
   1/*
   2 *  PowerPC version
   3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
   4 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
   5 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
   6 *  Adapted for Power Macintosh by Paul Mackerras.
   7 *  Low-level exception handlers and MMU support
   8 *  rewritten by Paul Mackerras.
   9 *    Copyright (C) 1996 Paul Mackerras.
  10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
  11 *
  12 *  This file contains the system call entry code, context switch
  13 *  code, and exception/interrupt return code for PowerPC.
  14 *
  15 *  This program is free software; you can redistribute it and/or
  16 *  modify it under the terms of the GNU General Public License
  17 *  as published by the Free Software Foundation; either version
  18 *  2 of the License, or (at your option) any later version.
  19 *
  20 */
  21
  22#include <linux/errno.h>
  23#include <linux/err.h>
  24#include <linux/sys.h>
  25#include <linux/threads.h>
  26#include <asm/reg.h>
  27#include <asm/page.h>
  28#include <asm/mmu.h>
  29#include <asm/cputable.h>
  30#include <asm/thread_info.h>
  31#include <asm/ppc_asm.h>
  32#include <asm/asm-offsets.h>
  33#include <asm/unistd.h>
  34#include <asm/ftrace.h>
  35#include <asm/ptrace.h>
  36#include <asm/export.h>
  37
  38/*
  39 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
  40 */
  41#if MSR_KERNEL >= 0x10000
  42#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
  43#else
  44#define LOAD_MSR_KERNEL(r, x)	li r,(x)
  45#endif
  46
 
 
 
 
 
 
 
  47#ifdef CONFIG_BOOKE
  48	.globl	mcheck_transfer_to_handler
  49mcheck_transfer_to_handler:
  50	mfspr	r0,SPRN_DSRR0
  51	stw	r0,_DSRR0(r11)
  52	mfspr	r0,SPRN_DSRR1
  53	stw	r0,_DSRR1(r11)
  54	/* fall through */
  55
  56	.globl	debug_transfer_to_handler
  57debug_transfer_to_handler:
  58	mfspr	r0,SPRN_CSRR0
  59	stw	r0,_CSRR0(r11)
  60	mfspr	r0,SPRN_CSRR1
  61	stw	r0,_CSRR1(r11)
  62	/* fall through */
  63
  64	.globl	crit_transfer_to_handler
  65crit_transfer_to_handler:
  66#ifdef CONFIG_PPC_BOOK3E_MMU
  67	mfspr	r0,SPRN_MAS0
  68	stw	r0,MAS0(r11)
  69	mfspr	r0,SPRN_MAS1
  70	stw	r0,MAS1(r11)
  71	mfspr	r0,SPRN_MAS2
  72	stw	r0,MAS2(r11)
  73	mfspr	r0,SPRN_MAS3
  74	stw	r0,MAS3(r11)
  75	mfspr	r0,SPRN_MAS6
  76	stw	r0,MAS6(r11)
  77#ifdef CONFIG_PHYS_64BIT
  78	mfspr	r0,SPRN_MAS7
  79	stw	r0,MAS7(r11)
  80#endif /* CONFIG_PHYS_64BIT */
  81#endif /* CONFIG_PPC_BOOK3E_MMU */
  82#ifdef CONFIG_44x
  83	mfspr	r0,SPRN_MMUCR
  84	stw	r0,MMUCR(r11)
  85#endif
  86	mfspr	r0,SPRN_SRR0
  87	stw	r0,_SRR0(r11)
  88	mfspr	r0,SPRN_SRR1
  89	stw	r0,_SRR1(r11)
  90
  91	/* set the stack limit to the current stack
  92	 * and set the limit to protect the thread_info
  93	 * struct
  94	 */
  95	mfspr	r8,SPRN_SPRG_THREAD
  96	lwz	r0,KSP_LIMIT(r8)
  97	stw	r0,SAVED_KSP_LIMIT(r11)
  98	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
  99	stw	r0,KSP_LIMIT(r8)
 100	/* fall through */
 101#endif
 102
 103#ifdef CONFIG_40x
 104	.globl	crit_transfer_to_handler
 105crit_transfer_to_handler:
 106	lwz	r0,crit_r10@l(0)
 107	stw	r0,GPR10(r11)
 108	lwz	r0,crit_r11@l(0)
 109	stw	r0,GPR11(r11)
 110	mfspr	r0,SPRN_SRR0
 111	stw	r0,crit_srr0@l(0)
 112	mfspr	r0,SPRN_SRR1
 113	stw	r0,crit_srr1@l(0)
 114
 115	/* set the stack limit to the current stack
 116	 * and set the limit to protect the thread_info
 117	 * struct
 118	 */
 119	mfspr	r8,SPRN_SPRG_THREAD
 120	lwz	r0,KSP_LIMIT(r8)
 121	stw	r0,saved_ksp_limit@l(0)
 122	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
 123	stw	r0,KSP_LIMIT(r8)
 124	/* fall through */
 125#endif
 126
 127/*
 128 * This code finishes saving the registers to the exception frame
 129 * and jumps to the appropriate handler for the exception, turning
 130 * on address translation.
 131 * Note that we rely on the caller having set cr0.eq iff the exception
 132 * occurred in kernel mode (i.e. MSR:PR = 0).
 133 */
 134	.globl	transfer_to_handler_full
 135transfer_to_handler_full:
 136	SAVE_NVGPRS(r11)
 137	/* fall through */
 138
 139	.globl	transfer_to_handler
 140transfer_to_handler:
 141	stw	r2,GPR2(r11)
 142	stw	r12,_NIP(r11)
 143	stw	r9,_MSR(r11)
 144	andi.	r2,r9,MSR_PR
 145	mfctr	r12
 146	mfspr	r2,SPRN_XER
 147	stw	r12,_CTR(r11)
 148	stw	r2,_XER(r11)
 149	mfspr	r12,SPRN_SPRG_THREAD
 150	addi	r2,r12,-THREAD
 151	tovirt(r2,r2)			/* set r2 to current */
 152	beq	2f			/* if from user, fix up THREAD.regs */
 153	addi	r11,r1,STACK_FRAME_OVERHEAD
 154	stw	r11,PT_REGS(r12)
 155#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
 156	/* Check to see if the dbcr0 register is set up to debug.  Use the
 157	   internal debug mode bit to do this. */
 158	lwz	r12,THREAD_DBCR0(r12)
 159	andis.	r12,r12,DBCR0_IDM@h
 160	beq+	3f
 161	/* From user and task is ptraced - load up global dbcr0 */
 162	li	r12,-1			/* clear all pending debug events */
 163	mtspr	SPRN_DBSR,r12
 164	lis	r11,global_dbcr0@ha
 165	tophys(r11,r11)
 166	addi	r11,r11,global_dbcr0@l
 167#ifdef CONFIG_SMP
 168	CURRENT_THREAD_INFO(r9, r1)
 169	lwz	r9,TI_CPU(r9)
 170	slwi	r9,r9,3
 171	add	r11,r11,r9
 172#endif
 173	lwz	r12,0(r11)
 174	mtspr	SPRN_DBCR0,r12
 175	lwz	r12,4(r11)
 176	addi	r12,r12,-1
 177	stw	r12,4(r11)
 178#endif
 179#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 180	CURRENT_THREAD_INFO(r9, r1)
 181	tophys(r9, r9)
 182	ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
 183#endif
 184
 185	b	3f
 186
 1872:	/* if from kernel, check interrupted DOZE/NAP mode and
 188         * check for stack overflow
 189         */
 190	lwz	r9,KSP_LIMIT(r12)
 191	cmplw	r1,r9			/* if r1 <= ksp_limit */
 192	ble-	stack_ovf		/* then the kernel stack overflowed */
 1935:
 194#if defined(CONFIG_6xx) || defined(CONFIG_E500)
 195	CURRENT_THREAD_INFO(r9, r1)
 196	tophys(r9,r9)			/* check local flags */
 197	lwz	r12,TI_LOCAL_FLAGS(r9)
 198	mtcrf	0x01,r12
 199	bt-	31-TLF_NAPPING,4f
 200	bt-	31-TLF_SLEEPING,7f
 201#endif /* CONFIG_6xx || CONFIG_E500 */
 202	.globl transfer_to_handler_cont
 203transfer_to_handler_cont:
 2043:
 205	mflr	r9
 206	lwz	r11,0(r9)		/* virtual address of handler */
 207	lwz	r9,4(r9)		/* where to go when done */
 
 
 
 208#ifdef CONFIG_TRACE_IRQFLAGS
 209	lis	r12,reenable_mmu@h
 210	ori	r12,r12,reenable_mmu@l
 211	mtspr	SPRN_SRR0,r12
 212	mtspr	SPRN_SRR1,r10
 213	SYNC
 214	RFI
 215reenable_mmu:				/* re-enable mmu so we can */
 216	mfmsr	r10
 217	lwz	r12,_MSR(r1)
 218	xor	r10,r10,r12
 219	andi.	r10,r10,MSR_EE		/* Did EE change? */
 220	beq	1f
 221
 222	/*
 223	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
 224	 * If from user mode there is only one stack frame on the stack, and
 225	 * accessing CALLER_ADDR1 will cause oops. So we need create a dummy
 226	 * stack frame to make trace_hardirqs_off happy.
 227	 *
 228	 * This is handy because we also need to save a bunch of GPRs,
 229	 * r3 can be different from GPR3(r1) at this point, r9 and r11
 230	 * contains the old MSR and handler address respectively,
 231	 * r4 & r5 can contain page fault arguments that need to be passed
 232	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
 233	 * they aren't useful past this point (aren't syscall arguments),
 234	 * the rest is restored from the exception frame.
 235	 */
 236	stwu	r1,-32(r1)
 237	stw	r9,8(r1)
 238	stw	r11,12(r1)
 239	stw	r3,16(r1)
 240	stw	r4,20(r1)
 241	stw	r5,24(r1)
 242	bl	trace_hardirqs_off
 243	lwz	r5,24(r1)
 244	lwz	r4,20(r1)
 245	lwz	r3,16(r1)
 246	lwz	r11,12(r1)
 247	lwz	r9,8(r1)
 248	addi	r1,r1,32
 249	lwz	r0,GPR0(r1)
 250	lwz	r6,GPR6(r1)
 251	lwz	r7,GPR7(r1)
 252	lwz	r8,GPR8(r1)
 2531:	mtctr	r11
 254	mtlr	r9
 255	bctr				/* jump to handler */
 256#else /* CONFIG_TRACE_IRQFLAGS */
 257	mtspr	SPRN_SRR0,r11
 258	mtspr	SPRN_SRR1,r10
 259	mtlr	r9
 260	SYNC
 261	RFI				/* jump to handler, enable MMU */
 262#endif /* CONFIG_TRACE_IRQFLAGS */
 263
 264#if defined (CONFIG_6xx) || defined(CONFIG_E500)
 2654:	rlwinm	r12,r12,0,~_TLF_NAPPING
 266	stw	r12,TI_LOCAL_FLAGS(r9)
 267	b	power_save_ppc32_restore
 268
 2697:	rlwinm	r12,r12,0,~_TLF_SLEEPING
 270	stw	r12,TI_LOCAL_FLAGS(r9)
 271	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
 272	rlwinm	r9,r9,0,~MSR_EE
 273	lwz	r12,_LINK(r11)		/* and return to address in LR */
 274	b	fast_exception_return
 275#endif
 276
 277/*
 278 * On kernel stack overflow, load up an initial stack pointer
 279 * and call StackOverflow(regs), which should not return.
 280 */
 281stack_ovf:
 282	/* sometimes we use a statically-allocated stack, which is OK. */
 283	lis	r12,_end@h
 284	ori	r12,r12,_end@l
 285	cmplw	r1,r12
 286	ble	5b			/* r1 <= &_end is OK */
 287	SAVE_NVGPRS(r11)
 288	addi	r3,r1,STACK_FRAME_OVERHEAD
 289	lis	r1,init_thread_union@ha
 290	addi	r1,r1,init_thread_union@l
 291	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
 292	lis	r9,StackOverflow@ha
 293	addi	r9,r9,StackOverflow@l
 294	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 295	FIX_SRR1(r10,r12)
 
 
 296	mtspr	SPRN_SRR0,r9
 297	mtspr	SPRN_SRR1,r10
 298	SYNC
 299	RFI
 300
 301/*
 302 * Handle a system call.
 303 */
 304	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
 305	.stabs	"entry_32.S",N_SO,0,0,0f
 3060:
 307
 308_GLOBAL(DoSyscall)
 309	stw	r3,ORIG_GPR3(r1)
 310	li	r12,0
 311	stw	r12,RESULT(r1)
 312	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
 313	rlwinm	r11,r11,0,4,2
 314	stw	r11,_CCR(r1)
 315#ifdef CONFIG_TRACE_IRQFLAGS
 316	/* Return from syscalls can (and generally will) hard enable
 317	 * interrupts. You aren't supposed to call a syscall with
 318	 * interrupts disabled in the first place. However, to ensure
 319	 * that we get it right vs. lockdep if it happens, we force
 320	 * that hard enable here with appropriate tracing if we see
 321	 * that we have been called with interrupts off
 322	 */
 323	mfmsr	r11
 324	andi.	r12,r11,MSR_EE
 325	bne+	1f
 326	/* We came in with interrupts disabled, we enable them now */
 327	bl	trace_hardirqs_on
 328	mfmsr	r11
 329	lwz	r0,GPR0(r1)
 330	lwz	r3,GPR3(r1)
 331	lwz	r4,GPR4(r1)
 332	ori	r11,r11,MSR_EE
 333	lwz	r5,GPR5(r1)
 334	lwz	r6,GPR6(r1)
 335	lwz	r7,GPR7(r1)
 336	lwz	r8,GPR8(r1)
 337	mtmsr	r11
 3381:
 339#endif /* CONFIG_TRACE_IRQFLAGS */
 340	CURRENT_THREAD_INFO(r10, r1)
 341	lwz	r11,TI_FLAGS(r10)
 342	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
 343	bne-	syscall_dotrace
 344syscall_dotrace_cont:
 345	cmplwi	0,r0,NR_syscalls
 346	lis	r10,sys_call_table@h
 347	ori	r10,r10,sys_call_table@l
 348	slwi	r0,r0,2
 349	bge-	66f
 350	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
 351	mtlr	r10
 352	addi	r9,r1,STACK_FRAME_OVERHEAD
 353	PPC440EP_ERR42
 354	blrl			/* Call handler */
 355	.globl	ret_from_syscall
 356ret_from_syscall:
 357	mr	r6,r3
 358	CURRENT_THREAD_INFO(r12, r1)
 359	/* disable interrupts so current_thread_info()->flags can't change */
 360	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
 361	/* Note: We don't bother telling lockdep about it */
 362	SYNC
 363	MTMSRD(r10)
 364	lwz	r9,TI_FLAGS(r12)
 365	li	r8,-MAX_ERRNO
 366	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 367	bne-	syscall_exit_work
 368	cmplw	0,r3,r8
 369	blt+	syscall_exit_cont
 370	lwz	r11,_CCR(r1)			/* Load CR */
 371	neg	r3,r3
 372	oris	r11,r11,0x1000	/* Set SO bit in CR */
 373	stw	r11,_CCR(r1)
 374syscall_exit_cont:
 375	lwz	r8,_MSR(r1)
 376#ifdef CONFIG_TRACE_IRQFLAGS
 377	/* If we are going to return from the syscall with interrupts
 378	 * off, we trace that here. It shouldn't happen though but we
 379	 * want to catch the bugger if it does right ?
 380	 */
 381	andi.	r10,r8,MSR_EE
 382	bne+	1f
 383	stw	r3,GPR3(r1)
 384	bl      trace_hardirqs_off
 385	lwz	r3,GPR3(r1)
 3861:
 387#endif /* CONFIG_TRACE_IRQFLAGS */
 388#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 389	/* If the process has its own DBCR0 value, load it up.  The internal
 390	   debug mode bit tells us that dbcr0 should be loaded. */
 391	lwz	r0,THREAD+THREAD_DBCR0(r2)
 392	andis.	r10,r0,DBCR0_IDM@h
 393	bnel-	load_dbcr0
 394#endif
 395#ifdef CONFIG_44x
 396BEGIN_MMU_FTR_SECTION
 397	lis	r4,icache_44x_need_flush@ha
 398	lwz	r5,icache_44x_need_flush@l(r4)
 399	cmplwi	cr0,r5,0
 400	bne-	2f
 4011:
 402END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
 403#endif /* CONFIG_44x */
 404BEGIN_FTR_SECTION
 405	lwarx	r7,0,r1
 406END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 407	stwcx.	r0,0,r1			/* to clear the reservation */
 408#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 409	andi.	r4,r8,MSR_PR
 410	beq	3f
 411	CURRENT_THREAD_INFO(r4, r1)
 412	ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
 4133:
 414#endif
 415	lwz	r4,_LINK(r1)
 416	lwz	r5,_CCR(r1)
 417	mtlr	r4
 418	mtcr	r5
 419	lwz	r7,_NIP(r1)
 420	FIX_SRR1(r8, r0)
 421	lwz	r2,GPR2(r1)
 422	lwz	r1,GPR1(r1)
 
 
 
 423	mtspr	SPRN_SRR0,r7
 424	mtspr	SPRN_SRR1,r8
 425	SYNC
 426	RFI
 427#ifdef CONFIG_44x
 4282:	li	r7,0
 429	iccci	r0,r0
 430	stw	r7,icache_44x_need_flush@l(r4)
 431	b	1b
 432#endif  /* CONFIG_44x */
 433
 43466:	li	r3,-ENOSYS
 435	b	ret_from_syscall
 436
 437	.globl	ret_from_fork
 438ret_from_fork:
 439	REST_NVGPRS(r1)
 440	bl	schedule_tail
 441	li	r3,0
 442	b	ret_from_syscall
 443
 444	.globl	ret_from_kernel_thread
 445ret_from_kernel_thread:
 446	REST_NVGPRS(r1)
 447	bl	schedule_tail
 448	mtlr	r14
 449	mr	r3,r15
 450	PPC440EP_ERR42
 451	blrl
 452	li	r3,0
 453	b	ret_from_syscall
 454
 455/* Traced system call support */
 456syscall_dotrace:
 457	SAVE_NVGPRS(r1)
 458	li	r0,0xc00
 459	stw	r0,_TRAP(r1)
 460	addi	r3,r1,STACK_FRAME_OVERHEAD
 461	bl	do_syscall_trace_enter
 462	/*
 463	 * Restore argument registers possibly just changed.
 464	 * We use the return value of do_syscall_trace_enter
 465	 * for call number to look up in the table (r0).
 466	 */
 467	mr	r0,r3
 468	lwz	r3,GPR3(r1)
 469	lwz	r4,GPR4(r1)
 470	lwz	r5,GPR5(r1)
 471	lwz	r6,GPR6(r1)
 472	lwz	r7,GPR7(r1)
 473	lwz	r8,GPR8(r1)
 474	REST_NVGPRS(r1)
 475
 476	cmplwi	r0,NR_syscalls
 477	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
 478	bge-	ret_from_syscall
 479	b	syscall_dotrace_cont
 480
 481syscall_exit_work:
 482	andi.	r0,r9,_TIF_RESTOREALL
 483	beq+	0f
 484	REST_NVGPRS(r1)
 485	b	2f
 4860:	cmplw	0,r3,r8
 487	blt+	1f
 488	andi.	r0,r9,_TIF_NOERROR
 489	bne-	1f
 490	lwz	r11,_CCR(r1)			/* Load CR */
 491	neg	r3,r3
 492	oris	r11,r11,0x1000	/* Set SO bit in CR */
 493	stw	r11,_CCR(r1)
 494
 4951:	stw	r6,RESULT(r1)	/* Save result */
 496	stw	r3,GPR3(r1)	/* Update return value */
 4972:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
 498	beq	4f
 499
 500	/* Clear per-syscall TIF flags if any are set.  */
 501
 502	li	r11,_TIF_PERSYSCALL_MASK
 503	addi	r12,r12,TI_FLAGS
 5043:	lwarx	r8,0,r12
 505	andc	r8,r8,r11
 506#ifdef CONFIG_IBM405_ERR77
 507	dcbt	0,r12
 508#endif
 509	stwcx.	r8,0,r12
 510	bne-	3b
 511	subi	r12,r12,TI_FLAGS
 512	
 5134:	/* Anything which requires enabling interrupts? */
 514	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
 515	beq	ret_from_except
 516
 517	/* Re-enable interrupts. There is no need to trace that with
 518	 * lockdep as we are supposed to have IRQs on at this point
 519	 */
 520	ori	r10,r10,MSR_EE
 521	SYNC
 522	MTMSRD(r10)
 523
 524	/* Save NVGPRS if they're not saved already */
 525	lwz	r4,_TRAP(r1)
 526	andi.	r4,r4,1
 527	beq	5f
 528	SAVE_NVGPRS(r1)
 529	li	r4,0xc00
 530	stw	r4,_TRAP(r1)
 5315:
 532	addi	r3,r1,STACK_FRAME_OVERHEAD
 533	bl	do_syscall_trace_leave
 534	b	ret_from_except_full
 535
 536/*
 537 * The fork/clone functions need to copy the full register set into
 538 * the child process. Therefore we need to save all the nonvolatile
 539 * registers (r13 - r31) before calling the C code.
 540 */
 541	.globl	ppc_fork
 542ppc_fork:
 543	SAVE_NVGPRS(r1)
 544	lwz	r0,_TRAP(r1)
 545	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 546	stw	r0,_TRAP(r1)		/* register set saved */
 547	b	sys_fork
 548
 549	.globl	ppc_vfork
 550ppc_vfork:
 551	SAVE_NVGPRS(r1)
 552	lwz	r0,_TRAP(r1)
 553	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 554	stw	r0,_TRAP(r1)		/* register set saved */
 555	b	sys_vfork
 556
 557	.globl	ppc_clone
 558ppc_clone:
 559	SAVE_NVGPRS(r1)
 560	lwz	r0,_TRAP(r1)
 561	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 562	stw	r0,_TRAP(r1)		/* register set saved */
 563	b	sys_clone
 564
 565	.globl	ppc_swapcontext
 566ppc_swapcontext:
 567	SAVE_NVGPRS(r1)
 568	lwz	r0,_TRAP(r1)
 569	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 570	stw	r0,_TRAP(r1)		/* register set saved */
 571	b	sys_swapcontext
 572
 573/*
 574 * Top-level page fault handling.
 575 * This is in assembler because if do_page_fault tells us that
 576 * it is a bad kernel page fault, we want to save the non-volatile
 577 * registers before calling bad_page_fault.
 578 */
 579	.globl	handle_page_fault
 580handle_page_fault:
 581	stw	r4,_DAR(r1)
 582	addi	r3,r1,STACK_FRAME_OVERHEAD
 
 
 
 
 583	bl	do_page_fault
 584	cmpwi	r3,0
 585	beq+	ret_from_except
 586	SAVE_NVGPRS(r1)
 587	lwz	r0,_TRAP(r1)
 588	clrrwi	r0,r0,1
 589	stw	r0,_TRAP(r1)
 590	mr	r5,r3
 591	addi	r3,r1,STACK_FRAME_OVERHEAD
 592	lwz	r4,_DAR(r1)
 593	bl	bad_page_fault
 594	b	ret_from_except_full
 595
 
 
 
 
 
 
 
 
 
 
 
 596/*
 597 * This routine switches between two different tasks.  The process
 598 * state of one is saved on its kernel stack.  Then the state
 599 * of the other is restored from its kernel stack.  The memory
 600 * management hardware is updated to the second process's state.
 601 * Finally, we can return to the second process.
 602 * On entry, r3 points to the THREAD for the current task, r4
 603 * points to the THREAD for the new task.
 604 *
 605 * This routine is always called with interrupts disabled.
 606 *
 607 * Note: there are two ways to get to the "going out" portion
 608 * of this code; either by coming in via the entry (_switch)
 609 * or via "fork" which must set up an environment equivalent
 610 * to the "_switch" path.  If you change this, you'll have to
 611 * change the fork code also.
 612 *
 613 * The code which creates the new task context is in 'copy_thread'
 614 * in arch/powerpc/kernel/process.c
 615 */
 616_GLOBAL(_switch)
 617	stwu	r1,-INT_FRAME_SIZE(r1)
 618	mflr	r0
 619	stw	r0,INT_FRAME_SIZE+4(r1)
 620	/* r3-r12 are caller saved -- Cort */
 621	SAVE_NVGPRS(r1)
 622	stw	r0,_NIP(r1)	/* Return to switch caller */
 623	mfmsr	r11
 624	li	r0,MSR_FP	/* Disable floating-point */
 625#ifdef CONFIG_ALTIVEC
 626BEGIN_FTR_SECTION
 627	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
 628	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
 629	stw	r12,THREAD+THREAD_VRSAVE(r2)
 630END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 631#endif /* CONFIG_ALTIVEC */
 632#ifdef CONFIG_SPE
 633BEGIN_FTR_SECTION
 634	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
 635	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
 636	stw	r12,THREAD+THREAD_SPEFSCR(r2)
 637END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 638#endif /* CONFIG_SPE */
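	/*
	 * If FP/AltiVec/SPE is currently enabled, clear those MSR bits
	 * both now and in the MSR image saved below, so the facility
	 * state is reloaded lazily via an "unavailable" exception on
	 * first use in the new context.
	 */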
 639	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
 640	beq+	1f
 641	andc	r11,r11,r0
 642	MTMSRD(r11)
 643	isync
 6441:	stw	r11,_MSR(r1)
 645	mfcr	r10
 646	stw	r10,_CCR(r1)
 647	stw	r1,KSP(r3)	/* Set old stack pointer */
 648
 649#ifdef CONFIG_SMP
 650	/* We need a sync somewhere here to make sure that if the
 651	 * previous task gets rescheduled on another CPU, it sees all
 652	 * stores it has performed on this one.
 653	 */
 654	sync
 655#endif /* CONFIG_SMP */
 656
 657	tophys(r0,r4)
 658	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
 659	lwz	r1,KSP(r4)	/* Load new stack pointer */
 660
 661	/* save the old current 'last' for return value */
 662	mr	r3,r2
 663	addi	r2,r4,-THREAD	/* Update current */
 664
 665#ifdef CONFIG_ALTIVEC
 666BEGIN_FTR_SECTION
 667	lwz	r0,THREAD+THREAD_VRSAVE(r2)
 668	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
 669END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 670#endif /* CONFIG_ALTIVEC */
 671#ifdef CONFIG_SPE
 672BEGIN_FTR_SECTION
 673	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
 674	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
 675END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 676#endif /* CONFIG_SPE */
 677
 678	lwz	r0,_CCR(r1)
 679	mtcrf	0xFF,r0
 680	/* r3-r12 are destroyed -- Cort */
 681	REST_NVGPRS(r1)
 682
 683	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
 684	mtlr	r4
 685	addi	r1,r1,INT_FRAME_SIZE
 686	blr
 687
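/*
 * Fast return path used by low-level handlers that did not call into C.
 * Expects r9 = MSR to restore (goes to SRR1), r12 = return address
 * (goes to SRR0) and r11 = pointer to the saved register frame.
 */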
 688	.globl	fast_exception_return
 689fast_exception_return:
 690#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 691	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
 692	beq	1f			/* if not, we've got problems */
 693#endif
 694
 6952:	REST_4GPRS(3, r11)
 696	lwz	r10,_CCR(r11)
 697	REST_GPR(1, r11)
 698	mtcr	r10
 699	lwz	r10,_LINK(r11)
 700	mtlr	r10
 701	REST_GPR(10, r11)
 702	mtspr	SPRN_SRR1,r9
 703	mtspr	SPRN_SRR0,r12
 704	REST_GPR(9, r11)
 705	REST_GPR(12, r11)
 706	lwz	r11,GPR11(r11)
 707	SYNC
 708	RFI
 709
 710#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 711/* check if the exception happened in a restartable section */
 7121:	lis	r3,exc_exit_restart_end@ha
 713	addi	r3,r3,exc_exit_restart_end@l
 714	cmplw	r12,r3
 715	bge	3f
 716	lis	r4,exc_exit_restart@ha
 717	addi	r4,r4,exc_exit_restart@l
 718	cmplw	r12,r4
 719	blt	3f
 720	lis	r3,fee_restarts@ha
 721	tophys(r3,r3)
 722	lwz	r5,fee_restarts@l(r3)
 723	addi	r5,r5,1
 724	stw	r5,fee_restarts@l(r3)
 725	mr	r12,r4		/* restart at exc_exit_restart */
 726	b	2b
 727
 728	.section .bss
 729	.align	2
 730fee_restarts:
 731	.space	4
 732	.previous
 733
 734/* aargh, a nonrecoverable interrupt, panic */
 735/* aargh, we don't know which trap this is */
 736/* but the 601 doesn't implement the RI bit, so assume it's OK */
 7373:
 738BEGIN_FTR_SECTION
 739	b	2b
 740END_FTR_SECTION_IFSET(CPU_FTR_601)
 741	li	r10,-1
 742	stw	r10,_TRAP(r11)
 743	addi	r3,r1,STACK_FRAME_OVERHEAD
 744	lis	r10,MSR_KERNEL@h
 745	ori	r10,r10,MSR_KERNEL@l
 746	bl	transfer_to_handler_full
 747	.long	nonrecoverable_exception
 748	.long	ret_from_except
 749#endif
 750
 751	.globl	ret_from_except_full
 752ret_from_except_full:
 753	REST_NVGPRS(r1)
 754	/* fall through */
 755
 756	.globl	ret_from_except
 757ret_from_except:
 758	/* Hard-disable interrupts so that current_thread_info()->flags
 759	 * can't change between when we test it and when we return
 760	 * from the interrupt. */
 761	/* Note: We don't bother telling lockdep about it */
 762	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 763	SYNC			/* Some chip revs have problems here... */
 764	MTMSRD(r10)		/* disable interrupts */
 765
 766	lwz	r3,_MSR(r1)	/* Returning to user mode? */
 767	andi.	r0,r3,MSR_PR
 768	beq	resume_kernel
 769
 770user_exc_return:		/* r10 contains MSR_KERNEL here */
 771	/* Check current_thread_info()->flags */
 772	CURRENT_THREAD_INFO(r9, r1)
 773	lwz	r9,TI_FLAGS(r9)
 774	andi.	r0,r9,_TIF_USER_WORK_MASK
 775	bne	do_work
 776
 777restore_user:
 778#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 779	/* Check whether this process has its own DBCR0 value.  The internal
 780	   debug mode bit tells us that dbcr0 should be loaded. */
 781	lwz	r0,THREAD+THREAD_DBCR0(r2)
 782	andis.	r10,r0,DBCR0_IDM@h
 783	bnel-	load_dbcr0
 784#endif
 785#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 786	CURRENT_THREAD_INFO(r9, r1)
 787	ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
 788#endif
 789
 790	b	restore
 791
 792/* N.B. the only way to get here is from the beq following ret_from_except. */
 793resume_kernel:
 794	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
 795	CURRENT_THREAD_INFO(r9, r1)
 796	lwz	r8,TI_FLAGS(r9)
 797	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
 798	beq+	1f
 799
 800	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
 801
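	/* r8 is the stack pointer value from before this exception frame
	 * was pushed, i.e. the back-chain value the emulated stwu still
	 * has to store; GPR1(r1) already holds the updated stack pointer.
	 */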
 802	lwz	r3,GPR1(r1)
 803	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
 804	mr	r4,r1			/* src:  current exception frame */
 805	mr	r1,r3			/* Reroute the trampoline frame to r1 */
 806
 807	/* Copy from the original to the trampoline. */
 808	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
 809	li	r6,0			/* start offset: 0 */
 810	mtctr	r5
 8112:	lwzx	r0,r6,r4
 812	stwx	r0,r6,r3
 813	addi	r6,r6,4
 814	bdnz	2b
 815
 816	/* Do real store operation to complete stwu */
 817	lwz	r5,GPR1(r1)
 818	stw	r8,0(r5)
 819
 820	/* Clear _TIF_EMULATE_STACK_STORE flag */
 821	lis	r11,_TIF_EMULATE_STACK_STORE@h
 822	addi	r5,r9,TI_FLAGS
 8230:	lwarx	r8,0,r5
 824	andc	r8,r8,r11
 825#ifdef CONFIG_IBM405_ERR77
 826	dcbt	0,r5
 827#endif
 828	stwcx.	r8,0,r5
 829	bne-	0b
 8301:
 831
 832#ifdef CONFIG_PREEMPT
 833	/* check current_thread_info->preempt_count */
 834	lwz	r0,TI_PREEMPT(r9)
 835	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
 836	bne	restore
 837	andi.	r8,r8,_TIF_NEED_RESCHED
 838	beq+	restore
 839	lwz	r3,_MSR(r1)
 840	andi.	r0,r3,MSR_EE	/* interrupts off? */
 841	beq	restore		/* don't schedule if so */
 842#ifdef CONFIG_TRACE_IRQFLAGS
 843	/* Lockdep thinks irqs are enabled, we need to call
 844	 * preempt_schedule_irq with IRQs off, so we inform lockdep
 845	 * now that we -did- turn them off already
 846	 */
 847	bl	trace_hardirqs_off
 848#endif
 8491:	bl	preempt_schedule_irq
 850	CURRENT_THREAD_INFO(r9, r1)
 851	lwz	r3,TI_FLAGS(r9)
 852	andi.	r0,r3,_TIF_NEED_RESCHED
 853	bne-	1b
 854#ifdef CONFIG_TRACE_IRQFLAGS
 855	/* And now, to properly rebalance the above, we tell lockdep they
 856	 * are being turned back on, which will happen when we return
 857	 */
 858	bl	trace_hardirqs_on
 859#endif
 860#endif /* CONFIG_PREEMPT */
 861
 862	/* interrupts are hard-disabled at this point */
 863restore:
 864#ifdef CONFIG_44x
 865BEGIN_MMU_FTR_SECTION
 866	b	1f
 867END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
 868	lis	r4,icache_44x_need_flush@ha
 869	lwz	r5,icache_44x_need_flush@l(r4)
 870	cmplwi	cr0,r5,0
 871	beq+	1f
 872	li	r6,0
 873	iccci	r0,r0
 874	stw	r6,icache_44x_need_flush@l(r4)
 8751:
 876#endif  /* CONFIG_44x */
 877
 878	lwz	r9,_MSR(r1)
 879#ifdef CONFIG_TRACE_IRQFLAGS
 880	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
 881	 * off in this assembly code while peeking at TI_FLAGS() and such. However
 882	 * we need to inform it if the exception turned interrupts off, and we
 883	 * are about to turn them back on.
 884	 *
 885	 * The problem here sadly is that we don't know whether the exception was
 886	 * one that turned interrupts off or not. So we always tell lockdep about
 887	 * turning them on here when we go back to wherever we came from with EE
 888	 * on, even if that may mean some redundant calls being tracked. Maybe later
 889	 * we could encode what the exception did somewhere or test the exception
 890	 * type in the pt_regs but that sounds overkill
 891	 */
 892	andi.	r10,r9,MSR_EE
 893	beq	1f
 894	/*
 895	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
 896	 * which is the stack frame here, we need to force a stack frame
 897	 * in case we came from user space.
 898	 */
 899	stwu	r1,-32(r1)
 900	mflr	r0
 901	stw	r0,4(r1)
 902	stwu	r1,-32(r1)
 903	bl	trace_hardirqs_on
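	/* pop the two dummy stack frames created above */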
 904	lwz	r1,0(r1)
 905	lwz	r1,0(r1)
 906	lwz	r9,_MSR(r1)
 9071:
 908#endif /* CONFIG_TRACE_IRQFLAGS */
 909
 910	lwz	r0,GPR0(r1)
 911	lwz	r2,GPR2(r1)
 912	REST_4GPRS(3, r1)
 913	REST_2GPRS(7, r1)
 914
 915	lwz	r10,_XER(r1)
 916	lwz	r11,_CTR(r1)
 917	mtspr	SPRN_XER,r10
 918	mtctr	r11
 919
 920	PPC405_ERR77(0,r1)
 921BEGIN_FTR_SECTION
 922	lwarx	r11,0,r1
 923END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 924	stwcx.	r0,0,r1			/* to clear the reservation */
 925
 926#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 927	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
 928	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
 929
 930	lwz	r10,_CCR(r1)
 931	lwz	r11,_LINK(r1)
 932	mtcrf	0xFF,r10
 933	mtlr	r11
 934
 935	/*
 936	 * Once we put values in SRR0 and SRR1, we are in a state
 937	 * where exceptions are not recoverable, since taking an
 938	 * exception will trash SRR0 and SRR1.  Therefore we clear the
 939	 * MSR:RI bit to indicate this.  If we do take an exception,
 940	 * we can't return to the point of the exception but we
 941	 * can restart the exception exit path at the label
 942	 * exc_exit_restart below.  -- paulus
 943	 */
 944	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
 945	SYNC
 946	MTMSRD(r10)		/* clear the RI bit */
 947	.globl exc_exit_restart
 948exc_exit_restart:
 949	lwz	r12,_NIP(r1)
 950	FIX_SRR1(r9,r10)
 951	mtspr	SPRN_SRR0,r12
 952	mtspr	SPRN_SRR1,r9
 953	REST_4GPRS(9, r1)
 954	lwz	r1,GPR1(r1)
 955	.globl exc_exit_restart_end
 956exc_exit_restart_end:
 957	SYNC
 958	RFI
 959
 960#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
 961	/*
 962	 * This is a bit different on 4xx/Book-E because it doesn't have
 963	 * the RI bit in the MSR.
 964	 * The TLB miss handler checks if we have interrupted
 965	 * the exception exit path and restarts it if so
 966	 * (well maybe one day it will... :).
 967	 */
 968	lwz	r11,_LINK(r1)
 969	mtlr	r11
 970	lwz	r10,_CCR(r1)
 971	mtcrf	0xff,r10
 972	REST_2GPRS(9, r1)
 973	.globl exc_exit_restart
 974exc_exit_restart:
 975	lwz	r11,_NIP(r1)
 976	lwz	r12,_MSR(r1)
 977exc_exit_start:
 978	mtspr	SPRN_SRR0,r11
 979	mtspr	SPRN_SRR1,r12
 980	REST_2GPRS(11, r1)
 981	lwz	r1,GPR1(r1)
 982	.globl exc_exit_restart_end
 983exc_exit_restart_end:
 984	PPC405_ERR77_SYNC
 985	rfi
 986	b	.			/* prevent prefetch past rfi */
 987
 988/*
 989 * Returning from a critical interrupt in user mode doesn't need
 990 * to be any different from a normal exception.  For a critical
 991 * interrupt in the kernel, we just return (without checking for
 992 * preemption) since the interrupt may have happened at some crucial
 993 * place (e.g. inside the TLB miss handler), and because we will be
 994 * running with r1 pointing into critical_stack, not the current
 995 * process's kernel stack (and therefore current_thread_info() will
 996 * give the wrong answer).
 997 * We have to restore various SPRs that may have been in use at the
 998 * time of the critical interrupt.
 999 *
1000 */
1001#ifdef CONFIG_40x
1002#define PPC_40x_TURN_OFF_MSR_DR						    \
1003	/* avoid any possible TLB misses here by turning off MSR.DR; we	    \
1004	 * assume the instructions here are mapped by a pinned TLB entry */ \
1005	li	r10,MSR_IR;						    \
1006	mtmsr	r10;							    \
1007	isync;								    \
1008	tophys(r1, r1);
1009#else
1010#define PPC_40x_TURN_OFF_MSR_DR
1011#endif
1012
1013#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
1014	REST_NVGPRS(r1);						\
1015	lwz	r3,_MSR(r1);						\
1016	andi.	r3,r3,MSR_PR;						\
1017	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
1018	bne	user_exc_return;					\
1019	lwz	r0,GPR0(r1);						\
1020	lwz	r2,GPR2(r1);						\
1021	REST_4GPRS(3, r1);						\
1022	REST_2GPRS(7, r1);						\
1023	lwz	r10,_XER(r1);						\
1024	lwz	r11,_CTR(r1);						\
1025	mtspr	SPRN_XER,r10;						\
1026	mtctr	r11;							\
1027	PPC405_ERR77(0,r1);						\
1028	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1029	lwz	r11,_LINK(r1);						\
1030	mtlr	r11;							\
1031	lwz	r10,_CCR(r1);						\
1032	mtcrf	0xff,r10;						\
1033	PPC_40x_TURN_OFF_MSR_DR;					\
1034	lwz	r9,_DEAR(r1);						\
1035	lwz	r10,_ESR(r1);						\
1036	mtspr	SPRN_DEAR,r9;						\
1037	mtspr	SPRN_ESR,r10;						\
1038	lwz	r11,_NIP(r1);						\
1039	lwz	r12,_MSR(r1);						\
1040	mtspr	exc_lvl_srr0,r11;					\
1041	mtspr	exc_lvl_srr1,r12;					\
1042	lwz	r9,GPR9(r1);						\
1043	lwz	r12,GPR12(r1);						\
1044	lwz	r10,GPR10(r1);						\
1045	lwz	r11,GPR11(r1);						\
1046	lwz	r1,GPR1(r1);						\
1047	PPC405_ERR77_SYNC;						\
1048	exc_lvl_rfi;							\
1049	b	.;		/* prevent prefetch past exc_lvl_rfi */
1050
1051#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1052	lwz	r9,_##exc_lvl_srr0(r1);					\
1053	lwz	r10,_##exc_lvl_srr1(r1);				\
1054	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1055	mtspr	SPRN_##exc_lvl_srr1,r10;
1056
1057#if defined(CONFIG_PPC_BOOK3E_MMU)
1058#ifdef CONFIG_PHYS_64BIT
1059#define	RESTORE_MAS7							\
1060	lwz	r11,MAS7(r1);						\
1061	mtspr	SPRN_MAS7,r11;
1062#else
1063#define	RESTORE_MAS7
1064#endif /* CONFIG_PHYS_64BIT */
1065#define RESTORE_MMU_REGS						\
1066	lwz	r9,MAS0(r1);						\
1067	lwz	r10,MAS1(r1);						\
1068	lwz	r11,MAS2(r1);						\
1069	mtspr	SPRN_MAS0,r9;						\
1070	lwz	r9,MAS3(r1);						\
1071	mtspr	SPRN_MAS1,r10;						\
1072	lwz	r10,MAS6(r1);						\
1073	mtspr	SPRN_MAS2,r11;						\
1074	mtspr	SPRN_MAS3,r9;						\
1075	mtspr	SPRN_MAS6,r10;						\
1076	RESTORE_MAS7;
1077#elif defined(CONFIG_44x)
1078#define RESTORE_MMU_REGS						\
1079	lwz	r9,MMUCR(r1);						\
1080	mtspr	SPRN_MMUCR,r9;
1081#else
1082#define RESTORE_MMU_REGS
1083#endif
1084
1085#ifdef CONFIG_40x
1086	.globl	ret_from_crit_exc
1087ret_from_crit_exc:
1088	mfspr	r9,SPRN_SPRG_THREAD
1089	lis	r10,saved_ksp_limit@ha;
1090	lwz	r10,saved_ksp_limit@l(r10);
1091	tovirt(r9,r9);
1092	stw	r10,KSP_LIMIT(r9)
1093	lis	r9,crit_srr0@ha;
1094	lwz	r9,crit_srr0@l(r9);
1095	lis	r10,crit_srr1@ha;
1096	lwz	r10,crit_srr1@l(r10);
1097	mtspr	SPRN_SRR0,r9;
1098	mtspr	SPRN_SRR1,r10;
1099	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1100#endif /* CONFIG_40x */
1101
1102#ifdef CONFIG_BOOKE
1103	.globl	ret_from_crit_exc
1104ret_from_crit_exc:
1105	mfspr	r9,SPRN_SPRG_THREAD
1106	lwz	r10,SAVED_KSP_LIMIT(r1)
1107	stw	r10,KSP_LIMIT(r9)
1108	RESTORE_xSRR(SRR0,SRR1);
1109	RESTORE_MMU_REGS;
1110	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1111
1112	.globl	ret_from_debug_exc
1113ret_from_debug_exc:
1114	mfspr	r9,SPRN_SPRG_THREAD
1115	lwz	r10,SAVED_KSP_LIMIT(r1)
1116	stw	r10,KSP_LIMIT(r9)
1117	lwz	r9,THREAD_INFO-THREAD(r9)
1118	CURRENT_THREAD_INFO(r10, r1)
1119	lwz	r10,TI_PREEMPT(r10)
1120	stw	r10,TI_PREEMPT(r9)
1121	RESTORE_xSRR(SRR0,SRR1);
1122	RESTORE_xSRR(CSRR0,CSRR1);
1123	RESTORE_MMU_REGS;
1124	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1125
1126	.globl	ret_from_mcheck_exc
1127ret_from_mcheck_exc:
1128	mfspr	r9,SPRN_SPRG_THREAD
1129	lwz	r10,SAVED_KSP_LIMIT(r1)
1130	stw	r10,KSP_LIMIT(r9)
1131	RESTORE_xSRR(SRR0,SRR1);
1132	RESTORE_xSRR(CSRR0,CSRR1);
1133	RESTORE_xSRR(DSRR0,DSRR1);
1134	RESTORE_MMU_REGS;
1135	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1136#endif /* CONFIG_BOOKE */
1137
1138/*
1139 * Load the DBCR0 value for a task that is being ptraced,
1140 * having first saved away the global DBCR0.  Note that r0
1141 * having first saved away the global DBCR0.  On entry, r0
1142 * holds the dbcr0 value to set.
1143load_dbcr0:
1144	mfmsr	r10		/* first disable debug exceptions */
1145	rlwinm	r10,r10,0,~MSR_DE
1146	mtmsr	r10
1147	isync
1148	mfspr	r10,SPRN_DBCR0
1149	lis	r11,global_dbcr0@ha
1150	addi	r11,r11,global_dbcr0@l
1151#ifdef CONFIG_SMP
1152	CURRENT_THREAD_INFO(r9, r1)
1153	lwz	r9,TI_CPU(r9)
1154	slwi	r9,r9,3
1155	add	r11,r11,r9
1156#endif
1157	stw	r10,0(r11)
1158	mtspr	SPRN_DBCR0,r0
1159	lwz	r10,4(r11)
1160	addi	r10,r10,1
1161	stw	r10,4(r11)
1162	li	r11,-1
1163	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1164	blr
1165
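/*
 * global_dbcr0[] holds two words per CPU: the kernel DBCR0 value saved
 * above and a count of how many times it has been saved.
 */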
1166	.section .bss
1167	.align	4
1168global_dbcr0:
1169	.space	8*NR_CPUS
1170	.previous
1171#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1172
1173do_work:			/* r10 contains MSR_KERNEL here */
1174	andi.	r0,r9,_TIF_NEED_RESCHED
1175	beq	do_user_signal
1176
1177do_resched:			/* r10 contains MSR_KERNEL here */
1178	/* Note: We don't need to inform lockdep that we are enabling
1179	 * interrupts here. As far as it knows, they are already enabled
1180	 */
1181	ori	r10,r10,MSR_EE
1182	SYNC
1183	MTMSRD(r10)		/* hard-enable interrupts */
1184	bl	schedule
1185recheck:
1186	/* Note: and we don't tell lockdep we are disabling them again
1187	 * either.  The disable/enable cycles used to peek at
1188	 * TI_FLAGS aren't advertised.
1189	 */
1190	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1191	SYNC
1192	MTMSRD(r10)		/* disable interrupts */
1193	CURRENT_THREAD_INFO(r9, r1)
1194	lwz	r9,TI_FLAGS(r9)
1195	andi.	r0,r9,_TIF_NEED_RESCHED
1196	bne-	do_resched
1197	andi.	r0,r9,_TIF_USER_WORK_MASK
1198	beq	restore_user
1199do_user_signal:			/* r10 contains MSR_KERNEL here */
1200	ori	r10,r10,MSR_EE
1201	SYNC
1202	MTMSRD(r10)		/* hard-enable interrupts */
1203	/* save r13-r31 in the exception frame, if not already done */
1204	lwz	r3,_TRAP(r1)
1205	andi.	r0,r3,1
1206	beq	2f
1207	SAVE_NVGPRS(r1)
1208	rlwinm	r3,r3,0,0,30
1209	stw	r3,_TRAP(r1)
12102:	addi	r3,r1,STACK_FRAME_OVERHEAD
1211	mr	r4,r9
1212	bl	do_notify_resume
1213	REST_NVGPRS(r1)
1214	b	recheck
1215
1216/*
1217 * We come here when we are at the end of handling an exception
1218 * that occurred at a place where taking an exception will lose
1219 * state information, such as the contents of SRR0 and SRR1.
1220 */
1221nonrecoverable:
1222	lis	r10,exc_exit_restart_end@ha
1223	addi	r10,r10,exc_exit_restart_end@l
1224	cmplw	r12,r10
1225	bge	3f
1226	lis	r11,exc_exit_restart@ha
1227	addi	r11,r11,exc_exit_restart@l
1228	cmplw	r12,r11
1229	blt	3f
1230	lis	r10,ee_restarts@ha
1231	lwz	r12,ee_restarts@l(r10)
1232	addi	r12,r12,1
1233	stw	r12,ee_restarts@l(r10)
1234	mr	r12,r11		/* restart at exc_exit_restart */
1235	blr
12363:	/* OK, we can't recover, kill this process */
1237	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1238BEGIN_FTR_SECTION
1239	blr
1240END_FTR_SECTION_IFSET(CPU_FTR_601)
1241	lwz	r3,_TRAP(r1)
1242	andi.	r0,r3,1
1243	beq	4f
1244	SAVE_NVGPRS(r1)
1245	rlwinm	r3,r3,0,0,30
1246	stw	r3,_TRAP(r1)
12474:	addi	r3,r1,STACK_FRAME_OVERHEAD
1248	bl	nonrecoverable_exception
1249	/* shouldn't return */
1250	b	4b
1251
1252	.section .bss
1253	.align	2
1254ee_restarts:
1255	.space	4
1256	.previous
1257
1258/*
1259 * PROM code for specific machines follows.  Put it
1260 * here so it's easy to add arch-specific sections later.
1261 * -- Cort
1262 */
1263#ifdef CONFIG_PPC_RTAS
1264/*
1265 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1266 * called with the MMU off.
1267 */
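/*
 * The code below saves the caller's MSR and return address, loads the
 * RTAS entry point and base from the rtas descriptor, and does an rfi
 * with MSR_IR/MSR_DR cleared so RTAS runs untranslated.  RTAS returns
 * to label 1 at its physical address, where the original MSR is
 * restored and a final rfi takes us back to the caller.
 */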
1268_GLOBAL(enter_rtas)
1269	stwu	r1,-INT_FRAME_SIZE(r1)
1270	mflr	r0
1271	stw	r0,INT_FRAME_SIZE+4(r1)
1272	LOAD_REG_ADDR(r4, rtas)
1273	lis	r6,1f@ha	/* physical return address for rtas */
1274	addi	r6,r6,1f@l
1275	tophys(r6,r6)
1276	tophys(r7,r1)
1277	lwz	r8,RTASENTRY(r4)
1278	lwz	r4,RTASBASE(r4)
1279	mfmsr	r9
1280	stw	r9,8(r1)
1281	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1282	SYNC			/* disable interrupts so SRR0/1 */
1283	MTMSRD(r0)		/* don't get trashed */
1284	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1285	mtlr	r6
1286	mtspr	SPRN_SPRG_RTAS,r7
1287	mtspr	SPRN_SRR0,r8
1288	mtspr	SPRN_SRR1,r9
1289	RFI
12901:	tophys(r9,r1)
1291	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1292	lwz	r9,8(r9)	/* original msr value */
1293	FIX_SRR1(r9,r0)
1294	addi	r1,r1,INT_FRAME_SIZE
1295	li	r0,0
1296	mtspr	SPRN_SPRG_RTAS,r0
1297	mtspr	SPRN_SRR0,r8
1298	mtspr	SPRN_SRR1,r9
1299	RFI			/* return to caller */
1300
1301	.globl	machine_check_in_rtas
1302machine_check_in_rtas:
1303	twi	31,0,0
1304	/* XXX load up BATs and panic */
1305
1306#endif /* CONFIG_PPC_RTAS */
1307
1308#ifdef CONFIG_FUNCTION_TRACER
1309#ifdef CONFIG_DYNAMIC_FTRACE
1310_GLOBAL(mcount)
1311_GLOBAL(_mcount)
1312	/*
1313	 * _mcount on PPC32 must preserve the link register, but r0 is
1314	 * free to use.  Move mcount's return address from LR into CTR,
1315	 * restore the caller's original LR from its save slot in the
1316	 * stack frame, and then jump back to the traced function
1317	 * through CTR.
1318	 */
1319	mflr	r0
1320	mtctr	r0
1321	lwz	r0, 4(r1)
1322	mtlr	r0
1323	bctr
1324
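/*
 * ftrace_call below is a patch site: the dynamic ftrace code rewrites
 * the branch at runtime to call the active tracer.  The default target,
 * ftrace_stub, simply returns.
 */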
1325_GLOBAL(ftrace_caller)
1326	MCOUNT_SAVE_FRAME
1327	/* r3 ends up with link register */
1328	subi	r3, r3, MCOUNT_INSN_SIZE
1329.globl ftrace_call
1330ftrace_call:
1331	bl	ftrace_stub
1332	nop
1333#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1334.globl ftrace_graph_call
1335ftrace_graph_call:
1336	b	ftrace_graph_stub
1337_GLOBAL(ftrace_graph_stub)
1338#endif
1339	MCOUNT_RESTORE_FRAME
1340	/* old link register ends up in ctr reg */
1341	bctr
1342#else
1343_GLOBAL(mcount)
1344_GLOBAL(_mcount)
1345
1346	MCOUNT_SAVE_FRAME
1347
1348	subi	r3, r3, MCOUNT_INSN_SIZE
1349	LOAD_REG_ADDR(r5, ftrace_trace_function)
1350	lwz	r5,0(r5)
1351
1352	mtctr	r5
1353	bctrl
1354	nop
1355
1356#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1357	b	ftrace_graph_caller
1358#endif
1359	MCOUNT_RESTORE_FRAME
1360	bctr
1361#endif
1362EXPORT_SYMBOL(_mcount)
1363
1364_GLOBAL(ftrace_stub)
1365	blr
1366
1367#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1368_GLOBAL(ftrace_graph_caller)
1369	/* load r4 with local address */
1370	lwz	r4, 44(r1)
1371	subi	r4, r4, MCOUNT_INSN_SIZE
1372
1373	/* Grab the LR out of the caller stack frame */
1374	lwz	r3,52(r1)
1375
1376	bl	prepare_ftrace_return
1377	nop
1378
1379        /*
1380         * prepare_ftrace_return gives us the address we divert to.
1381         * Change the LR in the caller's stack frame to this.
1382         */
1383	stw	r3,52(r1)
1384
1385	MCOUNT_RESTORE_FRAME
1386	/* old link register ends up in ctr reg */
1387	bctr
1388
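/*
 * Substituted for a traced function's return address by the function
 * graph tracer.  ftrace_return_to_handler() hands back the real return
 * address, which is restored into LR before returning.
 */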
1389_GLOBAL(return_to_handler)
1390	/* need to save return values */
1391	stwu	r1, -32(r1)
1392	stw	r3, 20(r1)
1393	stw	r4, 16(r1)
1394	stw	r31, 12(r1)
1395	mr	r31, r1
1396
1397	bl	ftrace_return_to_handler
1398	nop
1399
1400	/* return value has real return address */
1401	mtlr	r3
1402
1403	lwz	r3, 20(r1)
1404	lwz	r4, 16(r1)
1405	lwz	r31,12(r1)
1406	lwz	r1, 0(r1)
1407
1408	/* Jump back to real return address */
1409	blr
1410#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1411
1412#endif /* CONFIG_FUNCTION_TRACER */