v4.17: arch/powerpc/kernel/entry_32.S
 
   1/*
   2 *  PowerPC version
   3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
   4 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
   5 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
   6 *  Adapted for Power Macintosh by Paul Mackerras.
   7 *  Low-level exception handlers and MMU support
   8 *  rewritten by Paul Mackerras.
   9 *    Copyright (C) 1996 Paul Mackerras.
  10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
  11 *
  12 *  This file contains the system call entry code, context switch
  13 *  code, and exception/interrupt return code for PowerPC.
  14 *
  15 *  This program is free software; you can redistribute it and/or
  16 *  modify it under the terms of the GNU General Public License
  17 *  as published by the Free Software Foundation; either version
  18 *  2 of the License, or (at your option) any later version.
  19 *
  20 */
  21
  22#include <linux/errno.h>
  23#include <linux/err.h>
  24#include <linux/sys.h>
  25#include <linux/threads.h>
  26#include <asm/reg.h>
  27#include <asm/page.h>
  28#include <asm/mmu.h>
  29#include <asm/cputable.h>
  30#include <asm/thread_info.h>
  31#include <asm/ppc_asm.h>
  32#include <asm/asm-offsets.h>
  33#include <asm/unistd.h>
  34#include <asm/ptrace.h>
  35#include <asm/export.h>
  36
  37/*
  38 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
  39 */
  40#if MSR_KERNEL >= 0x10000
  41#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
  42#else
  43#define LOAD_MSR_KERNEL(r, x)	li r,(x)
  44#endif
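    /*
     * Editorial note, not part of the kernel source: "li" only takes a
     * 16-bit immediate, hence the two forms above.  As an illustrative
     * expansion with a hypothetical MSR_KERNEL value of 0x00021000 (one
     * that includes MSR_CE and so exceeds 16 bits),
     *	LOAD_MSR_KERNEL(r10, MSR_KERNEL)
     * becomes
     *	lis	r10,0x0002
     *	ori	r10,r10,0x1000
     * while a value that fits in 16 bits is loaded with a single li.
     */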
  45
  46/*
  47 * Align to 4k in order to ensure that all functions modifying srr0/srr1
  48 * fit into one page in order to not encounter a TLB miss between the
  49 * modification of srr0/srr1 and the associated rfi.
  50 */
  51	.align	12
  52
  53#ifdef CONFIG_BOOKE
  54	.globl	mcheck_transfer_to_handler
  55mcheck_transfer_to_handler:
  56	mfspr	r0,SPRN_DSRR0
  57	stw	r0,_DSRR0(r11)
  58	mfspr	r0,SPRN_DSRR1
  59	stw	r0,_DSRR1(r11)
  60	/* fall through */
  61
  62	.globl	debug_transfer_to_handler
  63debug_transfer_to_handler:
  64	mfspr	r0,SPRN_CSRR0
  65	stw	r0,_CSRR0(r11)
  66	mfspr	r0,SPRN_CSRR1
  67	stw	r0,_CSRR1(r11)
  68	/* fall through */
  69
  70	.globl	crit_transfer_to_handler
  71crit_transfer_to_handler:
  72#ifdef CONFIG_PPC_BOOK3E_MMU
  73	mfspr	r0,SPRN_MAS0
  74	stw	r0,MAS0(r11)
  75	mfspr	r0,SPRN_MAS1
  76	stw	r0,MAS1(r11)
  77	mfspr	r0,SPRN_MAS2
  78	stw	r0,MAS2(r11)
  79	mfspr	r0,SPRN_MAS3
  80	stw	r0,MAS3(r11)
  81	mfspr	r0,SPRN_MAS6
  82	stw	r0,MAS6(r11)
  83#ifdef CONFIG_PHYS_64BIT
  84	mfspr	r0,SPRN_MAS7
  85	stw	r0,MAS7(r11)
  86#endif /* CONFIG_PHYS_64BIT */
  87#endif /* CONFIG_PPC_BOOK3E_MMU */
  88#ifdef CONFIG_44x
  89	mfspr	r0,SPRN_MMUCR
  90	stw	r0,MMUCR(r11)
  91#endif
  92	mfspr	r0,SPRN_SRR0
  93	stw	r0,_SRR0(r11)
  94	mfspr	r0,SPRN_SRR1
  95	stw	r0,_SRR1(r11)
  96
  97	/* set the stack limit to the current stack
  98	 * and set the limit to protect the thread_info
  99	 * struct
 100	 */
 101	mfspr	r8,SPRN_SPRG_THREAD
 102	lwz	r0,KSP_LIMIT(r8)
 103	stw	r0,SAVED_KSP_LIMIT(r11)
 104	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
 105	stw	r0,KSP_LIMIT(r8)
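    	/*
    	 * Editorial note, not in the original source: the rlwimi above keeps
    	 * the low THREAD_SHIFT bits of the saved limit (the protective offset
    	 * above the stack base) and replaces the upper bits with those of r1,
    	 * so KSP_LIMIT now guards the stack r1 currently points into.
    	 */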
 106	/* fall through */
 107#endif
 108
 109#ifdef CONFIG_40x
 110	.globl	crit_transfer_to_handler
 111crit_transfer_to_handler:
 112	lwz	r0,crit_r10@l(0)
 113	stw	r0,GPR10(r11)
 114	lwz	r0,crit_r11@l(0)
 115	stw	r0,GPR11(r11)
 116	mfspr	r0,SPRN_SRR0
 117	stw	r0,crit_srr0@l(0)
 118	mfspr	r0,SPRN_SRR1
 119	stw	r0,crit_srr1@l(0)
 120
 121	/* set the stack limit to the current stack
 122	 * and set the limit to protect the thread_info
 123	 * struct
 124	 */
 125	mfspr	r8,SPRN_SPRG_THREAD
 126	lwz	r0,KSP_LIMIT(r8)
 127	stw	r0,saved_ksp_limit@l(0)
 128	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
 129	stw	r0,KSP_LIMIT(r8)
 130	/* fall through */
 131#endif
 132
 133/*
 134 * This code finishes saving the registers to the exception frame
 135 * and jumps to the appropriate handler for the exception, turning
 136 * on address translation.
 137 * Note that we rely on the caller having set cr0.eq iff the exception
 138 * occurred in kernel mode (i.e. MSR:PR = 0).
 139 */
 140	.globl	transfer_to_handler_full
 141transfer_to_handler_full:
 142	SAVE_NVGPRS(r11)
 143	/* fall through */
 144
 145	.globl	transfer_to_handler
 146transfer_to_handler:
 147	stw	r2,GPR2(r11)
 148	stw	r12,_NIP(r11)
 149	stw	r9,_MSR(r11)
 150	andi.	r2,r9,MSR_PR
 151	mfctr	r12
 152	mfspr	r2,SPRN_XER
 153	stw	r12,_CTR(r11)
 154	stw	r2,_XER(r11)
 155	mfspr	r12,SPRN_SPRG_THREAD
 156	addi	r2,r12,-THREAD
 157	tovirt(r2,r2)			/* set r2 to current */
 158	beq	2f			/* if from user, fix up THREAD.regs */
 159	addi	r11,r1,STACK_FRAME_OVERHEAD
 160	stw	r11,PT_REGS(r12)
 161#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
 162	/* Check to see if the dbcr0 register is set up to debug.  Use the
 163	   internal debug mode bit to do this. */
 164	lwz	r12,THREAD_DBCR0(r12)
 165	andis.	r12,r12,DBCR0_IDM@h
 166	beq+	3f
 167	/* From user and task is ptraced - load up global dbcr0 */
 168	li	r12,-1			/* clear all pending debug events */
 169	mtspr	SPRN_DBSR,r12
 170	lis	r11,global_dbcr0@ha
 171	tophys(r11,r11)
 172	addi	r11,r11,global_dbcr0@l
 173#ifdef CONFIG_SMP
 174	CURRENT_THREAD_INFO(r9, r1)
 175	lwz	r9,TI_CPU(r9)
 176	slwi	r9,r9,3
 177	add	r11,r11,r9
 178#endif
 179	lwz	r12,0(r11)
 180	mtspr	SPRN_DBCR0,r12
 181	lwz	r12,4(r11)
 182	addi	r12,r12,-1
 183	stw	r12,4(r11)
 184#endif
 185#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 186	CURRENT_THREAD_INFO(r9, r1)
 187	tophys(r9, r9)
 188	ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
 189#endif
 190
 191	b	3f
 192
 1932:	/* if from kernel, check interrupted DOZE/NAP mode and
 194         * check for stack overflow
 195         */
 196	lwz	r9,KSP_LIMIT(r12)
 197	cmplw	r1,r9			/* if r1 <= ksp_limit */
 198	ble-	stack_ovf		/* then the kernel stack overflowed */
 1995:
 200#if defined(CONFIG_6xx) || defined(CONFIG_E500)
 201	CURRENT_THREAD_INFO(r9, r1)
 202	tophys(r9,r9)			/* check local flags */
 203	lwz	r12,TI_LOCAL_FLAGS(r9)
 204	mtcrf	0x01,r12
 205	bt-	31-TLF_NAPPING,4f
 206	bt-	31-TLF_SLEEPING,7f
 207#endif /* CONFIG_6xx || CONFIG_E500 */
 208	.globl transfer_to_handler_cont
 209transfer_to_handler_cont:
 2103:
 211	mflr	r9
 212	lwz	r11,0(r9)		/* virtual address of handler */
 213	lwz	r9,4(r9)		/* where to go when done */
 214#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 215	mtspr	SPRN_NRI, r0
 216#endif
 217#ifdef CONFIG_TRACE_IRQFLAGS
 218	lis	r12,reenable_mmu@h
 219	ori	r12,r12,reenable_mmu@l
 220	mtspr	SPRN_SRR0,r12
 221	mtspr	SPRN_SRR1,r10
 222	SYNC
 223	RFI
 224reenable_mmu:				/* re-enable mmu so we can */
 225	mfmsr	r10
 226	lwz	r12,_MSR(r1)
 227	xor	r10,r10,r12
 228	andi.	r10,r10,MSR_EE		/* Did EE change? */
 229	beq	1f
 230
 231	/*
 232	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
 233	 * If from user mode there is only one stack frame on the stack, and
 234	 * accessing CALLER_ADDR1 will cause an oops. So we need to create a dummy
 235	 * stack frame to make trace_hardirqs_off happy.
 236	 *
 237	 * This is handy because we also need to save a bunch of GPRs,
 238	 * r3 can be different from GPR3(r1) at this point, r9 and r11
 239	 * contain the old MSR and handler address respectively,
 240	 * r4 & r5 can contain page fault arguments that need to be passed
 241	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
 242	 * they aren't useful past this point (aren't syscall arguments),
 243	 * the rest is restored from the exception frame.
 244	 */
 245	stwu	r1,-32(r1)
 246	stw	r9,8(r1)
 247	stw	r11,12(r1)
 248	stw	r3,16(r1)
 249	stw	r4,20(r1)
 250	stw	r5,24(r1)
 251	bl	trace_hardirqs_off
 252	lwz	r5,24(r1)
 253	lwz	r4,20(r1)
 254	lwz	r3,16(r1)
 255	lwz	r11,12(r1)
 256	lwz	r9,8(r1)
 257	addi	r1,r1,32
 258	lwz	r0,GPR0(r1)
 259	lwz	r6,GPR6(r1)
 260	lwz	r7,GPR7(r1)
 261	lwz	r8,GPR8(r1)
 2621:	mtctr	r11
 263	mtlr	r9
 264	bctr				/* jump to handler */
 265#else /* CONFIG_TRACE_IRQFLAGS */
 266	mtspr	SPRN_SRR0,r11
 267	mtspr	SPRN_SRR1,r10
 268	mtlr	r9
 269	SYNC
 270	RFI				/* jump to handler, enable MMU */
 271#endif /* CONFIG_TRACE_IRQFLAGS */
 272
 273#if defined (CONFIG_6xx) || defined(CONFIG_E500)
 2744:	rlwinm	r12,r12,0,~_TLF_NAPPING
 275	stw	r12,TI_LOCAL_FLAGS(r9)
 276	b	power_save_ppc32_restore
 277
 2787:	rlwinm	r12,r12,0,~_TLF_SLEEPING
 279	stw	r12,TI_LOCAL_FLAGS(r9)
 280	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
 281	rlwinm	r9,r9,0,~MSR_EE
 282	lwz	r12,_LINK(r11)		/* and return to address in LR */
 283	b	fast_exception_return
 284#endif
 285
 286/*
 287 * On kernel stack overflow, load up an initial stack pointer
 288 * and call StackOverflow(regs), which should not return.
 289 */
 290stack_ovf:
 291	/* sometimes we use a statically-allocated stack, which is OK. */
 292	lis	r12,_end@h
 293	ori	r12,r12,_end@l
 294	cmplw	r1,r12
 295	ble	5b			/* r1 <= &_end is OK */
 296	SAVE_NVGPRS(r11)
 297	addi	r3,r1,STACK_FRAME_OVERHEAD
 298	lis	r1,init_thread_union@ha
 299	addi	r1,r1,init_thread_union@l
 300	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
 301	lis	r9,StackOverflow@ha
 302	addi	r9,r9,StackOverflow@l
 303	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 304#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 305	mtspr	SPRN_NRI, r0
 306#endif
 307	mtspr	SPRN_SRR0,r9
 308	mtspr	SPRN_SRR1,r10
 309	SYNC
 310	RFI
 311
 312/*
 313 * Handle a system call.
 314 */
 315	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
 316	.stabs	"entry_32.S",N_SO,0,0,0f
 3170:
 318
 319_GLOBAL(DoSyscall)
 320	stw	r3,ORIG_GPR3(r1)
 321	li	r12,0
 322	stw	r12,RESULT(r1)
 323	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
 324	rlwinm	r11,r11,0,4,2
 325	stw	r11,_CCR(r1)
 326#ifdef CONFIG_TRACE_IRQFLAGS
 327	/* Return from syscalls can (and generally will) hard enable
 328	 * interrupts. You aren't supposed to call a syscall with
 329	 * interrupts disabled in the first place. However, to ensure
 330	 * that we get it right vs. lockdep if it happens, we force
 331	 * that hard enable here with appropriate tracing if we see
 332	 * that we have been called with interrupts off
 333	 */
 334	mfmsr	r11
 335	andi.	r12,r11,MSR_EE
 336	bne+	1f
 337	/* We came in with interrupts disabled, we enable them now */
 338	bl	trace_hardirqs_on
 339	mfmsr	r11
 340	lwz	r0,GPR0(r1)
 341	lwz	r3,GPR3(r1)
 342	lwz	r4,GPR4(r1)
 343	ori	r11,r11,MSR_EE
 344	lwz	r5,GPR5(r1)
 345	lwz	r6,GPR6(r1)
 346	lwz	r7,GPR7(r1)
 347	lwz	r8,GPR8(r1)
 348	mtmsr	r11
 3491:
 350#endif /* CONFIG_TRACE_IRQFLAGS */
 351	CURRENT_THREAD_INFO(r10, r1)
 352	lwz	r11,TI_FLAGS(r10)
 353	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
 354	bne-	syscall_dotrace
 355syscall_dotrace_cont:
 356	cmplwi	0,r0,NR_syscalls
 357	lis	r10,sys_call_table@h
 358	ori	r10,r10,sys_call_table@l
 359	slwi	r0,r0,2
 360	bge-	66f
 361	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
 362	mtlr	r10
 363	addi	r9,r1,STACK_FRAME_OVERHEAD
 364	PPC440EP_ERR42
 365	blrl			/* Call handler */
 366	.globl	ret_from_syscall
 367ret_from_syscall:
 368	mr	r6,r3
 369	CURRENT_THREAD_INFO(r12, r1)
 370	/* disable interrupts so current_thread_info()->flags can't change */
 371	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
 372	/* Note: We don't bother telling lockdep about it */
 373	SYNC
 374	MTMSRD(r10)
 375	lwz	r9,TI_FLAGS(r12)
 376	li	r8,-MAX_ERRNO
 377	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 378	bne-	syscall_exit_work
 379	cmplw	0,r3,r8
 380	blt+	syscall_exit_cont
 381	lwz	r11,_CCR(r1)			/* Load CR */
 382	neg	r3,r3
 383	oris	r11,r11,0x1000	/* Set SO bit in CR */
 384	stw	r11,_CCR(r1)
 385syscall_exit_cont:
 386	lwz	r8,_MSR(r1)
 387#ifdef CONFIG_TRACE_IRQFLAGS
 388	/* If we are going to return from the syscall with interrupts
 389	 * off, we trace that here. It shouldn't happen, but we
 390	 * want to catch the bugger if it does, right?
 391	 */
 392	andi.	r10,r8,MSR_EE
 393	bne+	1f
 394	stw	r3,GPR3(r1)
 395	bl      trace_hardirqs_off
 396	lwz	r3,GPR3(r1)
 3971:
 398#endif /* CONFIG_TRACE_IRQFLAGS */
 399#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 400	/* If the process has its own DBCR0 value, load it up.  The internal
 401	   debug mode bit tells us that dbcr0 should be loaded. */
 402	lwz	r0,THREAD+THREAD_DBCR0(r2)
 403	andis.	r10,r0,DBCR0_IDM@h
 404	bnel-	load_dbcr0
 405#endif
 406#ifdef CONFIG_44x
 407BEGIN_MMU_FTR_SECTION
 408	lis	r4,icache_44x_need_flush@ha
 409	lwz	r5,icache_44x_need_flush@l(r4)
 410	cmplwi	cr0,r5,0
 411	bne-	2f
 4121:
 413END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
 414#endif /* CONFIG_44x */
 415BEGIN_FTR_SECTION
 416	lwarx	r7,0,r1
 417END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 418	stwcx.	r0,0,r1			/* to clear the reservation */
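    	/*
    	 * Editorial note, not in the original source: the stwcx. above kills
    	 * any reservation left by an interrupted lwarx/stwcx. sequence so it
    	 * cannot succeed spuriously after we return.  The conditional lwarx
    	 * exists because CPUs flagged with CPU_FTR_NEED_PAIRED_STWCX want
    	 * every stwcx. to be preceded by a matching lwarx.
    	 */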
 419#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 420	andi.	r4,r8,MSR_PR
 421	beq	3f
 422	CURRENT_THREAD_INFO(r4, r1)
 423	ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
 4243:
 425#endif
 426	lwz	r4,_LINK(r1)
 427	lwz	r5,_CCR(r1)
 428	mtlr	r4
 429	mtcr	r5
 430	lwz	r7,_NIP(r1)
 431	lwz	r2,GPR2(r1)
 432	lwz	r1,GPR1(r1)
 433#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 434	mtspr	SPRN_NRI, r0
 435#endif
 436	mtspr	SPRN_SRR0,r7
 437	mtspr	SPRN_SRR1,r8
 438	SYNC
 439	RFI
 440#ifdef CONFIG_44x
 4412:	li	r7,0
 442	iccci	r0,r0
 443	stw	r7,icache_44x_need_flush@l(r4)
 444	b	1b
 445#endif  /* CONFIG_44x */
 446
 44766:	li	r3,-ENOSYS
 448	b	ret_from_syscall
 449
 450	.globl	ret_from_fork
 451ret_from_fork:
 452	REST_NVGPRS(r1)
 453	bl	schedule_tail
 454	li	r3,0
 455	b	ret_from_syscall
 456
 457	.globl	ret_from_kernel_thread
 458ret_from_kernel_thread:
 459	REST_NVGPRS(r1)
 460	bl	schedule_tail
 461	mtlr	r14
 462	mr	r3,r15
 463	PPC440EP_ERR42
 464	blrl
 465	li	r3,0
 466	b	ret_from_syscall
 467
 468/* Traced system call support */
 469syscall_dotrace:
 470	SAVE_NVGPRS(r1)
 471	li	r0,0xc00
 472	stw	r0,_TRAP(r1)
 473	addi	r3,r1,STACK_FRAME_OVERHEAD
 474	bl	do_syscall_trace_enter
 475	/*
 476	 * Restore argument registers possibly just changed.
 477	 * We use the return value of do_syscall_trace_enter
 478	 * for call number to look up in the table (r0).
 479	 */
 480	mr	r0,r3
 481	lwz	r3,GPR3(r1)
 482	lwz	r4,GPR4(r1)
 483	lwz	r5,GPR5(r1)
 484	lwz	r6,GPR6(r1)
 485	lwz	r7,GPR7(r1)
 486	lwz	r8,GPR8(r1)
 487	REST_NVGPRS(r1)
 488
 489	cmplwi	r0,NR_syscalls
 490	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
 491	bge-	ret_from_syscall
 492	b	syscall_dotrace_cont
 493
 494syscall_exit_work:
 495	andi.	r0,r9,_TIF_RESTOREALL
 496	beq+	0f
 497	REST_NVGPRS(r1)
 498	b	2f
 4990:	cmplw	0,r3,r8
 500	blt+	1f
 501	andi.	r0,r9,_TIF_NOERROR
 502	bne-	1f
 503	lwz	r11,_CCR(r1)			/* Load CR */
 504	neg	r3,r3
 505	oris	r11,r11,0x1000	/* Set SO bit in CR */
 506	stw	r11,_CCR(r1)
 507
 5081:	stw	r6,RESULT(r1)	/* Save result */
 509	stw	r3,GPR3(r1)	/* Update return value */
 5102:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
 511	beq	4f
 512
 513	/* Clear per-syscall TIF flags if any are set.  */
 514
 515	li	r11,_TIF_PERSYSCALL_MASK
 516	addi	r12,r12,TI_FLAGS
 5173:	lwarx	r8,0,r12
 518	andc	r8,r8,r11
 519#ifdef CONFIG_IBM405_ERR77
 520	dcbt	0,r12
 521#endif
 522	stwcx.	r8,0,r12
 523	bne-	3b
 524	subi	r12,r12,TI_FLAGS
 525	
 5264:	/* Anything which requires enabling interrupts? */
 527	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
 528	beq	ret_from_except
 529
 530	/* Re-enable interrupts. There is no need to trace that with
 531	 * lockdep as we are supposed to have IRQs on at this point
 532	 */
 533	ori	r10,r10,MSR_EE
 534	SYNC
 535	MTMSRD(r10)
 536
 537	/* Save NVGPRS if they're not saved already */
 538	lwz	r4,_TRAP(r1)
 539	andi.	r4,r4,1
 540	beq	5f
 541	SAVE_NVGPRS(r1)
 542	li	r4,0xc00
 543	stw	r4,_TRAP(r1)
 5445:
 545	addi	r3,r1,STACK_FRAME_OVERHEAD
 546	bl	do_syscall_trace_leave
 547	b	ret_from_except_full
 548
 549/*
 550 * The fork/clone functions need to copy the full register set into
 551 * the child process. Therefore we need to save all the nonvolatile
 552 * registers (r13 - r31) before calling the C code.
 553 */
 554	.globl	ppc_fork
 555ppc_fork:
 556	SAVE_NVGPRS(r1)
 557	lwz	r0,_TRAP(r1)
 558	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 559	stw	r0,_TRAP(r1)		/* register set saved */
 560	b	sys_fork
 561
 562	.globl	ppc_vfork
 563ppc_vfork:
 564	SAVE_NVGPRS(r1)
 565	lwz	r0,_TRAP(r1)
 566	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 567	stw	r0,_TRAP(r1)		/* register set saved */
 568	b	sys_vfork
 569
 570	.globl	ppc_clone
 571ppc_clone:
 572	SAVE_NVGPRS(r1)
 573	lwz	r0,_TRAP(r1)
 574	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 575	stw	r0,_TRAP(r1)		/* register set saved */
 576	b	sys_clone
 577
 578	.globl	ppc_swapcontext
 579ppc_swapcontext:
 580	SAVE_NVGPRS(r1)
 581	lwz	r0,_TRAP(r1)
 582	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 583	stw	r0,_TRAP(r1)		/* register set saved */
 584	b	sys_swapcontext
 585
 586/*
 587 * Top-level page fault handling.
 588 * This is in assembler because if do_page_fault tells us that
 589 * it is a bad kernel page fault, we want to save the non-volatile
 590 * registers before calling bad_page_fault.
 591 */
 592	.globl	handle_page_fault
 593handle_page_fault:
 594	stw	r4,_DAR(r1)
 595	addi	r3,r1,STACK_FRAME_OVERHEAD
 596#ifdef CONFIG_6xx
 597	andis.  r0,r5,DSISR_DABRMATCH@h
 598	bne-    handle_dabr_fault
 599#endif
 600	bl	do_page_fault
 601	cmpwi	r3,0
 602	beq+	ret_from_except
 603	SAVE_NVGPRS(r1)
 604	lwz	r0,_TRAP(r1)
 605	clrrwi	r0,r0,1
 606	stw	r0,_TRAP(r1)
 607	mr	r5,r3
 608	addi	r3,r1,STACK_FRAME_OVERHEAD
 609	lwz	r4,_DAR(r1)
 610	bl	bad_page_fault
 611	b	ret_from_except_full
 612
 613#ifdef CONFIG_6xx
 614	/* We have a data breakpoint exception - handle it */
 615handle_dabr_fault:
 616	SAVE_NVGPRS(r1)
 617	lwz	r0,_TRAP(r1)
 618	clrrwi	r0,r0,1
 619	stw	r0,_TRAP(r1)
 620	bl      do_break
 621	b	ret_from_except_full
 622#endif
 623
 624/*
 625 * This routine switches between two different tasks.  The process
 626 * state of one is saved on its kernel stack.  Then the state
 627 * of the other is restored from its kernel stack.  The memory
 628 * management hardware is updated to the second process's state.
 629 * Finally, we can return to the second process.
 630 * On entry, r3 points to the THREAD for the current task, r4
 631 * points to the THREAD for the new task.
 632 *
 633 * This routine is always called with interrupts disabled.
 634 *
 635 * Note: there are two ways to get to the "going out" portion
 636 * of this code; either by coming in via the entry (_switch)
 637 * or via "fork" which must set up an environment equivalent
 638 * to the "_switch" path.  If you change this, you'll have to
 639 * change the fork code also.
 640 *
 641 * The code which creates the new task context is in 'copy_thread'
 642 * in arch/ppc/kernel/process.c
 643 */
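    /*
     * Editorial sketch, not part of the kernel source: in rough C-like
     * pseudocode, with made-up helper names, the flow is
     *
     *	last = _switch(prev_thread, next_thread):
     *		save NVGPRs, CR and MSR in a frame on prev's kernel stack;
     *		prev_thread->ksp = r1;
     *		SPRN_SPRG_THREAD = physical address of next_thread;
     *		r1 = next_thread->ksp;
     *		r3 = old "current" (returned as last), r2 = new current;
     *		restore NVGPRs and CR from the frame on next's kernel stack;
     *		return to the _switch call site saved in the new frame's NIP;
     */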
 644_GLOBAL(_switch)
 645	stwu	r1,-INT_FRAME_SIZE(r1)
 646	mflr	r0
 647	stw	r0,INT_FRAME_SIZE+4(r1)
 648	/* r3-r12 are caller saved -- Cort */
 649	SAVE_NVGPRS(r1)
 650	stw	r0,_NIP(r1)	/* Return to switch caller */
 651	mfmsr	r11
 652	li	r0,MSR_FP	/* Disable floating-point */
 653#ifdef CONFIG_ALTIVEC
 654BEGIN_FTR_SECTION
 655	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
 656	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
 657	stw	r12,THREAD+THREAD_VRSAVE(r2)
 658END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 659#endif /* CONFIG_ALTIVEC */
 660#ifdef CONFIG_SPE
 661BEGIN_FTR_SECTION
 662	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
 663	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
 664	stw	r12,THREAD+THREAD_SPEFSCR(r2)
 665END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 666#endif /* CONFIG_SPE */
 667	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
 668	beq+	1f
 669	andc	r11,r11,r0
 670	MTMSRD(r11)
 671	isync
 6721:	stw	r11,_MSR(r1)
 673	mfcr	r10
 674	stw	r10,_CCR(r1)
 675	stw	r1,KSP(r3)	/* Set old stack pointer */
 676
 677#ifdef CONFIG_SMP
 678	/* We need a sync somewhere here to make sure that if the
 679	 * previous task gets rescheduled on another CPU, it sees all
 680	 * stores it has performed on this one.
 681	 */
 682	sync
 683#endif /* CONFIG_SMP */
 684
 685	tophys(r0,r4)
 686	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
 687	lwz	r1,KSP(r4)	/* Load new stack pointer */
 688
 689	/* save the old current 'last' for return value */
 690	mr	r3,r2
 691	addi	r2,r4,-THREAD	/* Update current */
 692
 693#ifdef CONFIG_ALTIVEC
 694BEGIN_FTR_SECTION
 695	lwz	r0,THREAD+THREAD_VRSAVE(r2)
 696	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
 697END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 698#endif /* CONFIG_ALTIVEC */
 699#ifdef CONFIG_SPE
 700BEGIN_FTR_SECTION
 701	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
 702	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
 703END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 704#endif /* CONFIG_SPE */
 705
 706	lwz	r0,_CCR(r1)
 707	mtcrf	0xFF,r0
 708	/* r3-r12 are destroyed -- Cort */
 709	REST_NVGPRS(r1)
 710
 711	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
 712	mtlr	r4
 713	addi	r1,r1,INT_FRAME_SIZE
 714	blr
 715
 716	.globl	fast_exception_return
 717fast_exception_return:
 718#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 719	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
 720	beq	1f			/* if not, we've got problems */
 721#endif
 722
 7232:	REST_4GPRS(3, r11)
 724	lwz	r10,_CCR(r11)
 725	REST_GPR(1, r11)
 726	mtcr	r10
 727	lwz	r10,_LINK(r11)
 728	mtlr	r10
 729	REST_GPR(10, r11)
 730#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 731	mtspr	SPRN_NRI, r0
 732#endif
 733	mtspr	SPRN_SRR1,r9
 734	mtspr	SPRN_SRR0,r12
 735	REST_GPR(9, r11)
 736	REST_GPR(12, r11)
 737	lwz	r11,GPR11(r11)
 738	SYNC
 739	RFI
 740
 741#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 742/* check if the exception happened in a restartable section */
 7431:	lis	r3,exc_exit_restart_end@ha
 744	addi	r3,r3,exc_exit_restart_end@l
 745	cmplw	r12,r3
 746	bge	3f
 747	lis	r4,exc_exit_restart@ha
 748	addi	r4,r4,exc_exit_restart@l
 749	cmplw	r12,r4
 750	blt	3f
 751	lis	r3,fee_restarts@ha
 752	tophys(r3,r3)
 753	lwz	r5,fee_restarts@l(r3)
 754	addi	r5,r5,1
 755	stw	r5,fee_restarts@l(r3)
 756	mr	r12,r4		/* restart at exc_exit_restart */
 757	b	2b
 758
 759	.section .bss
 760	.align	2
 761fee_restarts:
 762	.space	4
 763	.previous
 764
 765/* aargh, a nonrecoverable interrupt, panic */
 766/* aargh, we don't know which trap this is */
 767/* but the 601 doesn't implement the RI bit, so assume it's OK */
 7683:
 769BEGIN_FTR_SECTION
 770	b	2b
 771END_FTR_SECTION_IFSET(CPU_FTR_601)
 772	li	r10,-1
 773	stw	r10,_TRAP(r11)
 774	addi	r3,r1,STACK_FRAME_OVERHEAD
 775	lis	r10,MSR_KERNEL@h
 776	ori	r10,r10,MSR_KERNEL@l
 777	bl	transfer_to_handler_full
 778	.long	nonrecoverable_exception
 779	.long	ret_from_except
 780#endif
 781
 782	.globl	ret_from_except_full
 783ret_from_except_full:
 784	REST_NVGPRS(r1)
 785	/* fall through */
 786
 787	.globl	ret_from_except
 788ret_from_except:
 789	/* Hard-disable interrupts so that current_thread_info()->flags
 790	 * can't change between when we test it and when we return
 791	 * from the interrupt. */
 792	/* Note: We don't bother telling lockdep about it */
 793	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 794	SYNC			/* Some chip revs have problems here... */
 795	MTMSRD(r10)		/* disable interrupts */
 796
 797	lwz	r3,_MSR(r1)	/* Returning to user mode? */
 798	andi.	r0,r3,MSR_PR
 799	beq	resume_kernel
 800
 801user_exc_return:		/* r10 contains MSR_KERNEL here */
 802	/* Check current_thread_info()->flags */
 803	CURRENT_THREAD_INFO(r9, r1)
 804	lwz	r9,TI_FLAGS(r9)
 805	andi.	r0,r9,_TIF_USER_WORK_MASK
 806	bne	do_work
 807
 808restore_user:
 809#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 810	/* Check whether this process has its own DBCR0 value.  The internal
 811	   debug mode bit tells us that dbcr0 should be loaded. */
 812	lwz	r0,THREAD+THREAD_DBCR0(r2)
 813	andis.	r10,r0,DBCR0_IDM@h
 814	bnel-	load_dbcr0
 815#endif
 816#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 817	CURRENT_THREAD_INFO(r9, r1)
 818	ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
 819#endif
 820
 821	b	restore
 822
 823/* N.B. the only way to get here is from the beq following ret_from_except. */
 824resume_kernel:
 825	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
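    	/*
    	 * Editorial note, not in the original source: _TIF_EMULATE_STACK_STORE
    	 * is set when a kernel "stwu r1,-N(r1)" was emulated (e.g. while being
    	 * single-stepped under a kprobe) and only the register update could be
    	 * done at that point.  The code below copies the exception frame down
    	 * to the new r1 and then performs the deferred store.
    	 */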
 826	CURRENT_THREAD_INFO(r9, r1)
 827	lwz	r8,TI_FLAGS(r9)
 828	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
 829	beq+	1f
 830
 831	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
 832
 833	lwz	r3,GPR1(r1)
 834	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
 835	mr	r4,r1			/* src:  current exception frame */
 836	mr	r1,r3			/* Reroute the trampoline frame to r1 */
 837
 838	/* Copy from the original to the trampoline. */
 839	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
 840	li	r6,0			/* start offset: 0 */
 841	mtctr	r5
 8422:	lwzx	r0,r6,r4
 843	stwx	r0,r6,r3
 844	addi	r6,r6,4
 845	bdnz	2b
 846
 847	/* Do real store operation to complete stwu */
 848	lwz	r5,GPR1(r1)
 849	stw	r8,0(r5)
 850
 851	/* Clear _TIF_EMULATE_STACK_STORE flag */
 852	lis	r11,_TIF_EMULATE_STACK_STORE@h
 853	addi	r5,r9,TI_FLAGS
 8540:	lwarx	r8,0,r5
 855	andc	r8,r8,r11
 856#ifdef CONFIG_IBM405_ERR77
 857	dcbt	0,r5
 858#endif
 859	stwcx.	r8,0,r5
 860	bne-	0b
 8611:
 862
 863#ifdef CONFIG_PREEMPT
 864	/* check current_thread_info->preempt_count */
 865	lwz	r0,TI_PREEMPT(r9)
 866	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
 867	bne	restore
 868	andi.	r8,r8,_TIF_NEED_RESCHED
 869	beq+	restore
 870	lwz	r3,_MSR(r1)
 871	andi.	r0,r3,MSR_EE	/* interrupts off? */
 872	beq	restore		/* don't schedule if so */
 873#ifdef CONFIG_TRACE_IRQFLAGS
 874	/* Lockdep thinks irqs are enabled, we need to call
 875	 * preempt_schedule_irq with IRQs off, so we inform lockdep
 876	 * now that we -did- turn them off already
 877	 */
 878	bl	trace_hardirqs_off
 879#endif
 8801:	bl	preempt_schedule_irq
 881	CURRENT_THREAD_INFO(r9, r1)
 882	lwz	r3,TI_FLAGS(r9)
 883	andi.	r0,r3,_TIF_NEED_RESCHED
 884	bne-	1b
 885#ifdef CONFIG_TRACE_IRQFLAGS
 886	/* And now, to properly rebalance the above, we tell lockdep they
 887	 * are being turned back on, which will happen when we return
 888	 */
 889	bl	trace_hardirqs_on
 890#endif
 891#endif /* CONFIG_PREEMPT */
 892
 893	/* interrupts are hard-disabled at this point */
 894restore:
 895#ifdef CONFIG_44x
 896BEGIN_MMU_FTR_SECTION
 897	b	1f
 898END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
 899	lis	r4,icache_44x_need_flush@ha
 900	lwz	r5,icache_44x_need_flush@l(r4)
 901	cmplwi	cr0,r5,0
 902	beq+	1f
 903	li	r6,0
 904	iccci	r0,r0
 905	stw	r6,icache_44x_need_flush@l(r4)
 9061:
 907#endif  /* CONFIG_44x */
 908
 909	lwz	r9,_MSR(r1)
 910#ifdef CONFIG_TRACE_IRQFLAGS
 911	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
 912	 * off in this assembly code while peeking at TI_FLAGS() and such. However,
 913	 * we need to inform it if the exception turned interrupts off, and we
 914	 * are about to turn them back on.
 915	 *
 916	 * The problem here, sadly, is that we don't know whether the exception was
 917	 * one that turned interrupts off or not. So we always tell lockdep about
 918	 * turning them on here when we go back to wherever we came from with EE
 919	 * on, even if that may mean some redundant calls being tracked. Maybe later
 920	 * we could encode what the exception did somewhere or test the exception
 921	 * type in the pt_regs but that sounds overkill
 922	 */
 923	andi.	r10,r9,MSR_EE
 924	beq	1f
 925	/*
 926	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
 927	 * which is the stack frame here, we need to force a stack frame
 928	 * in case we came from user space.
 929	 */
 930	stwu	r1,-32(r1)
 931	mflr	r0
 932	stw	r0,4(r1)
 933	stwu	r1,-32(r1)
 934	bl	trace_hardirqs_on
 935	lwz	r1,0(r1)
 936	lwz	r1,0(r1)
 937	lwz	r9,_MSR(r1)
 9381:
 939#endif /* CONFIG_TRACE_IRQFLAGS */
 940
 941	lwz	r0,GPR0(r1)
 942	lwz	r2,GPR2(r1)
 943	REST_4GPRS(3, r1)
 944	REST_2GPRS(7, r1)
 945
 946	lwz	r10,_XER(r1)
 947	lwz	r11,_CTR(r1)
 948	mtspr	SPRN_XER,r10
 949	mtctr	r11
 950
 951	PPC405_ERR77(0,r1)
 952BEGIN_FTR_SECTION
 953	lwarx	r11,0,r1
 954END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 955	stwcx.	r0,0,r1			/* to clear the reservation */
 956
 957#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 958	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
 959	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
 960
 961	lwz	r10,_CCR(r1)
 962	lwz	r11,_LINK(r1)
 963	mtcrf	0xFF,r10
 964	mtlr	r11
 965
 966	/*
 967	 * Once we put values in SRR0 and SRR1, we are in a state
 968	 * where exceptions are not recoverable, since taking an
 969	 * exception will trash SRR0 and SRR1.  Therefore we clear the
 970	 * MSR:RI bit to indicate this.  If we do take an exception,
 971	 * we can't return to the point of the exception but we
 972	 * can restart the exception exit path at the label
 973	 * exc_exit_restart below.  -- paulus
 974	 */
 975	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
 976	SYNC
 977	MTMSRD(r10)		/* clear the RI bit */
 978	.globl exc_exit_restart
 979exc_exit_restart:
 980	lwz	r12,_NIP(r1)
 981#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 982	mtspr	SPRN_NRI, r0
 983#endif
 984	mtspr	SPRN_SRR0,r12
 985	mtspr	SPRN_SRR1,r9
 986	REST_4GPRS(9, r1)
 987	lwz	r1,GPR1(r1)
 988	.globl exc_exit_restart_end
 989exc_exit_restart_end:
 990	SYNC
 991	RFI
 992
 993#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
 994	/*
 995	 * This is a bit different on 4xx/Book-E because it doesn't have
 996	 * the RI bit in the MSR.
 997	 * The TLB miss handler checks if we have interrupted
 998	 * the exception exit path and restarts it if so
 999	 * (well maybe one day it will... :).
1000	 */
1001	lwz	r11,_LINK(r1)
1002	mtlr	r11
1003	lwz	r10,_CCR(r1)
1004	mtcrf	0xff,r10
1005	REST_2GPRS(9, r1)
1006	.globl exc_exit_restart
1007exc_exit_restart:
1008	lwz	r11,_NIP(r1)
1009	lwz	r12,_MSR(r1)
1010exc_exit_start:
1011	mtspr	SPRN_SRR0,r11
1012	mtspr	SPRN_SRR1,r12
1013	REST_2GPRS(11, r1)
1014	lwz	r1,GPR1(r1)
1015	.globl exc_exit_restart_end
1016exc_exit_restart_end:
1017	PPC405_ERR77_SYNC
1018	rfi
1019	b	.			/* prevent prefetch past rfi */
1020
1021/*
1022 * Returning from a critical interrupt in user mode doesn't need
1023 * to be any different from a normal exception.  For a critical
1024 * interrupt in the kernel, we just return (without checking for
1025 * preemption) since the interrupt may have happened at some crucial
1026 * place (e.g. inside the TLB miss handler), and because we will be
1027 * running with r1 pointing into critical_stack, not the current
1028 * process's kernel stack (and therefore current_thread_info() will
1029 * give the wrong answer).
1030 * We have to restore various SPRs that may have been in use at the
1031 * time of the critical interrupt.
1032 *
1033 */
1034#ifdef CONFIG_40x
1035#define PPC_40x_TURN_OFF_MSR_DR						    \
1036	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
1037	 * assume the instructions here are mapped by a pinned TLB entry */ \
1038	li	r10,MSR_IR;						    \
1039	mtmsr	r10;							    \
1040	isync;								    \
1041	tophys(r1, r1);
1042#else
1043#define PPC_40x_TURN_OFF_MSR_DR
1044#endif
1045
1046#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
1047	REST_NVGPRS(r1);						\
1048	lwz	r3,_MSR(r1);						\
1049	andi.	r3,r3,MSR_PR;						\
1050	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
1051	bne	user_exc_return;					\
1052	lwz	r0,GPR0(r1);						\
1053	lwz	r2,GPR2(r1);						\
1054	REST_4GPRS(3, r1);						\
1055	REST_2GPRS(7, r1);						\
1056	lwz	r10,_XER(r1);						\
1057	lwz	r11,_CTR(r1);						\
1058	mtspr	SPRN_XER,r10;						\
1059	mtctr	r11;							\
1060	PPC405_ERR77(0,r1);						\
1061	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1062	lwz	r11,_LINK(r1);						\
1063	mtlr	r11;							\
1064	lwz	r10,_CCR(r1);						\
1065	mtcrf	0xff,r10;						\
1066	PPC_40x_TURN_OFF_MSR_DR;					\
1067	lwz	r9,_DEAR(r1);						\
1068	lwz	r10,_ESR(r1);						\
1069	mtspr	SPRN_DEAR,r9;						\
1070	mtspr	SPRN_ESR,r10;						\
1071	lwz	r11,_NIP(r1);						\
1072	lwz	r12,_MSR(r1);						\
1073	mtspr	exc_lvl_srr0,r11;					\
1074	mtspr	exc_lvl_srr1,r12;					\
1075	lwz	r9,GPR9(r1);						\
1076	lwz	r12,GPR12(r1);						\
1077	lwz	r10,GPR10(r1);						\
1078	lwz	r11,GPR11(r1);						\
1079	lwz	r1,GPR1(r1);						\
1080	PPC405_ERR77_SYNC;						\
1081	exc_lvl_rfi;							\
1082	b	.;		/* prevent prefetch past exc_lvl_rfi */
1083
1084#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1085	lwz	r9,_##exc_lvl_srr0(r1);					\
1086	lwz	r10,_##exc_lvl_srr1(r1);				\
1087	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1088	mtspr	SPRN_##exc_lvl_srr1,r10;
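    /*
     * Editorial note, not in the original source: by token pasting, an
     * illustrative expansion of RESTORE_xSRR(CSRR0,CSRR1) is
     *	lwz	r9,_CSRR0(r1);
     *	lwz	r10,_CSRR1(r1);
     *	mtspr	SPRN_CSRR0,r9;
     *	mtspr	SPRN_CSRR1,r10;
     * i.e. the saved exception-level SRR pair is reloaded from the frame.
     */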
1089
1090#if defined(CONFIG_PPC_BOOK3E_MMU)
1091#ifdef CONFIG_PHYS_64BIT
1092#define	RESTORE_MAS7							\
1093	lwz	r11,MAS7(r1);						\
1094	mtspr	SPRN_MAS7,r11;
1095#else
1096#define	RESTORE_MAS7
1097#endif /* CONFIG_PHYS_64BIT */
1098#define RESTORE_MMU_REGS						\
1099	lwz	r9,MAS0(r1);						\
1100	lwz	r10,MAS1(r1);						\
1101	lwz	r11,MAS2(r1);						\
1102	mtspr	SPRN_MAS0,r9;						\
1103	lwz	r9,MAS3(r1);						\
1104	mtspr	SPRN_MAS1,r10;						\
1105	lwz	r10,MAS6(r1);						\
1106	mtspr	SPRN_MAS2,r11;						\
1107	mtspr	SPRN_MAS3,r9;						\
1108	mtspr	SPRN_MAS6,r10;						\
1109	RESTORE_MAS7;
1110#elif defined(CONFIG_44x)
1111#define RESTORE_MMU_REGS						\
1112	lwz	r9,MMUCR(r1);						\
1113	mtspr	SPRN_MMUCR,r9;
1114#else
1115#define RESTORE_MMU_REGS
1116#endif
1117
1118#ifdef CONFIG_40x
1119	.globl	ret_from_crit_exc
1120ret_from_crit_exc:
1121	mfspr	r9,SPRN_SPRG_THREAD
1122	lis	r10,saved_ksp_limit@ha;
1123	lwz	r10,saved_ksp_limit@l(r10);
1124	tovirt(r9,r9);
1125	stw	r10,KSP_LIMIT(r9)
1126	lis	r9,crit_srr0@ha;
1127	lwz	r9,crit_srr0@l(r9);
1128	lis	r10,crit_srr1@ha;
1129	lwz	r10,crit_srr1@l(r10);
1130	mtspr	SPRN_SRR0,r9;
1131	mtspr	SPRN_SRR1,r10;
1132	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1133#endif /* CONFIG_40x */
1134
1135#ifdef CONFIG_BOOKE
1136	.globl	ret_from_crit_exc
1137ret_from_crit_exc:
1138	mfspr	r9,SPRN_SPRG_THREAD
1139	lwz	r10,SAVED_KSP_LIMIT(r1)
1140	stw	r10,KSP_LIMIT(r9)
1141	RESTORE_xSRR(SRR0,SRR1);
1142	RESTORE_MMU_REGS;
1143	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1144
1145	.globl	ret_from_debug_exc
1146ret_from_debug_exc:
1147	mfspr	r9,SPRN_SPRG_THREAD
1148	lwz	r10,SAVED_KSP_LIMIT(r1)
1149	stw	r10,KSP_LIMIT(r9)
1150	lwz	r9,THREAD_INFO-THREAD(r9)
1151	CURRENT_THREAD_INFO(r10, r1)
1152	lwz	r10,TI_PREEMPT(r10)
1153	stw	r10,TI_PREEMPT(r9)
1154	RESTORE_xSRR(SRR0,SRR1);
1155	RESTORE_xSRR(CSRR0,CSRR1);
1156	RESTORE_MMU_REGS;
1157	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1158
1159	.globl	ret_from_mcheck_exc
1160ret_from_mcheck_exc:
1161	mfspr	r9,SPRN_SPRG_THREAD
1162	lwz	r10,SAVED_KSP_LIMIT(r1)
1163	stw	r10,KSP_LIMIT(r9)
1164	RESTORE_xSRR(SRR0,SRR1);
1165	RESTORE_xSRR(CSRR0,CSRR1);
1166	RESTORE_xSRR(DSRR0,DSRR1);
1167	RESTORE_MMU_REGS;
1168	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1169#endif /* CONFIG_BOOKE */
1170
1171/*
1172 * Load the DBCR0 value for a task that is being ptraced,
1173 * having first saved away the global DBCR0.  Note that r0
1174 * has the dbcr0 value to set upon entry to this.
1175 */
1176load_dbcr0:
1177	mfmsr	r10		/* first disable debug exceptions */
1178	rlwinm	r10,r10,0,~MSR_DE
1179	mtmsr	r10
1180	isync
1181	mfspr	r10,SPRN_DBCR0
1182	lis	r11,global_dbcr0@ha
1183	addi	r11,r11,global_dbcr0@l
1184#ifdef CONFIG_SMP
1185	CURRENT_THREAD_INFO(r9, r1)
1186	lwz	r9,TI_CPU(r9)
1187	slwi	r9,r9,3
1188	add	r11,r11,r9
1189#endif
1190	stw	r10,0(r11)
1191	mtspr	SPRN_DBCR0,r0
1192	lwz	r10,4(r11)
1193	addi	r10,r10,1
1194	stw	r10,4(r11)
1195	li	r11,-1
1196	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1197	blr
1198
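    /*
     * Editorial note, not in the original source: global_dbcr0 below holds
     * one 8-byte slot per CPU; word 0 is the saved global DBCR0 value and
     * word 4 is a use count (incremented in load_dbcr0 above, decremented
     * in transfer_to_handler when the saved value is written back).
     */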
1199	.section .bss
1200	.align	4
1201global_dbcr0:
1202	.space	8*NR_CPUS
1203	.previous
1204#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1205
1206do_work:			/* r10 contains MSR_KERNEL here */
1207	andi.	r0,r9,_TIF_NEED_RESCHED
1208	beq	do_user_signal
1209
1210do_resched:			/* r10 contains MSR_KERNEL here */
1211	/* Note: We don't need to inform lockdep that we are enabling
1212	 * interrupts here. As far as it knows, they are already enabled
1213	 */
1214	ori	r10,r10,MSR_EE
1215	SYNC
1216	MTMSRD(r10)		/* hard-enable interrupts */
1217	bl	schedule
1218recheck:
1219	/* Note: And we don't tell it we are disabling them again
1220	 * neither. Those disable/enable cycles used to peek at
1221	 * TI_FLAGS aren't advertised.
1222	 */
1223	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1224	SYNC
1225	MTMSRD(r10)		/* disable interrupts */
1226	CURRENT_THREAD_INFO(r9, r1)
1227	lwz	r9,TI_FLAGS(r9)
1228	andi.	r0,r9,_TIF_NEED_RESCHED
1229	bne-	do_resched
1230	andi.	r0,r9,_TIF_USER_WORK_MASK
1231	beq	restore_user
1232do_user_signal:			/* r10 contains MSR_KERNEL here */
1233	ori	r10,r10,MSR_EE
1234	SYNC
1235	MTMSRD(r10)		/* hard-enable interrupts */
1236	/* save r13-r31 in the exception frame, if not already done */
1237	lwz	r3,_TRAP(r1)
1238	andi.	r0,r3,1
1239	beq	2f
1240	SAVE_NVGPRS(r1)
1241	rlwinm	r3,r3,0,0,30
1242	stw	r3,_TRAP(r1)
12432:	addi	r3,r1,STACK_FRAME_OVERHEAD
1244	mr	r4,r9
1245	bl	do_notify_resume
1246	REST_NVGPRS(r1)
1247	b	recheck
1248
1249/*
1250 * We come here when we are at the end of handling an exception
1251 * that occurred at a place where taking an exception will lose
1252 * state information, such as the contents of SRR0 and SRR1.
1253 */
1254nonrecoverable:
1255	lis	r10,exc_exit_restart_end@ha
1256	addi	r10,r10,exc_exit_restart_end@l
1257	cmplw	r12,r10
1258	bge	3f
1259	lis	r11,exc_exit_restart@ha
1260	addi	r11,r11,exc_exit_restart@l
1261	cmplw	r12,r11
1262	blt	3f
1263	lis	r10,ee_restarts@ha
1264	lwz	r12,ee_restarts@l(r10)
1265	addi	r12,r12,1
1266	stw	r12,ee_restarts@l(r10)
1267	mr	r12,r11		/* restart at exc_exit_restart */
1268	blr
12693:	/* OK, we can't recover, kill this process */
1270	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1271BEGIN_FTR_SECTION
1272	blr
1273END_FTR_SECTION_IFSET(CPU_FTR_601)
1274	lwz	r3,_TRAP(r1)
1275	andi.	r0,r3,1
1276	beq	4f
1277	SAVE_NVGPRS(r1)
1278	rlwinm	r3,r3,0,0,30
1279	stw	r3,_TRAP(r1)
12804:	addi	r3,r1,STACK_FRAME_OVERHEAD
1281	bl	nonrecoverable_exception
1282	/* shouldn't return */
1283	b	4b
1284
1285	.section .bss
1286	.align	2
1287ee_restarts:
1288	.space	4
1289	.previous
1290
1291/*
1292 * PROM code for specific machines follows.  Put it
1293 * here so it's easy to add arch-specific sections later.
1294 * -- Cort
1295 */
1296#ifdef CONFIG_PPC_RTAS
1297/*
1298 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1299 * called with the MMU off.
1300 */
1301_GLOBAL(enter_rtas)
1302	stwu	r1,-INT_FRAME_SIZE(r1)
1303	mflr	r0
1304	stw	r0,INT_FRAME_SIZE+4(r1)
1305	LOAD_REG_ADDR(r4, rtas)
1306	lis	r6,1f@ha	/* physical return address for rtas */
1307	addi	r6,r6,1f@l
1308	tophys(r6,r6)
1309	tophys(r7,r1)
1310	lwz	r8,RTASENTRY(r4)
1311	lwz	r4,RTASBASE(r4)
1312	mfmsr	r9
1313	stw	r9,8(r1)
1314	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1315	SYNC			/* disable interrupts so SRR0/1 */
1316	MTMSRD(r0)		/* don't get trashed */
1317	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1318	mtlr	r6
1319	mtspr	SPRN_SPRG_RTAS,r7
1320	mtspr	SPRN_SRR0,r8
1321	mtspr	SPRN_SRR1,r9
1322	RFI
13231:	tophys(r9,r1)
1324	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1325	lwz	r9,8(r9)	/* original msr value */
1326	addi	r1,r1,INT_FRAME_SIZE
1327	li	r0,0
1328	mtspr	SPRN_SPRG_RTAS,r0
1329	mtspr	SPRN_SRR0,r8
1330	mtspr	SPRN_SRR1,r9
1331	RFI			/* return to caller */
1332
1333	.globl	machine_check_in_rtas
1334machine_check_in_rtas:
1335	twi	31,0,0
1336	/* XXX load up BATs and panic */
1337
1338#endif /* CONFIG_PPC_RTAS */
v6.8: arch/powerpc/kernel/entry_32.S
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 *  PowerPC version
  4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  5 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
  6 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
  7 *  Adapted for Power Macintosh by Paul Mackerras.
  8 *  Low-level exception handlers and MMU support
  9 *  rewritten by Paul Mackerras.
 10 *    Copyright (C) 1996 Paul Mackerras.
 11 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 12 *
 13 *  This file contains the system call entry code, context switch
 14 *  code, and exception/interrupt return code for PowerPC.
 15 */
 16
 17#include <linux/errno.h>
 18#include <linux/err.h>
 19#include <linux/sys.h>
 20#include <linux/threads.h>
 21#include <linux/linkage.h>
 22
 23#include <asm/reg.h>
 24#include <asm/page.h>
 25#include <asm/mmu.h>
 26#include <asm/cputable.h>
 27#include <asm/thread_info.h>
 28#include <asm/ppc_asm.h>
 29#include <asm/asm-offsets.h>
 30#include <asm/unistd.h>
 31#include <asm/ptrace.h>
 32#include <asm/feature-fixups.h>
 33#include <asm/barrier.h>
 34#include <asm/kup.h>
 35#include <asm/bug.h>
 36#include <asm/interrupt.h>
 37
 38#include "head_32.h"
 39
 40/*
 41 * powerpc relies on return from interrupt/syscall being context synchronising
 42 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 43 * synchronisation instructions.
 44 */
 45
 46/*
 47 * Align to 4k in order to ensure that all functions modifying srr0/srr1
 48 * fit into one page in order to not encounter a TLB miss between the
 49 * modification of srr0/srr1 and the associated rfi.
 50 */
 51	.align	12
 52
 53#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500)
 54	.globl	prepare_transfer_to_handler
 55prepare_transfer_to_handler:
 56	/* if from kernel, check interrupted DOZE/NAP mode */
 57	lwz	r12,TI_LOCAL_FLAGS(r2)
 58	mtcrf	0x01,r12
 59	bt-	31-TLF_NAPPING,4f
 60	bt-	31-TLF_SLEEPING,7f
 61	blr
 62
 634:	rlwinm	r12,r12,0,~_TLF_NAPPING
 64	stw	r12,TI_LOCAL_FLAGS(r2)
 65	b	power_save_ppc32_restore
 66
 677:	rlwinm	r12,r12,0,~_TLF_SLEEPING
 68	stw	r12,TI_LOCAL_FLAGS(r2)
 69	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
 70	rlwinm	r9,r9,0,~MSR_EE
 71	lwz	r12,_LINK(r11)		/* and return to address in LR */
 72	REST_GPR(2, r11)
 73	b	fast_exception_return
 74_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
 75#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */
 76
 77#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
 78SYM_FUNC_START(__kuep_lock)
 79	lwz	r9, THREAD+THSR0(r2)
 80	update_user_segments_by_4 r9, r10, r11, r12
 81	blr
 82SYM_FUNC_END(__kuep_lock)
 83
 84SYM_FUNC_START_LOCAL(__kuep_unlock)
 85	lwz	r9, THREAD+THSR0(r2)
 86	rlwinm  r9,r9,0,~SR_NX
 87	update_user_segments_by_4 r9, r10, r11, r12
 88	blr
 89SYM_FUNC_END(__kuep_unlock)
 90
 91.macro	kuep_lock
 92	bl	__kuep_lock
 93.endm
 94.macro	kuep_unlock
 95	bl	__kuep_unlock
 96.endm
 97#else
 98.macro	kuep_lock
 99.endm
100.macro	kuep_unlock
101.endm
102#endif
103
104	.globl	transfer_to_syscall
105transfer_to_syscall:
106	stw	r3, ORIG_GPR3(r1)
107	stw	r11, GPR1(r1)
108	stw	r11, 0(r1)
109	mflr	r12
110	stw	r12, _LINK(r1)
111#ifdef CONFIG_BOOKE_OR_40x
112	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
113#endif
114	lis	r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
115	SAVE_GPR(2, r1)
116	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
117	stw	r9,_MSR(r1)
118	li	r2, INTERRUPT_SYSCALL
119	stw	r12,STACK_INT_FRAME_MARKER(r1)
120	stw	r2,_TRAP(r1)
121	SAVE_GPR(0, r1)
122	SAVE_GPRS(3, 8, r1)
123	addi	r2,r10,-THREAD
124	SAVE_NVGPRS(r1)
125	kuep_lock
126
127	/* Calling convention has r3 = regs, r4 = orig r0 */
128	addi	r3,r1,STACK_INT_FRAME_REGS
129	mr	r4,r0
130	bl	system_call_exception
131
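   /*
    * Editorial note, not in the original source: system_call_exception() is
    * the C syscall entry point; it dispatches the call and returns the
    * syscall return value in r3, which ret_from_syscall below hands to
    * syscall_exit_prepare() together with a pointer to the saved registers.
    */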
132ret_from_syscall:
133	addi    r4,r1,STACK_INT_FRAME_REGS
134	li	r5,0
135	bl	syscall_exit_prepare
136#ifdef CONFIG_PPC_47x
137	lis	r4,icache_44x_need_flush@ha
138	lwz	r5,icache_44x_need_flush@l(r4)
139	cmplwi	cr0,r5,0
140	bne-	.L44x_icache_flush
141#endif /* CONFIG_PPC_47x */
142.L44x_icache_flush_return:
143	kuep_unlock
144	lwz	r4,_LINK(r1)
145	lwz	r5,_CCR(r1)
146	mtlr	r4
147	lwz	r7,_NIP(r1)
148	lwz	r8,_MSR(r1)
149	cmpwi	r3,0
150	REST_GPR(3, r1)
151syscall_exit_finish:
152	mtspr	SPRN_SRR0,r7
153	mtspr	SPRN_SRR1,r8
154
155	bne	3f
156	mtcr	r5
157
1581:	REST_GPR(2, r1)
159	REST_GPR(1, r1)
160	rfi
161#ifdef CONFIG_40x
162	b .	/* Prevent prefetch past rfi */
163#endif
164
1653:	mtcr	r5
166	lwz	r4,_CTR(r1)
167	lwz	r5,_XER(r1)
168	REST_NVGPRS(r1)
169	mtctr	r4
170	mtxer	r5
171	REST_GPR(0, r1)
172	REST_GPRS(3, 12, r1)
173	b	1b
174
175#ifdef CONFIG_44x
176.L44x_icache_flush:
177	li	r7,0
178	iccci	r0,r0
179	stw	r7,icache_44x_need_flush@l(r4)
180	b	.L44x_icache_flush_return
181#endif  /* CONFIG_44x */
182
183	.globl	ret_from_fork
184ret_from_fork:
185	REST_NVGPRS(r1)
186	bl	schedule_tail
187	li	r3,0	/* fork() return value */
188	b	ret_from_syscall
189
190	.globl	ret_from_kernel_user_thread
191ret_from_kernel_user_thread:
192	bl	schedule_tail
193	mtctr	r14
194	mr	r3,r15
195	PPC440EP_ERR42
196	bctrl
197	li	r3,0
198	b	ret_from_syscall
199
200	.globl	start_kernel_thread
201start_kernel_thread:
202	bl	schedule_tail
203	mtctr	r14
204	mr	r3,r15
205	PPC440EP_ERR42
206	bctrl
207	/*
208	 * This must not return. We actually want to BUG here, not WARN,
209	 * because BUG will exit the process which is what the kernel thread
210	 * should have done, which may give some hope of continuing.
211	 */
212100:	trap
213	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
214
215	.globl	fast_exception_return
216fast_exception_return:
217#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
218	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
219	beq	3f			/* if not, we've got problems */
220#endif
221
2222:	lwz	r10,_CCR(r11)
223	REST_GPRS(1, 6, r11)
224	mtcr	r10
225	lwz	r10,_LINK(r11)
226	mtlr	r10
227	/* Clear the exception marker on the stack to avoid confusing stacktrace */
228	li	r10, 0
229	stw	r10, 8(r11)
230	REST_GPR(10, r11)
231#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
232	mtspr	SPRN_NRI, r0
233#endif
234	mtspr	SPRN_SRR1,r9
235	mtspr	SPRN_SRR0,r12
236	REST_GPR(9, r11)
237	REST_GPR(12, r11)
238	REST_GPR(11, r11)
239	rfi
240#ifdef CONFIG_40x
241	b .	/* Prevent prefetch past rfi */
242#endif
243_ASM_NOKPROBE_SYMBOL(fast_exception_return)
244
245/* aargh, a nonrecoverable interrupt, panic */
246/* aargh, we don't know which trap this is */
2473:
248	li	r10,-1
249	stw	r10,_TRAP(r11)
250	prepare_transfer_to_handler
251	bl	unrecoverable_exception
252	trap	/* should not get here */
253
254	.globl interrupt_return
255interrupt_return:
256	lwz	r4,_MSR(r1)
257	addi	r3,r1,STACK_INT_FRAME_REGS
258	andi.	r0,r4,MSR_PR
259	beq	.Lkernel_interrupt_return
260	bl	interrupt_exit_user_prepare
261	cmpwi	r3,0
262	kuep_unlock
263	bne-	.Lrestore_nvgprs
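   	/*
   	 * Editorial note, not in the original source: a non-zero return from
   	 * interrupt_exit_user_prepare() means the non-volatile GPRs must be
   	 * reloaded from the frame, hence the branch to .Lrestore_nvgprs;
   	 * zero selects the common fast path below.
   	 */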
264
265.Lfast_user_interrupt_return:
266	lwz	r11,_NIP(r1)
267	lwz	r12,_MSR(r1)
268	mtspr	SPRN_SRR0,r11
269	mtspr	SPRN_SRR1,r12
270
271BEGIN_FTR_SECTION
272	stwcx.	r0,0,r1		/* to clear the reservation */
273FTR_SECTION_ELSE
274	lwarx	r0,0,r1
275ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
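   /*
    * Editorial note, not in the original source: either alternative kills a
    * reservation left over from an interrupted lwarx/stwcx. sequence.  A
    * stwcx. always clears the reservation; on CPUs where stwcx. checks the
    * reservation address (CPU_FTR_STCX_CHECKS_ADDRESS), a lwarx on the
    * kernel stack is sufficient, as it moves the reservation to an address
    * the interrupted sequence will not match.
    */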
276
277	lwz	r3,_CCR(r1)
278	lwz	r4,_LINK(r1)
279	lwz	r5,_CTR(r1)
280	lwz	r6,_XER(r1)
281	li	r0,0
282
283	/*
284	 * Leaving a stale exception marker on the stack can confuse
285	 * the reliable stack unwinder later on. Clear it.
286	 */
287	stw	r0,8(r1)
288	REST_GPRS(7, 12, r1)
289
290	mtcr	r3
291	mtlr	r4
292	mtctr	r5
293	mtspr	SPRN_XER,r6
294
295	REST_GPRS(2, 6, r1)
296	REST_GPR(0, r1)
297	REST_GPR(1, r1)
298	rfi
299#ifdef CONFIG_40x
300	b .	/* Prevent prefetch past rfi */
301#endif
302
303.Lrestore_nvgprs:
304	REST_NVGPRS(r1)
305	b	.Lfast_user_interrupt_return
306
307.Lkernel_interrupt_return:
308	bl	interrupt_exit_kernel_prepare
309
310.Lfast_kernel_interrupt_return:
311	cmpwi	cr1,r3,0
312	lwz	r11,_NIP(r1)
313	lwz	r12,_MSR(r1)
314	mtspr	SPRN_SRR0,r11
315	mtspr	SPRN_SRR1,r12
316
317BEGIN_FTR_SECTION
318	stwcx.	r0,0,r1		/* to clear the reservation */
319FTR_SECTION_ELSE
320	lwarx	r0,0,r1
321ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
322
323	lwz	r3,_LINK(r1)
324	lwz	r4,_CTR(r1)
325	lwz	r5,_XER(r1)
326	lwz	r6,_CCR(r1)
327	li	r0,0
328
329	REST_GPRS(7, 12, r1)
330
331	mtlr	r3
332	mtctr	r4
333	mtspr	SPRN_XER,r5
334
335	/*
336	 * Leaving a stale exception marker on the stack can confuse
337	 * the reliable stack unwinder later on. Clear it.
338	 */
339	stw	r0,8(r1)
340
341	REST_GPRS(2, 5, r1)
342
343	bne-	cr1,1f /* emulate stack store */
344	mtcr	r6
345	REST_GPR(6, r1)
346	REST_GPR(0, r1)
347	REST_GPR(1, r1)
348	rfi
349#ifdef CONFIG_40x
350	b .	/* Prevent prefetch past rfi */
351#endif
352
3531:	/*
354	 * Emulate stack store with update. New r1 value was already calculated
355	 * and updated in our interrupt regs by emulate_loadstore, but we can't
356	 * store the previous value of r1 to the stack before re-loading our
357	 * registers from it, otherwise they could be clobbered.  Use
358	 * SPRG Scratch0 as temporary storage to hold the store
359	 * data, as interrupts are disabled here so it won't be clobbered.
360	 */
361	mtcr	r6
362#ifdef CONFIG_BOOKE
363	mtspr	SPRN_SPRG_WSCRATCH0, r9
364#else
365	mtspr	SPRN_SPRG_SCRATCH0, r9
366#endif
367	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
368	REST_GPR(6, r1)
369	REST_GPR(0, r1)
370	REST_GPR(1, r1)
371	stw	r9,0(r1) /* perform store component of stwu */
372#ifdef CONFIG_BOOKE
373	mfspr	r9, SPRN_SPRG_RSCRATCH0
374#else
375	mfspr	r9, SPRN_SPRG_SCRATCH0
376#endif
377	rfi
378#ifdef CONFIG_40x
379	b .	/* Prevent prefetch past rfi */
380#endif
381_ASM_NOKPROBE_SYMBOL(interrupt_return)
382
383#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
384
385/*
386 * Returning from a critical interrupt in user mode doesn't need
387 * to be any different from a normal exception.  For a critical
388 * interrupt in the kernel, we just return (without checking for
389 * preemption) since the interrupt may have happened at some crucial
390 * place (e.g. inside the TLB miss handler), and because we will be
391 * running with r1 pointing into critical_stack, not the current
392 * process's kernel stack (and therefore current_thread_info() will
393 * give the wrong answer).
394 * We have to restore various SPRs that may have been in use at the
395 * time of the critical interrupt.
396 *
397 */
398#ifdef CONFIG_40x
399#define PPC_40x_TURN_OFF_MSR_DR						    \
400	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
401	 * assume the instructions here are mapped by a pinned TLB entry */ \
402	li	r10,MSR_IR;						    \
403	mtmsr	r10;							    \
404	isync;								    \
405	tophys(r1, r1);
406#else
407#define PPC_40x_TURN_OFF_MSR_DR
408#endif
409
410#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
411	REST_NVGPRS(r1);						\
412	lwz	r3,_MSR(r1);						\
413	andi.	r3,r3,MSR_PR;						\
414	bne	interrupt_return;					\
415	REST_GPR(0, r1);						\
416	REST_GPRS(2, 8, r1);						\
417	lwz	r10,_XER(r1);						\
418	lwz	r11,_CTR(r1);						\
419	mtspr	SPRN_XER,r10;						\
420	mtctr	r11;							\
421	stwcx.	r0,0,r1;		/* to clear the reservation */	\
422	lwz	r11,_LINK(r1);						\
423	mtlr	r11;							\
424	lwz	r10,_CCR(r1);						\
425	mtcrf	0xff,r10;						\
426	PPC_40x_TURN_OFF_MSR_DR;					\
427	lwz	r9,_DEAR(r1);						\
428	lwz	r10,_ESR(r1);						\
429	mtspr	SPRN_DEAR,r9;						\
430	mtspr	SPRN_ESR,r10;						\
431	lwz	r11,_NIP(r1);						\
432	lwz	r12,_MSR(r1);						\
433	mtspr	exc_lvl_srr0,r11;					\
434	mtspr	exc_lvl_srr1,r12;					\
435	REST_GPRS(9, 12, r1);						\
436	REST_GPR(1, r1);						\
437	exc_lvl_rfi;							\
438	b	.;		/* prevent prefetch past exc_lvl_rfi */
439
440#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
441	lwz	r9,_##exc_lvl_srr0(r1);					\
442	lwz	r10,_##exc_lvl_srr1(r1);				\
443	mtspr	SPRN_##exc_lvl_srr0,r9;					\
444	mtspr	SPRN_##exc_lvl_srr1,r10;
445
446#if defined(CONFIG_PPC_E500)
447#ifdef CONFIG_PHYS_64BIT
448#define	RESTORE_MAS7							\
449	lwz	r11,MAS7(r1);						\
450	mtspr	SPRN_MAS7,r11;
451#else
452#define	RESTORE_MAS7
453#endif /* CONFIG_PHYS_64BIT */
454#define RESTORE_MMU_REGS						\
455	lwz	r9,MAS0(r1);						\
456	lwz	r10,MAS1(r1);						\
457	lwz	r11,MAS2(r1);						\
458	mtspr	SPRN_MAS0,r9;						\
459	lwz	r9,MAS3(r1);						\
460	mtspr	SPRN_MAS1,r10;						\
461	lwz	r10,MAS6(r1);						\
462	mtspr	SPRN_MAS2,r11;						\
463	mtspr	SPRN_MAS3,r9;						\
464	mtspr	SPRN_MAS6,r10;						\
465	RESTORE_MAS7;
466#elif defined(CONFIG_44x)
467#define RESTORE_MMU_REGS						\
468	lwz	r9,MMUCR(r1);						\
469	mtspr	SPRN_MMUCR,r9;
470#else
471#define RESTORE_MMU_REGS
472#endif
473
474#ifdef CONFIG_40x
475	.globl	ret_from_crit_exc
476ret_from_crit_exc:
477	lis	r9,crit_srr0@ha;
478	lwz	r9,crit_srr0@l(r9);
479	lis	r10,crit_srr1@ha;
480	lwz	r10,crit_srr1@l(r10);
481	mtspr	SPRN_SRR0,r9;
482	mtspr	SPRN_SRR1,r10;
483	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
484_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
485#endif /* CONFIG_40x */
486
487#ifdef CONFIG_BOOKE
488	.globl	ret_from_crit_exc
489ret_from_crit_exc:
490	RESTORE_xSRR(SRR0,SRR1);
491	RESTORE_MMU_REGS;
492	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
493_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
494
495	.globl	ret_from_debug_exc
496ret_from_debug_exc:
497	RESTORE_xSRR(SRR0,SRR1);
498	RESTORE_xSRR(CSRR0,CSRR1);
499	RESTORE_MMU_REGS;
500	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
501_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)
502
503	.globl	ret_from_mcheck_exc
504ret_from_mcheck_exc:
505	RESTORE_xSRR(SRR0,SRR1);
506	RESTORE_xSRR(CSRR0,CSRR1);
507	RESTORE_xSRR(DSRR0,DSRR1);
508	RESTORE_MMU_REGS;
509	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
510_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
511#endif /* CONFIG_BOOKE */
512#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */