v5.9
   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/*
   3 *  PowerPC version
   4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
   5 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
   6 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
   7 *  Adapted for Power Macintosh by Paul Mackerras.
   8 *  Low-level exception handlers and MMU support
   9 *  rewritten by Paul Mackerras.
  10 *    Copyright (C) 1996 Paul Mackerras.
  11 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
  12 *
  13 *  This file contains the system call entry code, context switch
  14 *  code, and exception/interrupt return code for PowerPC.
  15 */
  16
  17#include <linux/errno.h>
  18#include <linux/err.h>
  19#include <linux/sys.h>
  20#include <linux/threads.h>
  21#include <asm/reg.h>
  22#include <asm/page.h>
  23#include <asm/mmu.h>
  24#include <asm/cputable.h>
  25#include <asm/thread_info.h>
  26#include <asm/ppc_asm.h>
  27#include <asm/asm-offsets.h>
  28#include <asm/unistd.h>
  29#include <asm/ptrace.h>
  30#include <asm/export.h>
  31#include <asm/feature-fixups.h>
  32#include <asm/barrier.h>
  33#include <asm/kup.h>
  34#include <asm/bug.h>
  35
  36#include "head_32.h"
  37
  38/*
  39 * powerpc relies on return from interrupt/syscall being context synchronising
  40 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
  41 * synchronisation instructions.
  42 */
  43
  44/*
  45 * Align to 4k in order to ensure that all functions modifying srr0/srr1
  46 * fit into one page in order to not encounter a TLB miss between the
  47 * modification of srr0/srr1 and the associated rfi.
  48 */
  49	.align	12
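	/*
	 * Note: .align 12 requests 2^12 = 4096-byte alignment, i.e. one 4K
	 * page, so the srr0/srr1-modifying code below shares a single page.
	 */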
  50
  51#ifdef CONFIG_BOOKE
  52	.globl	mcheck_transfer_to_handler
  53mcheck_transfer_to_handler:
  54	mfspr	r0,SPRN_DSRR0
  55	stw	r0,_DSRR0(r11)
  56	mfspr	r0,SPRN_DSRR1
  57	stw	r0,_DSRR1(r11)
  58	/* fall through */
  59_ASM_NOKPROBE_SYMBOL(mcheck_transfer_to_handler)
  60
  61	.globl	debug_transfer_to_handler
  62debug_transfer_to_handler:
  63	mfspr	r0,SPRN_CSRR0
  64	stw	r0,_CSRR0(r11)
  65	mfspr	r0,SPRN_CSRR1
  66	stw	r0,_CSRR1(r11)
  67	/* fall through */
  68_ASM_NOKPROBE_SYMBOL(debug_transfer_to_handler)
  69
  70	.globl	crit_transfer_to_handler
  71crit_transfer_to_handler:
  72#ifdef CONFIG_PPC_BOOK3E_MMU
  73	mfspr	r0,SPRN_MAS0
  74	stw	r0,MAS0(r11)
  75	mfspr	r0,SPRN_MAS1
  76	stw	r0,MAS1(r11)
  77	mfspr	r0,SPRN_MAS2
  78	stw	r0,MAS2(r11)
  79	mfspr	r0,SPRN_MAS3
  80	stw	r0,MAS3(r11)
  81	mfspr	r0,SPRN_MAS6
  82	stw	r0,MAS6(r11)
  83#ifdef CONFIG_PHYS_64BIT
  84	mfspr	r0,SPRN_MAS7
  85	stw	r0,MAS7(r11)
  86#endif /* CONFIG_PHYS_64BIT */
  87#endif /* CONFIG_PPC_BOOK3E_MMU */
  88#ifdef CONFIG_44x
  89	mfspr	r0,SPRN_MMUCR
  90	stw	r0,MMUCR(r11)
  91#endif
  92	mfspr	r0,SPRN_SRR0
  93	stw	r0,_SRR0(r11)
  94	mfspr	r0,SPRN_SRR1
  95	stw	r0,_SRR1(r11)
  96
  97	/* set the stack limit to the current stack */
  98	mfspr	r8,SPRN_SPRG_THREAD
  99	lwz	r0,KSP_LIMIT(r8)
 100	stw	r0,SAVED_KSP_LIMIT(r11)
 101	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
 102	stw	r0,KSP_LIMIT(r8)
 103	/* fall through */
 104_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
 105#endif
 106
 107#ifdef CONFIG_40x
 108	.globl	crit_transfer_to_handler
 109crit_transfer_to_handler:
 110	lwz	r0,crit_r10@l(0)
 111	stw	r0,GPR10(r11)
 112	lwz	r0,crit_r11@l(0)
 113	stw	r0,GPR11(r11)
 114	mfspr	r0,SPRN_SRR0
 115	stw	r0,crit_srr0@l(0)
 116	mfspr	r0,SPRN_SRR1
 117	stw	r0,crit_srr1@l(0)
 118
 119	/* set the stack limit to the current stack */
 120	mfspr	r8,SPRN_SPRG_THREAD
 121	lwz	r0,KSP_LIMIT(r8)
 122	stw	r0,saved_ksp_limit@l(0)
 123	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
 124	stw	r0,KSP_LIMIT(r8)
 125	/* fall through */
 126_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
 127#endif
 128
 129/*
 130 * This code finishes saving the registers to the exception frame
 131 * and jumps to the appropriate handler for the exception, turning
 132 * on address translation.
 133 * Note that we rely on the caller having set cr0.eq iff the exception
 134 * occurred in kernel mode (i.e. MSR:PR = 0).
 135 */
 136	.globl	transfer_to_handler_full
 137transfer_to_handler_full:
 138	SAVE_NVGPRS(r11)
 139_ASM_NOKPROBE_SYMBOL(transfer_to_handler_full)
 140	/* fall through */
 141
 142	.globl	transfer_to_handler
 143transfer_to_handler:
 144	stw	r2,GPR2(r11)
 145	stw	r12,_NIP(r11)
 146	stw	r9,_MSR(r11)
 147	andi.	r2,r9,MSR_PR
 148	mfctr	r12
 149	mfspr	r2,SPRN_XER
 150	stw	r12,_CTR(r11)
 151	stw	r2,_XER(r11)
 152	mfspr	r12,SPRN_SPRG_THREAD
 153	tovirt_vmstack r12, r12
 154	beq	2f			/* if from user, fix up THREAD.regs */
 155	addi	r2, r12, -THREAD
 156	addi	r11,r1,STACK_FRAME_OVERHEAD
 157	stw	r11,PT_REGS(r12)
 158#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
 159	/* Check to see if the dbcr0 register is set up to debug.  Use the
 160	   internal debug mode bit to do this. */
 161	lwz	r12,THREAD_DBCR0(r12)
 162	andis.	r12,r12,DBCR0_IDM@h
 163#endif
 164	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
 165#ifdef CONFIG_PPC_BOOK3S_32
 166	kuep_lock r11, r12
 167#endif
 168#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
 169	beq+	3f
 170	/* From user and task is ptraced - load up global dbcr0 */
 171	li	r12,-1			/* clear all pending debug events */
 172	mtspr	SPRN_DBSR,r12
 173	lis	r11,global_dbcr0@ha
 174	tophys(r11,r11)
 175	addi	r11,r11,global_dbcr0@l
 176#ifdef CONFIG_SMP
 177	lwz	r9,TASK_CPU(r2)
 178	slwi	r9,r9,3
 179	add	r11,r11,r9
 180#endif
 181	lwz	r12,0(r11)
 182	mtspr	SPRN_DBCR0,r12
 183	lwz	r12,4(r11)
 184	addi	r12,r12,-1
 185	stw	r12,4(r11)
 186#endif
 187
 188	b	3f
 189
 1902:	/* if from kernel, check interrupted DOZE/NAP mode and
 191         * check for stack overflow
 192         */
 193	kuap_save_and_lock r11, r12, r9, r2, r6
 194	addi	r2, r12, -THREAD
 195#ifndef CONFIG_VMAP_STACK
 196	lwz	r9,KSP_LIMIT(r12)
 197	cmplw	r1,r9			/* if r1 <= ksp_limit */
 198	ble-	stack_ovf		/* then the kernel stack overflowed */
 199#endif
 2005:
 201#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
 202	lwz	r12,TI_LOCAL_FLAGS(r2)
 203	mtcrf	0x01,r12
 204	bt-	31-TLF_NAPPING,4f
 205	bt-	31-TLF_SLEEPING,7f
 206#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
 207	.globl transfer_to_handler_cont
 208transfer_to_handler_cont:
 2093:
 210	mflr	r9
 211	tovirt_novmstack r2, r2 	/* set r2 to current */
 212	tovirt_vmstack r9, r9
 213	lwz	r11,0(r9)		/* virtual address of handler */
 214	lwz	r9,4(r9)		/* where to go when done */
 215#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 216	mtspr	SPRN_NRI, r0
 217#endif
 218#ifdef CONFIG_TRACE_IRQFLAGS
 219	/*
 220	 * When tracing IRQ state (lockdep) we enable the MMU before we call
 221	 * the IRQ tracing functions as they might access vmalloc space or
 222	 * perform IOs for console output.
 223	 *
 224	 * To speed up the syscall path where interrupts stay on, let's check
 225	 * first if we are changing the MSR value at all.
 226	 */
 227	tophys_novmstack r12, r1
 228	lwz	r12,_MSR(r12)
 229	andi.	r12,r12,MSR_EE
 230	bne	1f
 231
 232	/* MSR isn't changing, just transition directly */
 233#endif
 234	mtspr	SPRN_SRR0,r11
 235	mtspr	SPRN_SRR1,r10
 236	mtlr	r9
 237	SYNC
 238	RFI				/* jump to handler, enable MMU */
 239
 240#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
 2414:	rlwinm	r12,r12,0,~_TLF_NAPPING
 242	stw	r12,TI_LOCAL_FLAGS(r2)
 243	b	power_save_ppc32_restore
 244
 2457:	rlwinm	r12,r12,0,~_TLF_SLEEPING
 246	stw	r12,TI_LOCAL_FLAGS(r2)
 247	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
 248	rlwinm	r9,r9,0,~MSR_EE
 249	lwz	r12,_LINK(r11)		/* and return to address in LR */
 250	kuap_restore r11, r2, r3, r4, r5
 251	lwz	r2, GPR2(r11)
 252	b	fast_exception_return
 253#endif
 254_ASM_NOKPROBE_SYMBOL(transfer_to_handler)
 255_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont)
 256
 257#ifdef CONFIG_TRACE_IRQFLAGS
 2581:	/* MSR is changing, re-enable MMU so we can notify lockdep. We need to
 259	 * keep interrupts disabled at this point otherwise we might risk
 260	 * taking an interrupt before we tell lockdep they are enabled.
 261	 */
 262	lis	r12,reenable_mmu@h
 263	ori	r12,r12,reenable_mmu@l
 264	LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
 265	mtspr	SPRN_SRR0,r12
 266	mtspr	SPRN_SRR1,r0
 267	SYNC
 268	RFI
 269
 270reenable_mmu:
 271	/*
 272	 * We save a bunch of GPRs,
 273	 * r3 can be different from GPR3(r1) at this point, r9 and r11
 274	 * contains the old MSR and handler address respectively,
 275	 * r4 & r5 can contain page fault arguments that need to be passed
 276	 * along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left
 277	 * clobbered as they aren't useful past this point.
 278	 */
 279
 280	stwu	r1,-32(r1)
 281	stw	r9,8(r1)
 282	stw	r11,12(r1)
 283	stw	r3,16(r1)
 284	stw	r4,20(r1)
 285	stw	r5,24(r1)
 286
 287	/* If we are disabling interrupts (normal case), simply log it with
 288	 * lockdep
 289	 */
 2901:	bl	trace_hardirqs_off
 291	lwz	r5,24(r1)
 292	lwz	r4,20(r1)
 293	lwz	r3,16(r1)
 294	lwz	r11,12(r1)
 295	lwz	r9,8(r1)
 296	addi	r1,r1,32
 297	mtctr	r11
 298	mtlr	r9
 299	bctr				/* jump to handler */
 300#endif /* CONFIG_TRACE_IRQFLAGS */
 301
 302#ifndef CONFIG_VMAP_STACK
 303/*
 304 * On kernel stack overflow, load up an initial stack pointer
 305 * and call StackOverflow(regs), which should not return.
 306 */
 307stack_ovf:
 308	/* sometimes we use a statically-allocated stack, which is OK. */
 309	lis	r12,_end@h
 310	ori	r12,r12,_end@l
 311	cmplw	r1,r12
 312	ble	5b			/* r1 <= &_end is OK */
 313	SAVE_NVGPRS(r11)
 314	addi	r3,r1,STACK_FRAME_OVERHEAD
 315	lis	r1,init_thread_union@ha
 316	addi	r1,r1,init_thread_union@l
 317	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
 318	lis	r9,StackOverflow@ha
 319	addi	r9,r9,StackOverflow@l
 320	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
 321#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 322	mtspr	SPRN_NRI, r0
 323#endif
 324	mtspr	SPRN_SRR0,r9
 325	mtspr	SPRN_SRR1,r10
 326	SYNC
 327	RFI
 328_ASM_NOKPROBE_SYMBOL(stack_ovf)
 329#endif
 330
 331#ifdef CONFIG_TRACE_IRQFLAGS
 332trace_syscall_entry_irq_off:
 333	/*
 334	 * Syscall shouldn't happen while interrupts are disabled,
 335	 * so let's do a warning here.
 336	 */
 3370:	trap
 338	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
 339	bl	trace_hardirqs_on
 340
 341	/* Now enable for real */
 342	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
 343	mtmsr	r10
 344
 345	REST_GPR(0, r1)
 346	REST_4GPRS(3, r1)
 347	REST_2GPRS(7, r1)
 348	b	DoSyscall
 349#endif /* CONFIG_TRACE_IRQFLAGS */
 350
 351	.globl	transfer_to_syscall
 352transfer_to_syscall:
 353#ifdef CONFIG_TRACE_IRQFLAGS
 354	andi.	r12,r9,MSR_EE
 355	beq-	trace_syscall_entry_irq_off
 356#endif /* CONFIG_TRACE_IRQFLAGS */
 357
 358/*
 359 * Handle a system call.
 360 */
 361	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
 362	.stabs	"entry_32.S",N_SO,0,0,0f
 3630:
 364
 365_GLOBAL(DoSyscall)
 366	stw	r3,ORIG_GPR3(r1)
 367	li	r12,0
 368	stw	r12,RESULT(r1)
 369#ifdef CONFIG_TRACE_IRQFLAGS
 370	/* Make sure interrupts are enabled */
 371	mfmsr	r11
 372	andi.	r12,r11,MSR_EE
 373	/* We came in with interrupts disabled, we WARN and mark them enabled
 374	 * for lockdep now */
 3750:	tweqi	r12, 0
 376	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
 377#endif /* CONFIG_TRACE_IRQFLAGS */
 378	lwz	r11,TI_FLAGS(r2)
 379	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
 380	bne-	syscall_dotrace
 381syscall_dotrace_cont:
 382	cmplwi	0,r0,NR_syscalls
 383	lis	r10,sys_call_table@h
 384	ori	r10,r10,sys_call_table@l
 385	slwi	r0,r0,2
 386	bge-	66f
 387
 388	barrier_nospec_asm
 389	/*
 390	 * Prevent the load of the handler below (based on the user-passed
 391	 * system call number) being speculatively executed until the test
 392	 * against NR_syscalls and branch to .66f above has
 393	 * committed.
 394	 */
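	/*
	 * Illustrative C-level sketch (simplified, not the actual code) of
	 * the hazard the barrier above addresses: without it, the table load
	 * below could execute speculatively with an out-of-range,
	 * user-controlled index before the bounds check has been resolved.
	 *
	 *	if (nr < NR_syscalls) {		// branch may be mispredicted
	 *		barrier_nospec();	// wait for the check to resolve
	 *		handler = sys_call_table[nr];
	 *		ret = handler(args);
	 *	}
	 */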
 395
 396	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
 397	mtlr	r10
 398	addi	r9,r1,STACK_FRAME_OVERHEAD
 399	PPC440EP_ERR42
 400	blrl			/* Call handler */
 401	.globl	ret_from_syscall
 402ret_from_syscall:
 403#ifdef CONFIG_DEBUG_RSEQ
 404	/* Check whether the syscall is issued inside a restartable sequence */
 405	stw	r3,GPR3(r1)
 406	addi    r3,r1,STACK_FRAME_OVERHEAD
 407	bl      rseq_syscall
 408	lwz	r3,GPR3(r1)
 409#endif
 410	mr	r6,r3
 411	/* disable interrupts so current_thread_info()->flags can't change */
 412	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
 413	/* Note: We don't bother telling lockdep about it */
 414	SYNC
 415	mtmsr	r10
 416	lwz	r9,TI_FLAGS(r2)
 417	li	r8,-MAX_ERRNO
 418	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 419	bne-	syscall_exit_work
 420	cmplw	0,r3,r8
 421	blt+	syscall_exit_cont
 422	lwz	r11,_CCR(r1)			/* Load CR */
 423	neg	r3,r3
 424	oris	r11,r11,0x1000	/* Set SO bit in CR */
 425	stw	r11,_CCR(r1)
 426syscall_exit_cont:
 427	lwz	r8,_MSR(r1)
 428#ifdef CONFIG_TRACE_IRQFLAGS
 429	/* If we are going to return from the syscall with interrupts
 430	 * off, we trace that here. It shouldn't normally happen.
 431	 */
 432	andi.	r10,r8,MSR_EE
 433	bne+	1f
 434	stw	r3,GPR3(r1)
 435	bl      trace_hardirqs_off
 436	lwz	r3,GPR3(r1)
 4371:
 438#endif /* CONFIG_TRACE_IRQFLAGS */
 439#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 440	/* If the process has its own DBCR0 value, load it up.  The internal
 441	   debug mode bit tells us that dbcr0 should be loaded. */
 442	lwz	r0,THREAD+THREAD_DBCR0(r2)
 443	andis.	r10,r0,DBCR0_IDM@h
 444	bnel-	load_dbcr0
 445#endif
 446#ifdef CONFIG_44x
 447BEGIN_MMU_FTR_SECTION
 448	lis	r4,icache_44x_need_flush@ha
 449	lwz	r5,icache_44x_need_flush@l(r4)
 450	cmplwi	cr0,r5,0
 451	bne-	2f
 4521:
 453END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
 454#endif /* CONFIG_44x */
 455BEGIN_FTR_SECTION
 456	lwarx	r7,0,r1
 457END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 458	stwcx.	r0,0,r1			/* to clear the reservation */
 459	ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
 460#ifdef CONFIG_PPC_BOOK3S_32
 461	kuep_unlock r5, r7
 462#endif
 463	kuap_check r2, r4
 464	lwz	r4,_LINK(r1)
 465	lwz	r5,_CCR(r1)
 466	mtlr	r4
 467	mtcr	r5
 468	lwz	r7,_NIP(r1)
 469	lwz	r2,GPR2(r1)
 470	lwz	r1,GPR1(r1)
 471syscall_exit_finish:
 472#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 473	mtspr	SPRN_NRI, r0
 474#endif
 475	mtspr	SPRN_SRR0,r7
 476	mtspr	SPRN_SRR1,r8
 477	SYNC
 478	RFI
 479_ASM_NOKPROBE_SYMBOL(syscall_exit_finish)
 480#ifdef CONFIG_44x
 4812:	li	r7,0
 482	iccci	r0,r0
 483	stw	r7,icache_44x_need_flush@l(r4)
 484	b	1b
 485#endif  /* CONFIG_44x */
 486
 48766:	li	r3,-ENOSYS
 488	b	ret_from_syscall
 489
 490	.globl	ret_from_fork
 491ret_from_fork:
 492	REST_NVGPRS(r1)
 493	bl	schedule_tail
 494	li	r3,0
 495	b	ret_from_syscall
 496
 497	.globl	ret_from_kernel_thread
 498ret_from_kernel_thread:
 499	REST_NVGPRS(r1)
 500	bl	schedule_tail
 501	mtlr	r14
 502	mr	r3,r15
 503	PPC440EP_ERR42
 504	blrl
 505	li	r3,0
 506	b	ret_from_syscall
 507
 508/* Traced system call support */
 509syscall_dotrace:
 510	SAVE_NVGPRS(r1)
 511	li	r0,0xc00
 512	stw	r0,_TRAP(r1)
 513	addi	r3,r1,STACK_FRAME_OVERHEAD
 514	bl	do_syscall_trace_enter
 515	/*
 516	 * Restore argument registers possibly just changed.
 517	 * We use the return value of do_syscall_trace_enter
 518	 * for call number to look up in the table (r0).
 519	 */
 520	mr	r0,r3
 521	lwz	r3,GPR3(r1)
 522	lwz	r4,GPR4(r1)
 523	lwz	r5,GPR5(r1)
 524	lwz	r6,GPR6(r1)
 525	lwz	r7,GPR7(r1)
 526	lwz	r8,GPR8(r1)
 527	REST_NVGPRS(r1)
 528
 529	cmplwi	r0,NR_syscalls
 530	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
 531	bge-	ret_from_syscall
 532	b	syscall_dotrace_cont
 533
 534syscall_exit_work:
 535	andi.	r0,r9,_TIF_RESTOREALL
 536	beq+	0f
 537	REST_NVGPRS(r1)
 538	b	2f
 5390:	cmplw	0,r3,r8
 540	blt+	1f
 541	andi.	r0,r9,_TIF_NOERROR
 542	bne-	1f
 543	lwz	r11,_CCR(r1)			/* Load CR */
 544	neg	r3,r3
 545	oris	r11,r11,0x1000	/* Set SO bit in CR */
 546	stw	r11,_CCR(r1)
 547
 5481:	stw	r6,RESULT(r1)	/* Save result */
 549	stw	r3,GPR3(r1)	/* Update return value */
 5502:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
 551	beq	4f
 552
 553	/* Clear per-syscall TIF flags if any are set.  */
 554
 555	li	r11,_TIF_PERSYSCALL_MASK
 556	addi	r12,r2,TI_FLAGS
 5573:	lwarx	r8,0,r12
 558	andc	r8,r8,r11
 559	stwcx.	r8,0,r12
 560	bne-	3b
 561	
 5624:	/* Anything which requires enabling interrupts? */
 563	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
 564	beq	ret_from_except
 565
 566	/* Re-enable interrupts. There is no need to trace that with
 567	 * lockdep as we are supposed to have IRQs on at this point
 568	 */
 569	ori	r10,r10,MSR_EE
 570	SYNC
 571	mtmsr	r10
 572
 573	/* Save NVGPRS if they're not saved already */
 574	lwz	r4,_TRAP(r1)
 575	andi.	r4,r4,1
 576	beq	5f
 577	SAVE_NVGPRS(r1)
 578	li	r4,0xc00
 579	stw	r4,_TRAP(r1)
 5805:
 581	addi	r3,r1,STACK_FRAME_OVERHEAD
 582	bl	do_syscall_trace_leave
 583	b	ret_from_except_full
 584
 585	/*
 586	 * System call was called from kernel. We get here with SRR1 in r9.
 587	 * Mark the exception as recoverable once we have retrieved SRR0,
 588	 * trap a warning and return ENOSYS with CR[SO] set.
 589	 */
 590	.globl	ret_from_kernel_syscall
 591ret_from_kernel_syscall:
 592	mfspr	r9, SPRN_SRR0
 593	mfspr	r10, SPRN_SRR1
 594#if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE)
 595	LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR|MSR_DR))
 596	mtmsr	r11
 597#endif
 598
 5990:	trap
 600	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
 601
 602	li	r3, ENOSYS
 603	crset	so
 604#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 605	mtspr	SPRN_NRI, r0
 606#endif
 607	mtspr	SPRN_SRR0, r9
 608	mtspr	SPRN_SRR1, r10
 609	SYNC
 610	RFI
 611_ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall)
 612
 613/*
 614 * The fork/clone functions need to copy the full register set into
 615 * the child process. Therefore we need to save all the nonvolatile
 616 * registers (r13 - r31) before calling the C code.
 617 */
 618	.globl	ppc_fork
 619ppc_fork:
 620	SAVE_NVGPRS(r1)
 621	lwz	r0,_TRAP(r1)
 622	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 623	stw	r0,_TRAP(r1)		/* register set saved */
 624	b	sys_fork
 625
 626	.globl	ppc_vfork
 627ppc_vfork:
 628	SAVE_NVGPRS(r1)
 629	lwz	r0,_TRAP(r1)
 630	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 631	stw	r0,_TRAP(r1)		/* register set saved */
 632	b	sys_vfork
 633
 634	.globl	ppc_clone
 635ppc_clone:
 636	SAVE_NVGPRS(r1)
 637	lwz	r0,_TRAP(r1)
 638	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 639	stw	r0,_TRAP(r1)		/* register set saved */
 640	b	sys_clone
 641
 642	.globl	ppc_clone3
 643ppc_clone3:
 644	SAVE_NVGPRS(r1)
 645	lwz	r0,_TRAP(r1)
 646	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 647	stw	r0,_TRAP(r1)		/* register set saved */
 648	b	sys_clone3
 649
 650	.globl	ppc_swapcontext
 651ppc_swapcontext:
 652	SAVE_NVGPRS(r1)
 653	lwz	r0,_TRAP(r1)
 654	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 655	stw	r0,_TRAP(r1)		/* register set saved */
 656	b	sys_swapcontext
 657
 658/*
 659 * Top-level page fault handling.
 660 * This is in assembler because if do_page_fault tells us that
 661 * it is a bad kernel page fault, we want to save the non-volatile
 662 * registers before calling bad_page_fault.
 663 */
 664	.globl	handle_page_fault
 665handle_page_fault:
 666	addi	r3,r1,STACK_FRAME_OVERHEAD
 667#ifdef CONFIG_PPC_BOOK3S_32
 668	andis.  r0,r5,DSISR_DABRMATCH@h
 669	bne-    handle_dabr_fault
 670#endif
 671	bl	do_page_fault
 672	cmpwi	r3,0
 673	beq+	ret_from_except
 674	SAVE_NVGPRS(r1)
 675	lwz	r0,_TRAP(r1)
 676	clrrwi	r0,r0,1
 677	stw	r0,_TRAP(r1)
 678	mr	r5,r3
 679	addi	r3,r1,STACK_FRAME_OVERHEAD
 680	lwz	r4,_DAR(r1)
 681	bl	bad_page_fault
 682	b	ret_from_except_full
 683
 684#ifdef CONFIG_PPC_BOOK3S_32
 685	/* We have a data breakpoint exception - handle it */
 686handle_dabr_fault:
 687	SAVE_NVGPRS(r1)
 688	lwz	r0,_TRAP(r1)
 689	clrrwi	r0,r0,1
 690	stw	r0,_TRAP(r1)
 691	bl      do_break
 692	b	ret_from_except_full
 693#endif
 694
 695/*
 696 * This routine switches between two different tasks.  The process
 697 * state of one is saved on its kernel stack.  Then the state
 698 * of the other is restored from its kernel stack.  The memory
 699 * management hardware is updated to the second process's state.
 700 * Finally, we can return to the second process.
 701 * On entry, r3 points to the THREAD for the current task, r4
 702 * points to the THREAD for the new task.
 703 *
 704 * This routine is always called with interrupts disabled.
 705 *
 706 * Note: there are two ways to get to the "going out" portion
 707 * of this code; either by coming in via the entry (_switch)
 708 * or via "fork" which must set up an environment equivalent
 709 * to the "_switch" path.  If you change this, you'll have to
 710 * change the fork code also.
 711 *
 712 * The code which creates the new task context is in 'copy_thread'
 713 * in arch/ppc/kernel/process.c
 714 */
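	/*
	 * Rough C-level sketch of what follows (illustrative only; the helper
	 * names are made up, not real kernel functions):
	 *
	 *	struct task_struct *_switch(struct thread_struct *prev,
	 *				    struct thread_struct *new)
	 *	{
	 *		save_nvgprs_msr_cr_to_stack();	// on prev's kernel stack
	 *		prev->ksp = r1;			// remember old stack ptr
	 *		SPRN_SPRG_THREAD = phys(new);	// per-CPU thread pointer
	 *		r1 = new->ksp;			// switch kernel stacks
	 *		last = current;
	 *		current = task_of(new);		// r2 = new - THREAD
	 *		restore_nvgprs_cr_from_stack();
	 *		return last;			// returned in r3
	 *	}
	 */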
 715_GLOBAL(_switch)
 716	stwu	r1,-INT_FRAME_SIZE(r1)
 717	mflr	r0
 718	stw	r0,INT_FRAME_SIZE+4(r1)
 719	/* r3-r12 are caller saved -- Cort */
 720	SAVE_NVGPRS(r1)
 721	stw	r0,_NIP(r1)	/* Return to switch caller */
 722	mfmsr	r11
 723	li	r0,MSR_FP	/* Disable floating-point */
 724#ifdef CONFIG_ALTIVEC
 725BEGIN_FTR_SECTION
 726	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
 727	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
 728	stw	r12,THREAD+THREAD_VRSAVE(r2)
 729END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 730#endif /* CONFIG_ALTIVEC */
 731#ifdef CONFIG_SPE
 732BEGIN_FTR_SECTION
 733	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
 734	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
 735	stw	r12,THREAD+THREAD_SPEFSCR(r2)
 736END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 737#endif /* CONFIG_SPE */
 738	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
 739	beq+	1f
 740	andc	r11,r11,r0
 741	mtmsr	r11
 742	isync
 7431:	stw	r11,_MSR(r1)
 744	mfcr	r10
 745	stw	r10,_CCR(r1)
 746	stw	r1,KSP(r3)	/* Set old stack pointer */
 747
 748	kuap_check r2, r0
 749#ifdef CONFIG_SMP
 750	/* We need a sync somewhere here to make sure that if the
 751	 * previous task gets rescheduled on another CPU, it sees all
 752	 * stores it has performed on this one.
 753	 */
 754	sync
 755#endif /* CONFIG_SMP */
 756
 757	tophys(r0,r4)
 758	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
 759	lwz	r1,KSP(r4)	/* Load new stack pointer */
 760
 761	/* save the old current 'last' for return value */
 762	mr	r3,r2
 763	addi	r2,r4,-THREAD	/* Update current */
 764
 765#ifdef CONFIG_ALTIVEC
 766BEGIN_FTR_SECTION
 767	lwz	r0,THREAD+THREAD_VRSAVE(r2)
 768	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
 769END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 770#endif /* CONFIG_ALTIVEC */
 771#ifdef CONFIG_SPE
 772BEGIN_FTR_SECTION
 773	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
 774	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
 775END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 776#endif /* CONFIG_SPE */
 777
 778	lwz	r0,_CCR(r1)
 779	mtcrf	0xFF,r0
 780	/* r3-r12 are destroyed -- Cort */
 781	REST_NVGPRS(r1)
 782
 783	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
 784	mtlr	r4
 785	addi	r1,r1,INT_FRAME_SIZE
 786	blr
 787
 788	.globl	fast_exception_return
 789fast_exception_return:
 790#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 791	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
 792	beq	1f			/* if not, we've got problems */
 793#endif
 794
 7952:	REST_4GPRS(3, r11)
 796	lwz	r10,_CCR(r11)
 797	REST_GPR(1, r11)
 798	mtcr	r10
 799	lwz	r10,_LINK(r11)
 800	mtlr	r10
 801	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
 802	li	r10, 0
 803	stw	r10, 8(r11)
 804	REST_GPR(10, r11)
 805#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 806	mtspr	SPRN_NRI, r0
 807#endif
 808	mtspr	SPRN_SRR1,r9
 809	mtspr	SPRN_SRR0,r12
 810	REST_GPR(9, r11)
 811	REST_GPR(12, r11)
 812	lwz	r11,GPR11(r11)
 813	SYNC
 814	RFI
 815_ASM_NOKPROBE_SYMBOL(fast_exception_return)
 816
 817#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 818/* check if the exception happened in a restartable section */
 8191:	lis	r3,exc_exit_restart_end@ha
 820	addi	r3,r3,exc_exit_restart_end@l
 821	cmplw	r12,r3
 822#ifdef CONFIG_PPC_BOOK3S_601
 823	bge	2b
 824#else
 825	bge	3f
 826#endif
 827	lis	r4,exc_exit_restart@ha
 828	addi	r4,r4,exc_exit_restart@l
 829	cmplw	r12,r4
 830#ifdef CONFIG_PPC_BOOK3S_601
 831	blt	2b
 832#else
 833	blt	3f
 834#endif
 835	lis	r3,fee_restarts@ha
 836	tophys(r3,r3)
 837	lwz	r5,fee_restarts@l(r3)
 838	addi	r5,r5,1
 839	stw	r5,fee_restarts@l(r3)
 840	mr	r12,r4		/* restart at exc_exit_restart */
 841	b	2b
 842
 843	.section .bss
 844	.align	2
 845fee_restarts:
 846	.space	4
 847	.previous
 848
 849/* aargh, a nonrecoverable interrupt, panic */
 850/* aargh, we don't know which trap this is */
 851/* but the 601 doesn't implement the RI bit, so assume it's OK */
 8523:
 853	li	r10,-1
 854	stw	r10,_TRAP(r11)
 855	addi	r3,r1,STACK_FRAME_OVERHEAD
 856	lis	r10,MSR_KERNEL@h
 857	ori	r10,r10,MSR_KERNEL@l
 858	bl	transfer_to_handler_full
 859	.long	unrecoverable_exception
 860	.long	ret_from_except
 861#endif
 862
 863	.globl	ret_from_except_full
 864ret_from_except_full:
 865	REST_NVGPRS(r1)
 866	/* fall through */
 867
 868	.globl	ret_from_except
 869ret_from_except:
 870	/* Hard-disable interrupts so that current_thread_info()->flags
 871	 * can't change between when we test it and when we return
 872	 * from the interrupt. */
 873	/* Note: We don't bother telling lockdep about it */
 874	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
 875	SYNC			/* Some chip revs have problems here... */
 876	mtmsr	r10		/* disable interrupts */
 877
 878	lwz	r3,_MSR(r1)	/* Returning to user mode? */
 879	andi.	r0,r3,MSR_PR
 880	beq	resume_kernel
 881
 882user_exc_return:		/* r10 contains MSR_KERNEL here */
 883	/* Check current_thread_info()->flags */
 884	lwz	r9,TI_FLAGS(r2)
 885	andi.	r0,r9,_TIF_USER_WORK_MASK
 886	bne	do_work
 887
 888restore_user:
 889#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 890	/* Check whether this process has its own DBCR0 value.  The internal
 891	   debug mode bit tells us that dbcr0 should be loaded. */
 892	lwz	r0,THREAD+THREAD_DBCR0(r2)
 893	andis.	r10,r0,DBCR0_IDM@h
 894	bnel-	load_dbcr0
 895#endif
 896	ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
 897#ifdef CONFIG_PPC_BOOK3S_32
 898	kuep_unlock	r10, r11
 899#endif
 900
 901	b	restore
 902
 903/* N.B. the only way to get here is from the beq following ret_from_except. */
 904resume_kernel:
 905	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
 906	lwz	r8,TI_FLAGS(r2)
 907	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
 908	beq+	1f
 909
 910	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
 911
 912	lwz	r3,GPR1(r1)
 913	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
 914	mr	r4,r1			/* src:  current exception frame */
 915	mr	r1,r3			/* Reroute the trampoline frame to r1 */
 916
 917	/* Copy from the original to the trampoline. */
 918	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
 919	li	r6,0			/* start offset: 0 */
 920	mtctr	r5
 9212:	lwzx	r0,r6,r4
 922	stwx	r0,r6,r3
 923	addi	r6,r6,4
 924	bdnz	2b
 925
 926	/* Do real store operation to complete stwu */
 927	lwz	r5,GPR1(r1)
 928	stw	r8,0(r5)
 929
 930	/* Clear _TIF_EMULATE_STACK_STORE flag */
 931	lis	r11,_TIF_EMULATE_STACK_STORE@h
 932	addi	r5,r2,TI_FLAGS
 9330:	lwarx	r8,0,r5
 934	andc	r8,r8,r11
 935	stwcx.	r8,0,r5
 936	bne-	0b
 9371:
 938
 939#ifdef CONFIG_PREEMPTION
 940	/* check current_thread_info->preempt_count */
 941	lwz	r0,TI_PREEMPT(r2)
 942	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
 943	bne	restore_kuap
 944	andi.	r8,r8,_TIF_NEED_RESCHED
 945	beq+	restore_kuap
 946	lwz	r3,_MSR(r1)
 947	andi.	r0,r3,MSR_EE	/* interrupts off? */
 948	beq	restore_kuap	/* don't schedule if so */
 949#ifdef CONFIG_TRACE_IRQFLAGS
 950	/* Lockdep thinks irqs are enabled, we need to call
 951	 * preempt_schedule_irq with IRQs off, so we inform lockdep
 952	 * now that we -did- turn them off already
 953	 */
 954	bl	trace_hardirqs_off
 955#endif
 956	bl	preempt_schedule_irq
 957#ifdef CONFIG_TRACE_IRQFLAGS
 958	/* And now, to properly rebalance the above, we tell lockdep they
 959	 * are being turned back on, which will happen when we return
 960	 */
 961	bl	trace_hardirqs_on
 962#endif
 963#endif /* CONFIG_PREEMPTION */
 964restore_kuap:
 965	kuap_restore r1, r2, r9, r10, r0
 966
 967	/* interrupts are hard-disabled at this point */
 968restore:
 969#ifdef CONFIG_44x
 970BEGIN_MMU_FTR_SECTION
 971	b	1f
 972END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
 973	lis	r4,icache_44x_need_flush@ha
 974	lwz	r5,icache_44x_need_flush@l(r4)
 975	cmplwi	cr0,r5,0
 976	beq+	1f
 977	li	r6,0
 978	iccci	r0,r0
 979	stw	r6,icache_44x_need_flush@l(r4)
 9801:
 981#endif  /* CONFIG_44x */
 982
 983	lwz	r9,_MSR(r1)
 984#ifdef CONFIG_TRACE_IRQFLAGS
 985	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
 986	 * off in this assembly code while peeking at TI_FLAGS() and such. However
 987	 * we need to inform it if the exception turned interrupts off, and we
 988	 * are about to turn them back on.
 989	 */
 990	andi.	r10,r9,MSR_EE
 991	beq	1f
 992	stwu	r1,-32(r1)
 993	mflr	r0
 994	stw	r0,4(r1)
 995	bl	trace_hardirqs_on
 996	addi	r1, r1, 32
 997	lwz	r9,_MSR(r1)
 9981:
 999#endif /* CONFIG_TRACE_IRQFLAGS */
1000
1001	lwz	r0,GPR0(r1)
1002	lwz	r2,GPR2(r1)
1003	REST_4GPRS(3, r1)
1004	REST_2GPRS(7, r1)
1005
1006	lwz	r10,_XER(r1)
1007	lwz	r11,_CTR(r1)
1008	mtspr	SPRN_XER,r10
1009	mtctr	r11
1010
1011BEGIN_FTR_SECTION
1012	lwarx	r11,0,r1
1013END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
1014	stwcx.	r0,0,r1			/* to clear the reservation */
1015
1016#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
1017	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
1018	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
1019
1020	lwz	r10,_CCR(r1)
1021	lwz	r11,_LINK(r1)
1022	mtcrf	0xFF,r10
1023	mtlr	r11
1024
1025	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
1026	li	r10, 0
1027	stw	r10, 8(r1)
1028	/*
1029	 * Once we put values in SRR0 and SRR1, we are in a state
1030	 * where exceptions are not recoverable, since taking an
1031	 * exception will trash SRR0 and SRR1.  Therefore we clear the
1032	 * MSR:RI bit to indicate this.  If we do take an exception,
1033	 * we can't return to the point of the exception but we
1034	 * can restart the exception exit path at the label
1035	 * exc_exit_restart below.  -- paulus
1036	 */
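	/*
	 * Note: with MSR[RI] cleared below, any exception taken before the
	 * final rfi is treated as nonrecoverable; the handler compares the
	 * interrupted NIP against exc_exit_restart/exc_exit_restart_end (see
	 * the "nonrecoverable" code later in this file) and restarts this
	 * exit sequence rather than returning to the clobbered SRR0/SRR1.
	 */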
1037	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
1038	SYNC
1039	mtmsr	r10		/* clear the RI bit */
1040	.globl exc_exit_restart
1041exc_exit_restart:
1042	lwz	r12,_NIP(r1)
1043	mtspr	SPRN_SRR0,r12
1044	mtspr	SPRN_SRR1,r9
1045	REST_4GPRS(9, r1)
1046	lwz	r1,GPR1(r1)
1047	.globl exc_exit_restart_end
1048exc_exit_restart_end:
1049	SYNC
1050	RFI
1051_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
1052_ASM_NOKPROBE_SYMBOL(exc_exit_restart_end)
1053
1054#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
1055	/*
1056	 * This is a bit different on 4xx/Book-E because it doesn't have
1057	 * the RI bit in the MSR.
1058	 * The TLB miss handler checks if we have interrupted
1059	 * the exception exit path and restarts it if so
1060	 * (well maybe one day it will... :).
1061	 */
1062	lwz	r11,_LINK(r1)
1063	mtlr	r11
1064	lwz	r10,_CCR(r1)
1065	mtcrf	0xff,r10
1066	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
1067	li	r10, 0
1068	stw	r10, 8(r1)
1069	REST_2GPRS(9, r1)
1070	.globl exc_exit_restart
1071exc_exit_restart:
1072	lwz	r11,_NIP(r1)
1073	lwz	r12,_MSR(r1)
1074	mtspr	SPRN_SRR0,r11
1075	mtspr	SPRN_SRR1,r12
1076	REST_2GPRS(11, r1)
1077	lwz	r1,GPR1(r1)
1078	.globl exc_exit_restart_end
1079exc_exit_restart_end:
1080	rfi
1081	b	.			/* prevent prefetch past rfi */
1082_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
1083
1084/*
1085 * Returning from a critical interrupt in user mode doesn't need
1086 * to be any different from a normal exception.  For a critical
1087 * interrupt in the kernel, we just return (without checking for
1088 * preemption) since the interrupt may have happened at some crucial
1089 * place (e.g. inside the TLB miss handler), and because we will be
1090 * running with r1 pointing into critical_stack, not the current
1091 * process's kernel stack (and therefore current_thread_info() will
1092 * give the wrong answer).
1093 * We have to restore various SPRs that may have been in use at the
1094 * time of the critical interrupt.
1095 *
1096 */
1097#ifdef CONFIG_40x
1098#define PPC_40x_TURN_OFF_MSR_DR						    \
1099	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
1100	 * assume the instructions here are mapped by a pinned TLB entry */ \
1101	li	r10,MSR_IR;						    \
1102	mtmsr	r10;							    \
1103	isync;								    \
1104	tophys(r1, r1);
1105#else
1106#define PPC_40x_TURN_OFF_MSR_DR
1107#endif
1108
1109#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
1110	REST_NVGPRS(r1);						\
1111	lwz	r3,_MSR(r1);						\
1112	andi.	r3,r3,MSR_PR;						\
1113	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL);				\
1114	bne	user_exc_return;					\
1115	lwz	r0,GPR0(r1);						\
1116	lwz	r2,GPR2(r1);						\
1117	REST_4GPRS(3, r1);						\
1118	REST_2GPRS(7, r1);						\
1119	lwz	r10,_XER(r1);						\
1120	lwz	r11,_CTR(r1);						\
1121	mtspr	SPRN_XER,r10;						\
1122	mtctr	r11;							\
1123	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1124	lwz	r11,_LINK(r1);						\
1125	mtlr	r11;							\
1126	lwz	r10,_CCR(r1);						\
1127	mtcrf	0xff,r10;						\
1128	PPC_40x_TURN_OFF_MSR_DR;					\
1129	lwz	r9,_DEAR(r1);						\
1130	lwz	r10,_ESR(r1);						\
1131	mtspr	SPRN_DEAR,r9;						\
1132	mtspr	SPRN_ESR,r10;						\
1133	lwz	r11,_NIP(r1);						\
1134	lwz	r12,_MSR(r1);						\
1135	mtspr	exc_lvl_srr0,r11;					\
1136	mtspr	exc_lvl_srr1,r12;					\
1137	lwz	r9,GPR9(r1);						\
1138	lwz	r12,GPR12(r1);						\
1139	lwz	r10,GPR10(r1);						\
1140	lwz	r11,GPR11(r1);						\
1141	lwz	r1,GPR1(r1);						\
1142	exc_lvl_rfi;							\
1143	b	.;		/* prevent prefetch past exc_lvl_rfi */
1144
1145#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1146	lwz	r9,_##exc_lvl_srr0(r1);					\
1147	lwz	r10,_##exc_lvl_srr1(r1);				\
1148	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1149	mtspr	SPRN_##exc_lvl_srr1,r10;
1150
1151#if defined(CONFIG_PPC_BOOK3E_MMU)
1152#ifdef CONFIG_PHYS_64BIT
1153#define	RESTORE_MAS7							\
1154	lwz	r11,MAS7(r1);						\
1155	mtspr	SPRN_MAS7,r11;
1156#else
1157#define	RESTORE_MAS7
1158#endif /* CONFIG_PHYS_64BIT */
1159#define RESTORE_MMU_REGS						\
1160	lwz	r9,MAS0(r1);						\
1161	lwz	r10,MAS1(r1);						\
1162	lwz	r11,MAS2(r1);						\
1163	mtspr	SPRN_MAS0,r9;						\
1164	lwz	r9,MAS3(r1);						\
1165	mtspr	SPRN_MAS1,r10;						\
1166	lwz	r10,MAS6(r1);						\
1167	mtspr	SPRN_MAS2,r11;						\
1168	mtspr	SPRN_MAS3,r9;						\
1169	mtspr	SPRN_MAS6,r10;						\
1170	RESTORE_MAS7;
1171#elif defined(CONFIG_44x)
1172#define RESTORE_MMU_REGS						\
1173	lwz	r9,MMUCR(r1);						\
1174	mtspr	SPRN_MMUCR,r9;
1175#else
1176#define RESTORE_MMU_REGS
1177#endif
1178
1179#ifdef CONFIG_40x
1180	.globl	ret_from_crit_exc
1181ret_from_crit_exc:
1182	mfspr	r9,SPRN_SPRG_THREAD
1183	lis	r10,saved_ksp_limit@ha;
1184	lwz	r10,saved_ksp_limit@l(r10);
1185	tovirt(r9,r9);
1186	stw	r10,KSP_LIMIT(r9)
1187	lis	r9,crit_srr0@ha;
1188	lwz	r9,crit_srr0@l(r9);
1189	lis	r10,crit_srr1@ha;
1190	lwz	r10,crit_srr1@l(r10);
1191	mtspr	SPRN_SRR0,r9;
1192	mtspr	SPRN_SRR1,r10;
1193	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1194_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
1195#endif /* CONFIG_40x */
1196
1197#ifdef CONFIG_BOOKE
1198	.globl	ret_from_crit_exc
1199ret_from_crit_exc:
1200	mfspr	r9,SPRN_SPRG_THREAD
1201	lwz	r10,SAVED_KSP_LIMIT(r1)
1202	stw	r10,KSP_LIMIT(r9)
1203	RESTORE_xSRR(SRR0,SRR1);
1204	RESTORE_MMU_REGS;
1205	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1206_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
1207
1208	.globl	ret_from_debug_exc
1209ret_from_debug_exc:
1210	mfspr	r9,SPRN_SPRG_THREAD
1211	lwz	r10,SAVED_KSP_LIMIT(r1)
1212	stw	r10,KSP_LIMIT(r9)
1213	RESTORE_xSRR(SRR0,SRR1);
1214	RESTORE_xSRR(CSRR0,CSRR1);
1215	RESTORE_MMU_REGS;
1216	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1217_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)
1218
1219	.globl	ret_from_mcheck_exc
1220ret_from_mcheck_exc:
1221	mfspr	r9,SPRN_SPRG_THREAD
1222	lwz	r10,SAVED_KSP_LIMIT(r1)
1223	stw	r10,KSP_LIMIT(r9)
1224	RESTORE_xSRR(SRR0,SRR1);
1225	RESTORE_xSRR(CSRR0,CSRR1);
1226	RESTORE_xSRR(DSRR0,DSRR1);
1227	RESTORE_MMU_REGS;
1228	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1229_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
1230#endif /* CONFIG_BOOKE */
1231
1232/*
1233 * Load the DBCR0 value for a task that is being ptraced,
1234 * having first saved away the global DBCR0.  Note that r0
1235 * has the dbcr0 value to set upon entry to this.
1236 */
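/*
 * Note: global_dbcr0 below reserves 8 bytes per CPU (.space 8*NR_CPUS):
 * word 0 holds the saved global DBCR0 value and word 4 a usage count,
 * which is why the code indexes by TASK_CPU shifted left by 3 and then
 * accesses offsets 0(r11) and 4(r11).
 */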
1237load_dbcr0:
1238	mfmsr	r10		/* first disable debug exceptions */
1239	rlwinm	r10,r10,0,~MSR_DE
1240	mtmsr	r10
1241	isync
1242	mfspr	r10,SPRN_DBCR0
1243	lis	r11,global_dbcr0@ha
1244	addi	r11,r11,global_dbcr0@l
1245#ifdef CONFIG_SMP
1246	lwz	r9,TASK_CPU(r2)
1247	slwi	r9,r9,3
1248	add	r11,r11,r9
1249#endif
1250	stw	r10,0(r11)
1251	mtspr	SPRN_DBCR0,r0
1252	lwz	r10,4(r11)
1253	addi	r10,r10,1
1254	stw	r10,4(r11)
1255	li	r11,-1
1256	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1257	blr
1258
1259	.section .bss
1260	.align	4
1261	.global global_dbcr0
1262global_dbcr0:
1263	.space	8*NR_CPUS
1264	.previous
1265#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1266
1267do_work:			/* r10 contains MSR_KERNEL here */
1268	andi.	r0,r9,_TIF_NEED_RESCHED
1269	beq	do_user_signal
1270
1271do_resched:			/* r10 contains MSR_KERNEL here */
1272#ifdef CONFIG_TRACE_IRQFLAGS
1273	bl	trace_hardirqs_on
1274	mfmsr	r10
1275#endif
1276	ori	r10,r10,MSR_EE
1277	SYNC
1278	mtmsr	r10		/* hard-enable interrupts */
1279	bl	schedule
1280recheck:
1281	/* Note: we don't tell lockdep that we are disabling them again
1282	 * either. Those disable/enable cycles used to peek at
1283	 * TI_FLAGS aren't advertised.
1284	 */
1285	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
1286	SYNC
1287	mtmsr	r10		/* disable interrupts */
1288	lwz	r9,TI_FLAGS(r2)
1289	andi.	r0,r9,_TIF_NEED_RESCHED
1290	bne-	do_resched
1291	andi.	r0,r9,_TIF_USER_WORK_MASK
1292	beq	restore_user
1293do_user_signal:			/* r10 contains MSR_KERNEL here */
1294	ori	r10,r10,MSR_EE
1295	SYNC
1296	mtmsr	r10		/* hard-enable interrupts */
1297	/* save r13-r31 in the exception frame, if not already done */
1298	lwz	r3,_TRAP(r1)
1299	andi.	r0,r3,1
1300	beq	2f
1301	SAVE_NVGPRS(r1)
1302	rlwinm	r3,r3,0,0,30
1303	stw	r3,_TRAP(r1)
13042:	addi	r3,r1,STACK_FRAME_OVERHEAD
1305	mr	r4,r9
1306	bl	do_notify_resume
1307	REST_NVGPRS(r1)
1308	b	recheck
1309
1310/*
1311 * We come here when we are at the end of handling an exception
1312 * that occurred at a place where taking an exception will lose
1313 * state information, such as the contents of SRR0 and SRR1.
1314 */
1315nonrecoverable:
1316	lis	r10,exc_exit_restart_end@ha
1317	addi	r10,r10,exc_exit_restart_end@l
1318	cmplw	r12,r10
1319#ifdef CONFIG_PPC_BOOK3S_601
1320	bgelr
1321#else
1322	bge	3f
1323#endif
1324	lis	r11,exc_exit_restart@ha
1325	addi	r11,r11,exc_exit_restart@l
1326	cmplw	r12,r11
1327#ifdef CONFIG_PPC_BOOK3S_601
1328	bltlr
1329#else
1330	blt	3f
1331#endif
1332	lis	r10,ee_restarts@ha
1333	lwz	r12,ee_restarts@l(r10)
1334	addi	r12,r12,1
1335	stw	r12,ee_restarts@l(r10)
1336	mr	r12,r11		/* restart at exc_exit_restart */
1337	blr
13383:	/* OK, we can't recover, kill this process */
1339	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1340	lwz	r3,_TRAP(r1)
1341	andi.	r0,r3,1
1342	beq	5f
1343	SAVE_NVGPRS(r1)
1344	rlwinm	r3,r3,0,0,30
1345	stw	r3,_TRAP(r1)
13465:	mfspr	r2,SPRN_SPRG_THREAD
1347	addi	r2,r2,-THREAD
1348	tovirt(r2,r2)			/* set back r2 to current */
13494:	addi	r3,r1,STACK_FRAME_OVERHEAD
1350	bl	unrecoverable_exception
1351	/* shouldn't return */
1352	b	4b
1353_ASM_NOKPROBE_SYMBOL(nonrecoverable)
1354
1355	.section .bss
1356	.align	2
1357ee_restarts:
1358	.space	4
1359	.previous
1360
1361/*
1362 * PROM code for specific machines follows.  Put it
1363 * here so it's easy to add arch-specific sections later.
1364 * -- Cort
1365 */
1366#ifdef CONFIG_PPC_RTAS
1367/*
1368 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1369 * called with the MMU off.
1370 */
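/*
 * Note: "MMU off" means real mode, i.e. MSR[IR] and MSR[DR] cleared.
 * Accordingly the code below builds SRR1 from MSR_KERNEL with IR/DR
 * masked out and converts the return address and stack pointer to
 * physical addresses (tophys) before the RFI into the RTAS entry point.
 */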
1371_GLOBAL(enter_rtas)
1372	stwu	r1,-INT_FRAME_SIZE(r1)
1373	mflr	r0
1374	stw	r0,INT_FRAME_SIZE+4(r1)
1375	LOAD_REG_ADDR(r4, rtas)
1376	lis	r6,1f@ha	/* physical return address for rtas */
1377	addi	r6,r6,1f@l
1378	tophys(r6,r6)
1379	tophys_novmstack r7, r1
1380	lwz	r8,RTASENTRY(r4)
1381	lwz	r4,RTASBASE(r4)
1382	mfmsr	r9
1383	stw	r9,8(r1)
1384	LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
1385	SYNC			/* disable interrupts so SRR0/1 */
1386	mtmsr	r0		/* don't get trashed */
1387	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1388	mtlr	r6
1389	stw	r7, THREAD + RTAS_SP(r2)
1390	mtspr	SPRN_SRR0,r8
1391	mtspr	SPRN_SRR1,r9
1392	RFI
13931:	tophys_novmstack r9, r1
1394#ifdef CONFIG_VMAP_STACK
1395	li	r0, MSR_KERNEL & ~MSR_IR	/* can take DTLB miss */
1396	mtmsr	r0
1397	isync
1398#endif
1399	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1400	lwz	r9,8(r9)	/* original msr value */
1401	addi	r1,r1,INT_FRAME_SIZE
1402	li	r0,0
1403	tophys_novmstack r7, r2
1404	stw	r0, THREAD + RTAS_SP(r7)
1405	mtspr	SPRN_SRR0,r8
1406	mtspr	SPRN_SRR1,r9
1407	RFI			/* return to caller */
1408_ASM_NOKPROBE_SYMBOL(enter_rtas)
1409#endif /* CONFIG_PPC_RTAS */
v6.2
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 *  PowerPC version
  4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  5 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
  6 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
  7 *  Adapted for Power Macintosh by Paul Mackerras.
  8 *  Low-level exception handlers and MMU support
  9 *  rewritten by Paul Mackerras.
 10 *    Copyright (C) 1996 Paul Mackerras.
 11 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 12 *
 13 *  This file contains the system call entry code, context switch
 14 *  code, and exception/interrupt return code for PowerPC.
 15 */
 16
 17#include <linux/errno.h>
 18#include <linux/err.h>
 19#include <linux/sys.h>
 20#include <linux/threads.h>
 21#include <linux/linkage.h>
 22
 23#include <asm/reg.h>
 24#include <asm/page.h>
 25#include <asm/mmu.h>
 26#include <asm/cputable.h>
 27#include <asm/thread_info.h>
 28#include <asm/ppc_asm.h>
 29#include <asm/asm-offsets.h>
 30#include <asm/unistd.h>
 31#include <asm/ptrace.h>
 32#include <asm/export.h>
 33#include <asm/feature-fixups.h>
 34#include <asm/barrier.h>
 35#include <asm/kup.h>
 36#include <asm/bug.h>
 37#include <asm/interrupt.h>
 38
 39#include "head_32.h"
 40
 41/*
 42 * powerpc relies on return from interrupt/syscall being context synchronising
 43 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 44 * synchronisation instructions.
 45 */
 46
 47/*
 48 * Align to 4k in order to ensure that all functions modifying srr0/srr1
 49 * fit into one page in order to not encounter a TLB miss between the
 50 * modification of srr0/srr1 and the associated rfi.
 51 */
 52	.align	12
 53
 54#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500)
 55	.globl	prepare_transfer_to_handler
 56prepare_transfer_to_handler:
 57	/* if from kernel, check interrupted DOZE/NAP mode */
 58	lwz	r12,TI_LOCAL_FLAGS(r2)
 59	mtcrf	0x01,r12
 60	bt-	31-TLF_NAPPING,4f
 61	bt-	31-TLF_SLEEPING,7f
 62	blr
 63
 644:	rlwinm	r12,r12,0,~_TLF_NAPPING
 65	stw	r12,TI_LOCAL_FLAGS(r2)
 66	b	power_save_ppc32_restore
 67
 687:	rlwinm	r12,r12,0,~_TLF_SLEEPING
 69	stw	r12,TI_LOCAL_FLAGS(r2)
 70	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
 71	rlwinm	r9,r9,0,~MSR_EE
 72	lwz	r12,_LINK(r11)		/* and return to address in LR */
 73	REST_GPR(2, r11)
 74	b	fast_exception_return
 75_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
 76#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */
 77
 78#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
 79SYM_FUNC_START(__kuep_lock)
 80	lwz	r9, THREAD+THSR0(r2)
 81	update_user_segments_by_4 r9, r10, r11, r12
 82	blr
 83SYM_FUNC_END(__kuep_lock)
 84
 85SYM_FUNC_START_LOCAL(__kuep_unlock)
 86	lwz	r9, THREAD+THSR0(r2)
 87	rlwinm  r9,r9,0,~SR_NX
 88	update_user_segments_by_4 r9, r10, r11, r12
 89	blr
 90SYM_FUNC_END(__kuep_unlock)
 91
 92.macro	kuep_lock
 93	bl	__kuep_lock
 94.endm
 95.macro	kuep_unlock
 96	bl	__kuep_unlock
 97.endm
 98#else
 99.macro	kuep_lock
100.endm
101.macro	kuep_unlock
102.endm
103#endif
104
105	.globl	transfer_to_syscall
106transfer_to_syscall:
107	stw	r3, ORIG_GPR3(r1)
108	stw	r11, GPR1(r1)
109	stw	r11, 0(r1)
110	mflr	r12
111	stw	r12, _LINK(r1)
112#ifdef CONFIG_BOOKE_OR_40x
113	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
114#endif
115	lis	r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
116	SAVE_GPR(2, r1)
117	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
118	stw	r9,_MSR(r1)
119	li	r2, INTERRUPT_SYSCALL
120	stw	r12,STACK_INT_FRAME_MARKER(r1)
121	stw	r2,_TRAP(r1)
122	SAVE_GPR(0, r1)
123	SAVE_GPRS(3, 8, r1)
124	addi	r2,r10,-THREAD
125	SAVE_NVGPRS(r1)
126	kuep_lock
127
128	/* Calling convention has r3 = regs, r4 = orig r0 */
129	addi	r3,r1,STACK_INT_FRAME_REGS
130	mr	r4,r0
131	bl	system_call_exception
132
133ret_from_syscall:
134	addi    r4,r1,STACK_INT_FRAME_REGS
135	li	r5,0
136	bl	syscall_exit_prepare
137#ifdef CONFIG_PPC_47x
138	lis	r4,icache_44x_need_flush@ha
139	lwz	r5,icache_44x_need_flush@l(r4)
140	cmplwi	cr0,r5,0
141	bne-	2f
142#endif /* CONFIG_PPC_47x */
143	kuep_unlock
144	lwz	r4,_LINK(r1)
145	lwz	r5,_CCR(r1)
146	mtlr	r4
147	lwz	r7,_NIP(r1)
148	lwz	r8,_MSR(r1)
149	cmpwi	r3,0
150	REST_GPR(3, r1)
151syscall_exit_finish:
152	mtspr	SPRN_SRR0,r7
153	mtspr	SPRN_SRR1,r8
154
155	bne	3f
156	mtcr	r5
157
1581:	REST_GPR(2, r1)
159	REST_GPR(1, r1)
160	rfi
161#ifdef CONFIG_40x
162	b .	/* Prevent prefetch past rfi */
163#endif
164
1653:	mtcr	r5
166	lwz	r4,_CTR(r1)
167	lwz	r5,_XER(r1)
168	REST_NVGPRS(r1)
169	mtctr	r4
170	mtxer	r5
171	REST_GPR(0, r1)
172	REST_GPRS(3, 12, r1)
173	b	1b
174
175#ifdef CONFIG_44x
1762:	li	r7,0
177	iccci	r0,r0
178	stw	r7,icache_44x_need_flush@l(r4)
179	b	1b
180#endif  /* CONFIG_44x */
181
182	.globl	ret_from_fork
183ret_from_fork:
184	REST_NVGPRS(r1)
185	bl	schedule_tail
186	li	r3,0
187	b	ret_from_syscall
188
189	.globl	ret_from_kernel_thread
190ret_from_kernel_thread:
191	REST_NVGPRS(r1)
192	bl	schedule_tail
193	mtctr	r14
194	mr	r3,r15
195	PPC440EP_ERR42
196	bctrl
197	li	r3,0
198	b	ret_from_syscall
199
200/*
201 * This routine switches between two different tasks.  The process
202 * state of one is saved on its kernel stack.  Then the state
203 * of the other is restored from its kernel stack.  The memory
204 * management hardware is updated to the second process's state.
205 * Finally, we can return to the second process.
206 * On entry, r3 points to the THREAD for the current task, r4
207 * points to the THREAD for the new task.
208 *
209 * This routine is always called with interrupts disabled.
210 *
211 * Note: there are two ways to get to the "going out" portion
212 * of this code; either by coming in via the entry (_switch)
213 * or via "fork" which must set up an environment equivalent
214 * to the "_switch" path.  If you change this, you'll have to
215 * change the fork code also.
216 *
217 * The code which creates the new task context is in 'copy_thread'
218 * in arch/ppc/kernel/process.c
219 */
220_GLOBAL(_switch)
221	stwu	r1,-SWITCH_FRAME_SIZE(r1)
222	mflr	r0
223	stw	r0,SWITCH_FRAME_SIZE+4(r1)
224	/* r3-r12 are caller saved -- Cort */
225	SAVE_NVGPRS(r1)
226	stw	r0,_NIP(r1)	/* Return to switch caller */
227	mfcr	r10
228	stw	r10,_CCR(r1)
229	stw	r1,KSP(r3)	/* Set old stack pointer */
230
231#ifdef CONFIG_SMP
232	/* We need a sync somewhere here to make sure that if the
233	 * previous task gets rescheduled on another CPU, it sees all
234	 * stores it has performed on this one.
235	 */
236	sync
237#endif /* CONFIG_SMP */
238
239	tophys(r0,r4)
240	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
241	lwz	r1,KSP(r4)	/* Load new stack pointer */
242
243	/* save the old current 'last' for return value */
244	mr	r3,r2
245	addi	r2,r4,-THREAD	/* Update current */
246
247	lwz	r0,_CCR(r1)
248	mtcrf	0xFF,r0
249	/* r3-r12 are destroyed -- Cort */
250	REST_NVGPRS(r1)
251
252	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
253	mtlr	r4
254	addi	r1,r1,SWITCH_FRAME_SIZE
255	blr
256
257	.globl	fast_exception_return
258fast_exception_return:
259#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
260	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
261	beq	3f			/* if not, we've got problems */
262#endif
263
2642:	lwz	r10,_CCR(r11)
265	REST_GPRS(1, 6, r11)
266	mtcr	r10
267	lwz	r10,_LINK(r11)
268	mtlr	r10
269	/* Clear the exception marker on the stack to avoid confusing stacktrace */
270	li	r10, 0
271	stw	r10, 8(r11)
272	REST_GPR(10, r11)
273#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
274	mtspr	SPRN_NRI, r0
275#endif
276	mtspr	SPRN_SRR1,r9
277	mtspr	SPRN_SRR0,r12
278	REST_GPR(9, r11)
279	REST_GPR(12, r11)
280	REST_GPR(11, r11)
281	rfi
282#ifdef CONFIG_40x
283	b .	/* Prevent prefetch past rfi */
284#endif
285_ASM_NOKPROBE_SYMBOL(fast_exception_return)
286
287/* aargh, a nonrecoverable interrupt, panic */
288/* aargh, we don't know which trap this is */
2893:
290	li	r10,-1
291	stw	r10,_TRAP(r11)
292	prepare_transfer_to_handler
293	bl	unrecoverable_exception
294	trap	/* should not get here */
295
296	.globl interrupt_return
297interrupt_return:
298	lwz	r4,_MSR(r1)
299	addi	r3,r1,STACK_INT_FRAME_REGS
300	andi.	r0,r4,MSR_PR
301	beq	.Lkernel_interrupt_return
302	bl	interrupt_exit_user_prepare
303	cmpwi	r3,0
304	kuep_unlock
305	bne-	.Lrestore_nvgprs
306
307.Lfast_user_interrupt_return:
308	lwz	r11,_NIP(r1)
309	lwz	r12,_MSR(r1)
310	mtspr	SPRN_SRR0,r11
311	mtspr	SPRN_SRR1,r12
312
313BEGIN_FTR_SECTION
314	stwcx.	r0,0,r1		/* to clear the reservation */
315FTR_SECTION_ELSE
316	lwarx	r0,0,r1
317ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
318
319	lwz	r3,_CCR(r1)
320	lwz	r4,_LINK(r1)
321	lwz	r5,_CTR(r1)
322	lwz	r6,_XER(r1)
323	li	r0,0
324
325	/*
326	 * Leaving a stale exception marker on the stack can confuse
327	 * the reliable stack unwinder later on. Clear it.
328	 */
329	stw	r0,8(r1)
330	REST_GPRS(7, 12, r1)
331
332	mtcr	r3
333	mtlr	r4
334	mtctr	r5
335	mtspr	SPRN_XER,r6
336
337	REST_GPRS(2, 6, r1)
338	REST_GPR(0, r1)
339	REST_GPR(1, r1)
340	rfi
341#ifdef CONFIG_40x
342	b .	/* Prevent prefetch past rfi */
343#endif
344
345.Lrestore_nvgprs:
346	REST_NVGPRS(r1)
347	b	.Lfast_user_interrupt_return
348
349.Lkernel_interrupt_return:
350	bl	interrupt_exit_kernel_prepare
351
352.Lfast_kernel_interrupt_return:
353	cmpwi	cr1,r3,0
354	lwz	r11,_NIP(r1)
355	lwz	r12,_MSR(r1)
356	mtspr	SPRN_SRR0,r11
357	mtspr	SPRN_SRR1,r12
358
359BEGIN_FTR_SECTION
360	stwcx.	r0,0,r1		/* to clear the reservation */
361FTR_SECTION_ELSE
362	lwarx	r0,0,r1
363ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
364
365	lwz	r3,_LINK(r1)
366	lwz	r4,_CTR(r1)
367	lwz	r5,_XER(r1)
368	lwz	r6,_CCR(r1)
369	li	r0,0
370
371	REST_GPRS(7, 12, r1)
372
373	mtlr	r3
374	mtctr	r4
375	mtspr	SPRN_XER,r5
376
377	/*
378	 * Leaving a stale exception marker on the stack can confuse
379	 * the reliable stack unwinder later on. Clear it.
380	 */
381	stw	r0,8(r1)
382
383	REST_GPRS(2, 5, r1)
384
385	bne-	cr1,1f /* emulate stack store */
386	mtcr	r6
387	REST_GPR(6, r1)
388	REST_GPR(0, r1)
389	REST_GPR(1, r1)
390	rfi
391#ifdef CONFIG_40x
392	b .	/* Prevent prefetch past rfi */
393#endif
394
3951:	/*
396	 * Emulate stack store with update. New r1 value was already calculated
397	 * and updated in our interrupt regs by emulate_loadstore, but we can't
398	 * store the previous value of r1 to the stack before re-loading our
399	 * registers from it, otherwise they could be clobbered.  Use
400	 * SPRG Scratch0 as temporary storage to hold the store
401	 * data, as interrupts are disabled here so it won't be clobbered.
402	 */
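	/*
	 * Illustrative sketch of the instruction being emulated here:
	 *	stwu r1,-N(r1)   =>   *(r1 - N) = old r1; r1 = r1 - N;
	 * The pointer update is already reflected in the saved GPR1, so only
	 * the store of the old r1 value remains to be performed below.
	 */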
403	mtcr	r6
404#ifdef CONFIG_BOOKE
405	mtspr	SPRN_SPRG_WSCRATCH0, r9
406#else
407	mtspr	SPRN_SPRG_SCRATCH0, r9
408#endif
409	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
410	REST_GPR(6, r1)
411	REST_GPR(0, r1)
412	REST_GPR(1, r1)
413	stw	r9,0(r1) /* perform store component of stwu */
414#ifdef CONFIG_BOOKE
415	mfspr	r9, SPRN_SPRG_RSCRATCH0
416#else
417	mfspr	r9, SPRN_SPRG_SCRATCH0
418#endif
419	rfi
420#ifdef CONFIG_40x
421	b .	/* Prevent prefetch past rfi */
422#endif
423_ASM_NOKPROBE_SYMBOL(interrupt_return)
424
425#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
426
427/*
428 * Returning from a critical interrupt in user mode doesn't need
429 * to be any different from a normal exception.  For a critical
430 * interrupt in the kernel, we just return (without checking for
431 * preemption) since the interrupt may have happened at some crucial
432 * place (e.g. inside the TLB miss handler), and because we will be
433 * running with r1 pointing into critical_stack, not the current
434 * process's kernel stack (and therefore current_thread_info() will
435 * give the wrong answer).
436 * We have to restore various SPRs that may have been in use at the
437 * time of the critical interrupt.
438 *
439 */
440#ifdef CONFIG_40x
441#define PPC_40x_TURN_OFF_MSR_DR						    \
442	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
443	 * assume the instructions here are mapped by a pinned TLB entry */ \
444	li	r10,MSR_IR;						    \
445	mtmsr	r10;							    \
446	isync;								    \
447	tophys(r1, r1);
448#else
449#define PPC_40x_TURN_OFF_MSR_DR
450#endif
451
452#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
453	REST_NVGPRS(r1);						\
454	lwz	r3,_MSR(r1);						\
455	andi.	r3,r3,MSR_PR;						\
456	bne	interrupt_return;					\
457	REST_GPR(0, r1);						\
458	REST_GPRS(2, 8, r1);						\
459	lwz	r10,_XER(r1);						\
460	lwz	r11,_CTR(r1);						\
461	mtspr	SPRN_XER,r10;						\
462	mtctr	r11;							\
463	stwcx.	r0,0,r1;		/* to clear the reservation */	\
464	lwz	r11,_LINK(r1);						\
465	mtlr	r11;							\
466	lwz	r10,_CCR(r1);						\
467	mtcrf	0xff,r10;						\
468	PPC_40x_TURN_OFF_MSR_DR;					\
469	lwz	r9,_DEAR(r1);						\
470	lwz	r10,_ESR(r1);						\
471	mtspr	SPRN_DEAR,r9;						\
472	mtspr	SPRN_ESR,r10;						\
473	lwz	r11,_NIP(r1);						\
474	lwz	r12,_MSR(r1);						\
475	mtspr	exc_lvl_srr0,r11;					\
476	mtspr	exc_lvl_srr1,r12;					\
477	REST_GPRS(9, 12, r1);						\
478	REST_GPR(1, r1);						\
479	exc_lvl_rfi;							\
480	b	.;		/* prevent prefetch past exc_lvl_rfi */
481
482#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
483	lwz	r9,_##exc_lvl_srr0(r1);					\
484	lwz	r10,_##exc_lvl_srr1(r1);				\
485	mtspr	SPRN_##exc_lvl_srr0,r9;					\
486	mtspr	SPRN_##exc_lvl_srr1,r10;
487
488#if defined(CONFIG_PPC_E500)
489#ifdef CONFIG_PHYS_64BIT
490#define	RESTORE_MAS7							\
491	lwz	r11,MAS7(r1);						\
492	mtspr	SPRN_MAS7,r11;
493#else
494#define	RESTORE_MAS7
495#endif /* CONFIG_PHYS_64BIT */
496#define RESTORE_MMU_REGS						\
497	lwz	r9,MAS0(r1);						\
498	lwz	r10,MAS1(r1);						\
499	lwz	r11,MAS2(r1);						\
500	mtspr	SPRN_MAS0,r9;						\
501	lwz	r9,MAS3(r1);						\
502	mtspr	SPRN_MAS1,r10;						\
503	lwz	r10,MAS6(r1);						\
504	mtspr	SPRN_MAS2,r11;						\
505	mtspr	SPRN_MAS3,r9;						\
506	mtspr	SPRN_MAS6,r10;						\
507	RESTORE_MAS7;
508#elif defined(CONFIG_44x)
509#define RESTORE_MMU_REGS						\
510	lwz	r9,MMUCR(r1);						\
511	mtspr	SPRN_MMUCR,r9;
512#else
513#define RESTORE_MMU_REGS
514#endif
515
516#ifdef CONFIG_40x
517	.globl	ret_from_crit_exc
518ret_from_crit_exc:
519	lis	r9,crit_srr0@ha;
520	lwz	r9,crit_srr0@l(r9);
521	lis	r10,crit_srr1@ha;
522	lwz	r10,crit_srr1@l(r10);
523	mtspr	SPRN_SRR0,r9;
524	mtspr	SPRN_SRR1,r10;
525	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
526_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
527#endif /* CONFIG_40x */
528
529#ifdef CONFIG_BOOKE
530	.globl	ret_from_crit_exc
531ret_from_crit_exc:
532	RESTORE_xSRR(SRR0,SRR1);
533	RESTORE_MMU_REGS;
534	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
535_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
536
537	.globl	ret_from_debug_exc
538ret_from_debug_exc:
539	RESTORE_xSRR(SRR0,SRR1);
540	RESTORE_xSRR(CSRR0,CSRR1);
541	RESTORE_MMU_REGS;
542	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
543_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)
544
545	.globl	ret_from_mcheck_exc
546ret_from_mcheck_exc:
547	RESTORE_xSRR(SRR0,SRR1);
548	RESTORE_xSRR(CSRR0,CSRR1);
549	RESTORE_xSRR(DSRR0,DSRR1);
550	RESTORE_MMU_REGS;
551	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
552_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
553#endif /* CONFIG_BOOKE */
554#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */