v6.2
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 *  PowerPC version 
  4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  5 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
  6 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
  7 *  Adapted for Power Macintosh by Paul Mackerras.
  8 *  Low-level exception handlers and MMU support
  9 *  rewritten by Paul Mackerras.
 10 *    Copyright (C) 1996 Paul Mackerras.
 11 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 12 *
 13 *  This file contains the system call entry code, context switch
 14 *  code, and exception/interrupt return code for PowerPC.
 15 */
 16
 17#include <linux/objtool.h>
 18#include <linux/errno.h>
 19#include <linux/err.h>
 20#include <asm/cache.h>
 21#include <asm/unistd.h>
 22#include <asm/processor.h>
 23#include <asm/page.h>
 24#include <asm/mmu.h>
 25#include <asm/thread_info.h>
 26#include <asm/code-patching-asm.h>
 27#include <asm/ppc_asm.h>
 28#include <asm/asm-offsets.h>
 29#include <asm/cputable.h>
 30#include <asm/firmware.h>
 31#include <asm/bug.h>
 32#include <asm/ptrace.h>
 33#include <asm/irqflags.h>
 34#include <asm/hw_irq.h>
 35#include <asm/context_tracking.h>
 36#include <asm/ppc-opcode.h>
 37#include <asm/barrier.h>
 38#include <asm/export.h>
 39#include <asm/asm-compat.h>
 40#ifdef CONFIG_PPC_BOOK3S
 41#include <asm/exception-64s.h>
 42#else
 43#include <asm/exception-64e.h>
 44#endif
 45#include <asm/feature-fixups.h>
 46#include <asm/kup.h>
 47
 48/*
 49 * System calls.
 50 */
 51	.section	".text"
 52
 53#ifdef CONFIG_PPC_BOOK3S_64
 54
 55#define FLUSH_COUNT_CACHE	\
 561:	nop;			\
 57	patch_site 1b, patch__call_flush_branch_caches1; \
 581:	nop;			\
 59	patch_site 1b, patch__call_flush_branch_caches2; \
 601:	nop;			\
 61	patch_site 1b, patch__call_flush_branch_caches3
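/*
 * Editor's note (not part of the original file): each "1: nop; patch_site"
 * pair above only reserves a patchable slot.  When the branch-cache /
 * count-cache flush mitigation is enabled at boot, the code-patching
 * machinery on Book3S-64 is expected to rewrite each site so that it
 * effectively reads:
 *
 *	bl	flush_branch_caches
 *
 * and to patch the nop back in if the mitigation is later disabled.
 */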
 62
 63.macro nops number
 64	.rept \number
 65	nop
 66	.endr
 67.endm
 68
 69.balign 32
 70.global flush_branch_caches
 71flush_branch_caches:
 72	/* Save LR into r9 */
 73	mflr	r9
 74
 75	// Flush the link stack
 76	.rept 64
 77	ANNOTATE_INTRA_FUNCTION_CALL
 78	bl	.+4
 79	.endr
 80	b	1f
 81	nops	6
 82
 83	.balign 32
 84	/* Restore LR */
 851:	mtlr	r9
 86
 87	// If we're just flushing the link stack, return here
 883:	nop
 89	patch_site 3b patch__flush_link_stack_return
 90
 91	li	r9,0x7fff
 92	mtctr	r9
 93
 94	PPC_BCCTR_FLUSH
 95
 962:	nop
 97	patch_site 2b patch__flush_count_cache_return
 98
 99	nops	3
100
101	.rept 278
102	.balign 32
103	PPC_BCCTR_FLUSH
104	nops	7
105	.endr
106
107	blr
108#else
109#define FLUSH_COUNT_CACHE
110#endif /* CONFIG_PPC_BOOK3S_64 */
111
112/*
113 * This routine switches between two different tasks.  The process
114 * state of one is saved on its kernel stack.  Then the state
115 * of the other is restored from its kernel stack.  The memory
116 * management hardware is updated to the second process's state.
117 * Finally, we can return to the second process, via interrupt_return.
118 * On entry, r3 points to the THREAD for the current task, r4
119 * points to the THREAD for the new task.
120 *
121 * Note: there are two ways to get to the "going out" portion
122 * of this code; either by coming in via the entry (_switch)
123 * or via "fork" which must set up an environment equivalent
124 * to the "_switch" path.  If you change this you'll have to change
125 * the fork code also.
126 *
127 * The code which creates the new task context is in 'copy_thread'
128 * in arch/powerpc/kernel/process.c 
129 */
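/*
 * Editor's sketch of the C-side caller (an assumption, not taken from this
 * file): __switch_to() in arch/powerpc/kernel/process.c is expected to end
 * with something like
 *
 *	last = _switch(old_thread, new_thread);
 *
 * so r3/r4 carry &prev->thread and &next->thread, and the value returned
 * in r3 at the bottom of this routine is the task_struct of the task we
 * switched away from.
 */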
130	.align	7
131_GLOBAL(_switch)
132	mflr	r0
133	std	r0,16(r1)
134	stdu	r1,-SWITCH_FRAME_SIZE(r1)
135	/* r3-r13 are caller saved -- Cort */
136	SAVE_NVGPRS(r1)
137	std	r0,_NIP(r1)	/* Return to switch caller */
138	mfcr	r23
139	std	r23,_CCR(r1)
140	std	r1,KSP(r3)	/* Set old stack pointer */
141
142	kuap_check_amr r9, r10
143
144	FLUSH_COUNT_CACHE	/* Clobbers r9, ctr */
145
146	/*
147	 * On SMP kernels, care must be taken because a task may be
148	 * scheduled off CPUx and on to CPUy. Memory ordering must be
149	 * considered.
150	 *
151	 * Cacheable stores on CPUx will be visible when the task is
152	 * scheduled on CPUy by virtue of the core scheduler barriers
153	 * (see "Notes on Program-Order guarantees on SMP systems." in
154	 * kernel/sched/core.c).
155	 *
156	 * Uncacheable stores in the case of involuntary preemption must
157	 * be taken care of. The smp_mb__after_spinlock() in __schedule()
158	 * is implemented as hwsync on powerpc, which orders MMIO too. So
159	 * long as there is an hwsync in the context switch path, it will
160	 * be executed on the source CPU after the task has performed
161	 * all MMIO ops on that CPU, and on the destination CPU before the
162	 * task performs any MMIO ops there.
163	 */
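	/*
	 * Editor's note (assumption, for reference): on powerpc the barrier
	 * named above is expected to be defined as a full barrier, roughly
	 *
	 *	#define smp_mb__after_spinlock()	smp_mb()	// i.e. hwsync
	 *
	 * which is what provides the ordering described here without an
	 * explicit sync in this path.
	 */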
164
165	/*
166	 * The kernel context switch path must contain a spin_lock,
167	 * which contains larx/stcx, which will clear any reservation
168	 * of the task being switched.
169	 */
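	/*
	 * Editor's sketch (illustrative only, not from this file): a powerpc
	 * spinlock acquire is built from a larx/stcx. loop along the lines of
	 *
	 *	1:	lwarx	r10,0,r3	// load lock word, set reservation
	 *		...			// test it / compute the new value
	 *		stwcx.	r10,0,r3	// store only if reservation held
	 *		bne-	1b
	 *
	 * so simply executing that lwarx replaces whatever reservation the
	 * outgoing task still held, and no explicit clearing is needed here.
	 */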
170#ifdef CONFIG_PPC_BOOK3S
171/* Cancel all explicit user streams as they will have no use after context
 172 * switch and will stop the HW from creating streams itself
 173 */
174	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
175#endif
176
177	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
178	std	r6,PACACURRENT(r13)	/* Set new 'current' */
179#if defined(CONFIG_STACKPROTECTOR)
180	ld	r6, TASK_CANARY(r6)
181	std	r6, PACA_CANARY(r13)
182#endif
183
184	ld	r8,KSP(r4)	/* new stack pointer */
185#ifdef CONFIG_PPC_64S_HASH_MMU
186BEGIN_MMU_FTR_SECTION
187	b	2f
188END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
189BEGIN_FTR_SECTION
190	clrrdi	r6,r8,28	/* get its ESID */
191	clrrdi	r9,r1,28	/* get current sp ESID */
192FTR_SECTION_ELSE
193	clrrdi	r6,r8,40	/* get its 1T ESID */
194	clrrdi	r9,r1,40	/* get current sp 1T ESID */
195ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
196	clrldi.	r0,r6,2		/* is new ESID c00000000? */
197	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
198	cror	eq,4*cr1+eq,eq
199	beq	2f		/* if yes, don't slbie it */
200
201	/* Bolt in the new stack SLB entry */
202	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
203	oris	r0,r6,(SLB_ESID_V)@h
204	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
205BEGIN_FTR_SECTION
206	li	r9,MMU_SEGSIZE_1T	/* insert B field */
207	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
208	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
209END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
210
211	/* Update the last bolted SLB.  No write barriers are needed
212	 * here, provided we only update the current CPU's SLB shadow
213	 * buffer.
214	 */
215	ld	r9,PACA_SLBSHADOWPTR(r13)
216	li	r12,0
217	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
218	li	r12,SLBSHADOW_STACKVSID
219	STDX_BE	r7,r12,r9			/* Save VSID */
220	li	r12,SLBSHADOW_STACKESID
221	STDX_BE	r0,r12,r9			/* Save ESID */
222
223	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
224	 * we have 1TB segments, the only CPUs known to have the errata
225	 * only support less than 1TB of system memory and we'll never
226	 * actually hit this code path.
227	 */
228
229	isync
230	slbie	r6
231BEGIN_FTR_SECTION
232	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
233END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
234	slbmte	r7,r0
235	isync
2362:
237#endif /* CONFIG_PPC_64S_HASH_MMU */
238
239	clrrdi	r7, r8, THREAD_SHIFT	/* base of new stack */
240	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
241	   because we don't need to leave the 288-byte ABI gap at the
242	   top of the kernel stack. */
243	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
244
245	/*
246	 * PMU interrupts in radix may come in here. They will use r1, not
247	 * PACAKSAVE, so this stack switch will not cause a problem. They
248	 * will store to the process stack, which may then be migrated to
249	 * another CPU. However the rq lock release on this CPU paired with
250	 * the rq lock acquire on the new CPU before the stack becomes
251	 * active on the new CPU, will order those stores.
252	 */
253	mr	r1,r8		/* start using new stack pointer */
254	std	r7,PACAKSAVE(r13)
255
256	ld	r6,_CCR(r1)
257	mtcrf	0xFF,r6
258
259	/* r3-r13 are destroyed -- Cort */
260	REST_NVGPRS(r1)
261
262	/* convert old thread to its task_struct for return value */
263	addi	r3,r3,-THREAD
264	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
265	mtlr	r7
266	addi	r1,r1,SWITCH_FRAME_SIZE
267	blr
268
269_GLOBAL(enter_prom)
270	mflr	r0
271	std	r0,16(r1)
272        stdu	r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space */
273
274	/* Because PROM is running in 32b mode, it clobbers the high order half
275	 * of all registers that it saves.  We therefore save those registers
276	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
277   	 */
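	/*
	 * Editor's note (assumption, not from this file): the expected
	 * caller is call_prom() in prom_init.c, with r3 holding a pointer
	 * to the 32-bit prom_args block and r4 the firmware entry point,
	 * which is why r4 is simply dropped into SRR0 below.
	 */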
278	SAVE_GPR(2, r1)
279	SAVE_GPR(13, r1)
280	SAVE_NVGPRS(r1)
281	mfcr	r10
282	mfmsr	r11
283	std	r10,_CCR(r1)
284	std	r11,_MSR(r1)
285
286	/* Put PROM address in SRR0 */
287	mtsrr0	r4
288
289	/* Setup our trampoline return addr in LR */
290	bcl	20,31,$+4
2910:	mflr	r4
292	addi	r4,r4,(1f - 0b)
293       	mtlr	r4
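	/*
	 * Editor's note: "bcl 20,31,$+4" is the usual position-independent
	 * idiom for reading the current address: an always-taken
	 * branch-and-link to the very next instruction, so LR ends up
	 * holding the address of label 0 above; the addi then turns that
	 * into the runtime address of label 1 below before the mtlr.
	 */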
294
295	/* Prepare a 32-bit mode big endian MSR
296	 */
297#ifdef CONFIG_PPC_BOOK3E_64
298	rlwinm	r11,r11,0,1,31
299	mtsrr1	r11
300	rfi
301#else /* CONFIG_PPC_BOOK3E_64 */
302	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_LE)
303	andc	r11,r11,r12
304	mtsrr1	r11
305	RFI_TO_KERNEL
306#endif /* CONFIG_PPC_BOOK3E_64 */
307
3081:	/* Return from OF */
309	FIXUP_ENDIAN
310
311	/* Just make sure that the top 32 bits of r1 didn't get
 312	 * corrupted by OF
 313	 */
314	rldicl	r1,r1,0,32
315
316	/* Restore the MSR (back to 64 bits) */
317	ld	r0,_MSR(r1)
318	MTMSRD(r0)
319        isync
320
321	/* Restore other registers */
322	REST_GPR(2, r1)
323	REST_GPR(13, r1)
324	REST_NVGPRS(r1)
325	ld	r4,_CCR(r1)
326	mtcr	r4
327
328        addi	r1,r1,SWITCH_FRAME_SIZE
329	ld	r0,16(r1)
330	mtlr    r0
331        blr
v3.5.6
 
   1/*
   2 *  PowerPC version 
   3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
   4 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
   5 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
   6 *  Adapted for Power Macintosh by Paul Mackerras.
   7 *  Low-level exception handlers and MMU support
   8 *  rewritten by Paul Mackerras.
   9 *    Copyright (C) 1996 Paul Mackerras.
  10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
  11 *
  12 *  This file contains the system call entry code, context switch
  13 *  code, and exception/interrupt return code for PowerPC.
  14 *
  15 *  This program is free software; you can redistribute it and/or
  16 *  modify it under the terms of the GNU General Public License
  17 *  as published by the Free Software Foundation; either version
  18 *  2 of the License, or (at your option) any later version.
  19 */
  20
  21#include <linux/errno.h>
  22#include <asm/unistd.h>
  23#include <asm/processor.h>
  24#include <asm/page.h>
  25#include <asm/mmu.h>
  26#include <asm/thread_info.h>
  27#include <asm/ppc_asm.h>
  28#include <asm/asm-offsets.h>
  29#include <asm/cputable.h>
  30#include <asm/firmware.h>
  31#include <asm/bug.h>
  32#include <asm/ptrace.h>
  33#include <asm/irqflags.h>
  34#include <asm/ftrace.h>
  35#include <asm/hw_irq.h>
  36
  37/*
  38 * System calls.
  39 */
  40	.section	".toc","aw"
  41.SYS_CALL_TABLE:
  42	.tc .sys_call_table[TC],.sys_call_table
  43
  44/* This value is used to mark exception frames on the stack. */
  45exception_marker:
  46	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
  47
  48	.section	".text"
  49	.align 7
  50
  51#undef SHOW_SYSCALLS
  52
  53	.globl system_call_common
  54system_call_common:
  55	andi.	r10,r12,MSR_PR
  56	mr	r10,r1
  57	addi	r1,r1,-INT_FRAME_SIZE
  58	beq-	1f
  59	ld	r1,PACAKSAVE(r13)
  601:	std	r10,0(r1)
  61	std	r11,_NIP(r1)
  62	std	r12,_MSR(r1)
  63	std	r0,GPR0(r1)
  64	std	r10,GPR1(r1)
  65	ACCOUNT_CPU_USER_ENTRY(r10, r11)
  66	std	r2,GPR2(r1)
  67	std	r3,GPR3(r1)
  68	mfcr	r2
  69	std	r4,GPR4(r1)
  70	std	r5,GPR5(r1)
  71	std	r6,GPR6(r1)
  72	std	r7,GPR7(r1)
  73	std	r8,GPR8(r1)
  74	li	r11,0
  75	std	r11,GPR9(r1)
  76	std	r11,GPR10(r1)
  77	std	r11,GPR11(r1)
  78	std	r11,GPR12(r1)
  79	std	r11,_XER(r1)
  80	std	r11,_CTR(r1)
  81	std	r9,GPR13(r1)
  82	mflr	r10
  83	/*
  84	 * This clears CR0.SO (bit 28), which is the error indication on
  85	 * return from this system call.
  86	 */
  87	rldimi	r2,r11,28,(63-28)
  88	li	r11,0xc01
  89	std	r10,_LINK(r1)
  90	std	r11,_TRAP(r1)
  91	std	r3,ORIG_GPR3(r1)
  92	std	r2,_CCR(r1)
  93	ld	r2,PACATOC(r13)
  94	addi	r9,r1,STACK_FRAME_OVERHEAD
  95	ld	r11,exception_marker@toc(r2)
  96	std	r11,-16(r9)		/* "regshere" marker */
  97#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
  98BEGIN_FW_FTR_SECTION
  99	beq	33f
 100	/* if from user, see if there are any DTL entries to process */
 101	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
 102	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
 103	ld	r10,LPPACA_DTLIDX(r10)	/* get log write index */
 104	cmpd	cr1,r11,r10
 105	beq+	cr1,33f
 106	bl	.accumulate_stolen_time
 107	REST_GPR(0,r1)
 108	REST_4GPRS(3,r1)
 109	REST_2GPRS(7,r1)
 110	addi	r9,r1,STACK_FRAME_OVERHEAD
 11133:
 112END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 113#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */
 114
 115	/*
 116	 * A syscall should always be called with interrupts enabled
 117	 * so we just unconditionally hard-enable here. When some kind
 118	 * of irq tracing is used, we additionally check that this
 119	 * condition is correct.
 120	 */
 121#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
 122	lbz	r10,PACASOFTIRQEN(r13)
 123	xori	r10,r10,1
 1241:	tdnei	r10,0
 125	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
 126#endif
 127
 128#ifdef CONFIG_PPC_BOOK3E
 129	wrteei	1
 130#else
 131	ld	r11,PACAKMSR(r13)
 132	ori	r11,r11,MSR_EE
 133	mtmsrd	r11,1
 134#endif /* CONFIG_PPC_BOOK3E */
 135
 136	/* We do need to set SOFTE in the stack frame or the return
 137	 * from interrupt will be painful
 138	 */
 139	li	r10,1
 140	std	r10,SOFTE(r1)
 141
 142#ifdef SHOW_SYSCALLS
 143	bl	.do_show_syscall
 144	REST_GPR(0,r1)
 145	REST_4GPRS(3,r1)
 146	REST_2GPRS(7,r1)
 147	addi	r9,r1,STACK_FRAME_OVERHEAD
 148#endif
 149	clrrdi	r11,r1,THREAD_SHIFT
 150	ld	r10,TI_FLAGS(r11)
 151	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
 152	bne-	syscall_dotrace
 153.Lsyscall_dotrace_cont:
 154	cmpldi	0,r0,NR_syscalls
 155	bge-	syscall_enosys
 156
 157system_call:			/* label this so stack traces look sane */
 158/*
 159 * Need to vector to 32 Bit or default sys_call_table here,
 160 * based on caller's run-mode / personality.
 161 */
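/*
 * Editor's sketch (assumption, based on the indexing code below): each
 * syscall is assumed to occupy a 16-byte slot in .sys_call_table, the
 * native 64-bit handler at offset 0 and the compat handler at offset 8,
 * roughly:
 *
 *	.llong	.sys_foo		// native entry, slot = 16 * NR
 *	.llong	.compat_sys_foo		// compat entry, slot + 8
 *
 * hence the "slwi r0,r0,4" to scale the syscall number and the
 * "addi r11,r11,8" taken for 32-bit tasks.
 */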
 162	ld	r11,.SYS_CALL_TABLE@toc(2)
 163	andi.	r10,r10,_TIF_32BIT
 164	beq	15f
 165	addi	r11,r11,8	/* use 32-bit syscall entries */
 166	clrldi	r3,r3,32
 167	clrldi	r4,r4,32
 168	clrldi	r5,r5,32
 169	clrldi	r6,r6,32
 170	clrldi	r7,r7,32
 171	clrldi	r8,r8,32
 17215:
 173	slwi	r0,r0,4
 174	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
 175	mtctr   r10
 176	bctrl			/* Call handler */
 177
 178syscall_exit:
 179	std	r3,RESULT(r1)
 180#ifdef SHOW_SYSCALLS
 181	bl	.do_show_syscall_exit
 182	ld	r3,RESULT(r1)
 183#endif
 184	clrrdi	r12,r1,THREAD_SHIFT
 185
 186	ld	r8,_MSR(r1)
 187#ifdef CONFIG_PPC_BOOK3S
 188	/* No MSR:RI on BookE */
 189	andi.	r10,r8,MSR_RI
 190	beq-	unrecov_restore
 191#endif
 192	/*
 193	 * Disable interrupts so current_thread_info()->flags can't change,
 194	 * and so that we don't get interrupted after loading SRR0/1.
 195	 */
 196#ifdef CONFIG_PPC_BOOK3E
 197	wrteei	0
 198#else
 199	ld	r10,PACAKMSR(r13)
 200	mtmsrd	r10,1
 201#endif /* CONFIG_PPC_BOOK3E */
 202
 203	ld	r9,TI_FLAGS(r12)
 204	li	r11,-_LAST_ERRNO
 205	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 206	bne-	syscall_exit_work
 207	cmpld	r3,r11
 208	ld	r5,_CCR(r1)
 209	bge-	syscall_error
 210.Lsyscall_error_cont:
 211	ld	r7,_NIP(r1)
 212BEGIN_FTR_SECTION
 213	stdcx.	r0,0,r1			/* to clear the reservation */
 214END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 215	andi.	r6,r8,MSR_PR
 216	ld	r4,_LINK(r1)
 217	/*
 218	 * Clear RI before restoring r13.  If we are returning to
 219	 * userspace and we take an exception after restoring r13,
 220	 * we end up corrupting the userspace r13 value.
 221	 */
 222#ifdef CONFIG_PPC_BOOK3S
 223	/* No MSR:RI on BookE */
 224	li	r12,MSR_RI
 225	andc	r11,r10,r12
 226	mtmsrd	r11,1			/* clear MSR.RI */
 227#endif /* CONFIG_PPC_BOOK3S */
 228
 229	beq-	1f
 230	ACCOUNT_CPU_USER_EXIT(r11, r12)
 231	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
 2321:	ld	r2,GPR2(r1)
 233	ld	r1,GPR1(r1)
 234	mtlr	r4
 235	mtcr	r5
 236	mtspr	SPRN_SRR0,r7
 237	mtspr	SPRN_SRR1,r8
 238	RFI
 239	b	.	/* prevent speculative execution */
 240
 241syscall_error:	
 242	oris	r5,r5,0x1000	/* Set SO bit in CR */
 243	neg	r3,r3
 244	std	r5,_CCR(r1)
 245	b	.Lsyscall_error_cont
 246	
 247/* Traced system call support */
 248syscall_dotrace:
 249	bl	.save_nvgprs
 250	addi	r3,r1,STACK_FRAME_OVERHEAD
 251	bl	.do_syscall_trace_enter
 252	/*
 253	 * Restore argument registers possibly just changed.
 254	 * We use the return value of do_syscall_trace_enter
 255	 * for the call number to look up in the table (r0).
 256	 */
 257	mr	r0,r3
 258	ld	r3,GPR3(r1)
 259	ld	r4,GPR4(r1)
 260	ld	r5,GPR5(r1)
 261	ld	r6,GPR6(r1)
 262	ld	r7,GPR7(r1)
 263	ld	r8,GPR8(r1)
 264	addi	r9,r1,STACK_FRAME_OVERHEAD
 265	clrrdi	r10,r1,THREAD_SHIFT
 266	ld	r10,TI_FLAGS(r10)
 267	b	.Lsyscall_dotrace_cont
 268
 269syscall_enosys:
 270	li	r3,-ENOSYS
 271	b	syscall_exit
 272	
 273syscall_exit_work:
 274	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
 275	 * If TIF_NOERROR is set, just save r3 as it is. */
 276
 277	andi.	r0,r9,_TIF_RESTOREALL
 278	beq+	0f
 279	REST_NVGPRS(r1)
 280	b	2f
 2810:	cmpld	r3,r11		/* r11 is -LAST_ERRNO */
 282	blt+	1f
 283	andi.	r0,r9,_TIF_NOERROR
 284	bne-	1f
 285	ld	r5,_CCR(r1)
 286	neg	r3,r3
 287	oris	r5,r5,0x1000	/* Set SO bit in CR */
 288	std	r5,_CCR(r1)
 2891:	std	r3,GPR3(r1)
 2902:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
 291	beq	4f
 292
 293	/* Clear per-syscall TIF flags if any are set.  */
 294
 295	li	r11,_TIF_PERSYSCALL_MASK
 296	addi	r12,r12,TI_FLAGS
 2973:	ldarx	r10,0,r12
 298	andc	r10,r10,r11
 299	stdcx.	r10,0,r12
 300	bne-	3b
 301	subi	r12,r12,TI_FLAGS
 302
 3034:	/* Anything else left to do? */
 304	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
 305	beq	.ret_from_except_lite
 306
 307	/* Re-enable interrupts */
 308#ifdef CONFIG_PPC_BOOK3E
 309	wrteei	1
 310#else
 311	ld	r10,PACAKMSR(r13)
 312	ori	r10,r10,MSR_EE
 313	mtmsrd	r10,1
 314#endif /* CONFIG_PPC_BOOK3E */
 315
 316	bl	.save_nvgprs
 317	addi	r3,r1,STACK_FRAME_OVERHEAD
 318	bl	.do_syscall_trace_leave
 319	b	.ret_from_except
 320
 321/* Save non-volatile GPRs, if not already saved. */
 322_GLOBAL(save_nvgprs)
 323	ld	r11,_TRAP(r1)
 324	andi.	r0,r11,1
 325	beqlr-
 326	SAVE_NVGPRS(r1)
 327	clrrdi	r0,r11,1
 328	std	r0,_TRAP(r1)
 329	blr
 330
 331	
 332/*
 333 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 334 * and thus put the process into the stopped state where we might
 335 * want to examine its user state with ptrace.  Therefore we need
 336 * to save all the nonvolatile registers (r14 - r31) before calling
 337 * the C code.  Similarly, fork, vfork and clone need the full
 338 * register state on the stack so that it can be copied to the child.
 339 */
 340
 341_GLOBAL(ppc_fork)
 342	bl	.save_nvgprs
 343	bl	.sys_fork
 344	b	syscall_exit
 345
 346_GLOBAL(ppc_vfork)
 347	bl	.save_nvgprs
 348	bl	.sys_vfork
 349	b	syscall_exit
 350
 351_GLOBAL(ppc_clone)
 352	bl	.save_nvgprs
 353	bl	.sys_clone
 354	b	syscall_exit
 355
 356_GLOBAL(ppc32_swapcontext)
 357	bl	.save_nvgprs
 358	bl	.compat_sys_swapcontext
 359	b	syscall_exit
 360
 361_GLOBAL(ppc64_swapcontext)
 362	bl	.save_nvgprs
 363	bl	.sys_swapcontext
 364	b	syscall_exit
 365
 366_GLOBAL(ret_from_fork)
 367	bl	.schedule_tail
 368	REST_NVGPRS(r1)
 369	li	r3,0
 370	b	syscall_exit
 371
 372	.section	".toc","aw"
 373DSCR_DEFAULT:
 374	.tc dscr_default[TC],dscr_default
 375
 376	.section	".text"
 377
 378/*
 379 * This routine switches between two different tasks.  The process
 380 * state of one is saved on its kernel stack.  Then the state
 381 * of the other is restored from its kernel stack.  The memory
 382 * management hardware is updated to the second process's state.
 383 * Finally, we can return to the second process, via ret_from_except.
 384 * On entry, r3 points to the THREAD for the current task, r4
 385 * points to the THREAD for the new task.
 386 *
 387 * Note: there are two ways to get to the "going out" portion
 388 * of this code; either by coming in via the entry (_switch)
 389 * or via "fork" which must set up an environment equivalent
 390 * to the "_switch" path.  If you change this you'll have to change
 391 * the fork code also.
 392 *
 393 * The code which creates the new task context is in 'copy_thread'
 394 * in arch/powerpc/kernel/process.c 
 395 */
 396	.align	7
 397_GLOBAL(_switch)
 398	mflr	r0
 399	std	r0,16(r1)
 400	stdu	r1,-SWITCH_FRAME_SIZE(r1)
 401	/* r3-r13 are caller saved -- Cort */
 402	SAVE_8GPRS(14, r1)
 403	SAVE_10GPRS(22, r1)
 404	mflr	r20		/* Return to switch caller */
 405	mfmsr	r22
 406	li	r0, MSR_FP
 407#ifdef CONFIG_VSX
 408BEGIN_FTR_SECTION
 409	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
 410END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 411#endif /* CONFIG_VSX */
 412#ifdef CONFIG_ALTIVEC
 413BEGIN_FTR_SECTION
 414	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
 415	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
 416	std	r24,THREAD_VRSAVE(r3)
 417END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 418#endif /* CONFIG_ALTIVEC */
 419#ifdef CONFIG_PPC64
 420BEGIN_FTR_SECTION
 421	mfspr	r25,SPRN_DSCR
 422	std	r25,THREAD_DSCR(r3)
 423END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
 424#endif
 425	and.	r0,r0,r22
 426	beq+	1f
 427	andc	r22,r22,r0
 428	MTMSRD(r22)
 429	isync
 4301:	std	r20,_NIP(r1)
 431	mfcr	r23
 432	std	r23,_CCR(r1)
 433	std	r1,KSP(r3)	/* Set old stack pointer */
 434
 435#ifdef CONFIG_SMP
 436	/* We need a sync somewhere here to make sure that if the
 437	 * previous task gets rescheduled on another CPU, it sees all
 438	 * stores it has performed on this one.
 439	 */
 440	sync
 441#endif /* CONFIG_SMP */
 442
 443	/*
 444	 * If we optimise away the clear of the reservation in system
 445	 * calls because we know the CPU tracks the address of the
 446	 * reservation, then we need to clear it here to cover the
 447	 * case that the kernel context switch path has no larx
 448	 * instructions.
 449	 */
 450BEGIN_FTR_SECTION
 451	ldarx	r6,0,r1
 452END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
 453
 454	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
 455	std	r6,PACACURRENT(r13)	/* Set new 'current' */
 456
 457	ld	r8,KSP(r4)	/* new stack pointer */
 458#ifdef CONFIG_PPC_BOOK3S
 459BEGIN_FTR_SECTION
 460  BEGIN_FTR_SECTION_NESTED(95)
 461	clrrdi	r6,r8,28	/* get its ESID */
 462	clrrdi	r9,r1,28	/* get current sp ESID */
 463  FTR_SECTION_ELSE_NESTED(95)
 464	clrrdi	r6,r8,40	/* get its 1T ESID */
 465	clrrdi	r9,r1,40	/* get current sp 1T ESID */
 466  ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
 467FTR_SECTION_ELSE
 468	b	2f
 469ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
 470	clrldi.	r0,r6,2		/* is new ESID c00000000? */
 471	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
 472	cror	eq,4*cr1+eq,eq
 473	beq	2f		/* if yes, don't slbie it */
 474
 475	/* Bolt in the new stack SLB entry */
 476	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
 477	oris	r0,r6,(SLB_ESID_V)@h
 478	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
 479BEGIN_FTR_SECTION
 480	li	r9,MMU_SEGSIZE_1T	/* insert B field */
 481	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
 482	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
 483END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 484
 485	/* Update the last bolted SLB.  No write barriers are needed
 486	 * here, provided we only update the current CPU's SLB shadow
 487	 * buffer.
 488	 */
 489	ld	r9,PACA_SLBSHADOWPTR(r13)
 490	li	r12,0
 491	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
 492	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
 493	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */
 494
 495	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
 496	 * we have 1TB segments, the only CPUs known to have the errata
 497	 * only support less than 1TB of system memory and we'll never
 498	 * actually hit this code path.
 499	 */
 500
 501	slbie	r6
 502	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
 503	slbmte	r7,r0
 504	isync
 5052:
 506#endif /* !CONFIG_PPC_BOOK3S */
 507
 508	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
 509	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
 510	   because we don't need to leave the 288-byte ABI gap at the
 511	   top of the kernel stack. */
 512	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
 513
 514	mr	r1,r8		/* start using new stack pointer */
 515	std	r7,PACAKSAVE(r13)
 516
 517#ifdef CONFIG_ALTIVEC
 518BEGIN_FTR_SECTION
 519	ld	r0,THREAD_VRSAVE(r4)
 520	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
 521END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 522#endif /* CONFIG_ALTIVEC */
 523#ifdef CONFIG_PPC64
 524BEGIN_FTR_SECTION
 525	lwz	r6,THREAD_DSCR_INHERIT(r4)
 526	ld	r7,DSCR_DEFAULT@toc(2)
 527	ld	r0,THREAD_DSCR(r4)
 528	cmpwi	r6,0
 529	bne	1f
 530	ld	r0,0(r7)
 5311:	cmpd	r0,r25
 532	beq	2f
 533	mtspr	SPRN_DSCR,r0
 5342:
 535END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
 536#endif
 537
 538	ld	r6,_CCR(r1)
 539	mtcrf	0xFF,r6
 540
 541	/* r3-r13 are destroyed -- Cort */
 542	REST_8GPRS(14, r1)
 543	REST_10GPRS(22, r1)
 544
 545	/* convert old thread to its task_struct for return value */
 546	addi	r3,r3,-THREAD
 547	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
 548	mtlr	r7
 549	addi	r1,r1,SWITCH_FRAME_SIZE
 550	blr
 551
 552	.align	7
 553_GLOBAL(ret_from_except)
 554	ld	r11,_TRAP(r1)
 555	andi.	r0,r11,1
 556	bne	.ret_from_except_lite
 557	REST_NVGPRS(r1)
 558
 559_GLOBAL(ret_from_except_lite)
 560	/*
 561	 * Disable interrupts so that current_thread_info()->flags
 562	 * can't change between when we test it and when we return
 563	 * from the interrupt.
 564	 */
 565#ifdef CONFIG_PPC_BOOK3E
 566	wrteei	0
 567#else
 568	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
 569	mtmsrd	r10,1		  /* Update machine state */
 570#endif /* CONFIG_PPC_BOOK3E */
 571
 572	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
 573	ld	r3,_MSR(r1)
 574	ld	r4,TI_FLAGS(r9)
 575	andi.	r3,r3,MSR_PR
 576	beq	resume_kernel
 577
 578	/* Check current_thread_info()->flags */
 579	andi.	r0,r4,_TIF_USER_WORK_MASK
 580	beq	restore
 581
 582	andi.	r0,r4,_TIF_NEED_RESCHED
 583	beq	1f
 584	bl	.restore_interrupts
 585	bl	.schedule
 586	b	.ret_from_except_lite
 587
 5881:	bl	.save_nvgprs
 589	bl	.restore_interrupts
 590	addi	r3,r1,STACK_FRAME_OVERHEAD
 591	bl	.do_notify_resume
 592	b	.ret_from_except
 593
 594resume_kernel:
 595#ifdef CONFIG_PREEMPT
 596	/* Check if we need to preempt */
 597	andi.	r0,r4,_TIF_NEED_RESCHED
 598	beq+	restore
 599	/* Check that preempt_count() == 0 and interrupts are enabled */
 600	lwz	r8,TI_PREEMPT(r9)
 601	cmpwi	cr1,r8,0
 602	ld	r0,SOFTE(r1)
 603	cmpdi	r0,0
 604	crandc	eq,cr1*4+eq,eq
 605	bne	restore
 606
 607	/*
 608	 * Here we are preempting the current task. We want to make
 609	 * sure we are soft-disabled first
 610	 */
 611	SOFT_DISABLE_INTS(r3,r4)
 6121:	bl	.preempt_schedule_irq
 613
 614	/* Re-test flags and eventually loop */
 615	clrrdi	r9,r1,THREAD_SHIFT
 616	ld	r4,TI_FLAGS(r9)
 617	andi.	r0,r4,_TIF_NEED_RESCHED
 618	bne	1b
 619#endif /* CONFIG_PREEMPT */
 620
 621	.globl	fast_exc_return_irq
 622fast_exc_return_irq:
 623restore:
 624	/*
 625	 * This is the main kernel exit path. First we check if we
 626	 * are about to re-enable interrupts
 627	 */
 628	ld	r5,SOFTE(r1)
 629	lbz	r6,PACASOFTIRQEN(r13)
 630	cmpwi	cr0,r5,0
 631	beq	restore_irq_off
 632
 633	/* We are enabling, were we already enabled? If yes, just return */
 634	cmpwi	cr0,r6,1
 635	beq	cr0,do_restore
 636
 637	/*
 638	 * We are about to soft-enable interrupts (we are hard disabled
 639	 * at this point). We check if there's anything that needs to
 640	 * be replayed first.
 641	 */
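	/*
	 * Editor's note: a non-zero PACAIRQHAPPENED means an interrupt was
	 * taken (and masked) while we were soft-disabled under the lazy
	 * interrupt scheme, so it has to be replayed before interrupts are
	 * really re-enabled; that is what restore_check_irq_replay does.
	 */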
 642	lbz	r0,PACAIRQHAPPENED(r13)
 643	cmpwi	cr0,r0,0
 644	bne-	restore_check_irq_replay
 645
 646	/*
 647	 * Get here when nothing happened while soft-disabled, just
 648	 * soft-enable and move on. We will hard-enable as a side
 649	 * effect of rfi
 650	 */
 651restore_no_replay:
 652	TRACE_ENABLE_INTS
 653	li	r0,1
 654	stb	r0,PACASOFTIRQEN(r13);
 655
 656	/*
 657	 * Final return path. BookE is handled in a different file
 658	 */
 659do_restore:
 660#ifdef CONFIG_PPC_BOOK3E
 661	b	.exception_return_book3e
 662#else
 663	/*
 664	 * Clear the reservation. If we know the CPU tracks the address of
 665	 * the reservation then we can potentially save some cycles and use
 666	 * a larx. On POWER6 and POWER7 this is significantly faster.
 667	 */
 668BEGIN_FTR_SECTION
 669	stdcx.	r0,0,r1		/* to clear the reservation */
 670FTR_SECTION_ELSE
 671	ldarx	r4,0,r1
 672ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 673
 674	/*
 675	 * Some code paths such as load_up_fpu or altivec return directly
 676	 * here. They run entirely hard disabled and do not alter the
 677	 * interrupt state. They also don't use lwarx/stwcx. and thus
 678	 * are known not to leave dangling reservations.
 679	 */
 680	.globl	fast_exception_return
 681fast_exception_return:
 682	ld	r3,_MSR(r1)
 683	ld	r4,_CTR(r1)
 684	ld	r0,_LINK(r1)
 685	mtctr	r4
 686	mtlr	r0
 687	ld	r4,_XER(r1)
 688	mtspr	SPRN_XER,r4
 689
 690	REST_8GPRS(5, r1)
 691
 692	andi.	r0,r3,MSR_RI
 693	beq-	unrecov_restore
 694
 695	/*
 696	 * Clear RI before restoring r13.  If we are returning to
 697	 * userspace and we take an exception after restoring r13,
 698	 * we end up corrupting the userspace r13 value.
 699	 */
 700	ld	r4,PACAKMSR(r13) /* Get kernel MSR without EE */
 701	andc	r4,r4,r0	 /* r0 contains MSR_RI here */
 702	mtmsrd	r4,1
 703
 704	/*
 705	 * r13 is our per cpu area, only restore it if we are returning to
 706	 * userspace; the value stored in the stack frame may belong to
 707	 * another CPU.
 708	 */
 709	andi.	r0,r3,MSR_PR
 710	beq	1f
 711	ACCOUNT_CPU_USER_EXIT(r2, r4)
 712	REST_GPR(13, r1)
 7131:
 714	mtspr	SPRN_SRR1,r3
 715
 716	ld	r2,_CCR(r1)
 717	mtcrf	0xFF,r2
 718	ld	r2,_NIP(r1)
 719	mtspr	SPRN_SRR0,r2
 720
 721	ld	r0,GPR0(r1)
 722	ld	r2,GPR2(r1)
 723	ld	r3,GPR3(r1)
 724	ld	r4,GPR4(r1)
 725	ld	r1,GPR1(r1)
 726
 727	rfid
 728	b	.	/* prevent speculative execution */
 729
 730#endif /* CONFIG_PPC_BOOK3E */
 731
 732	/*
 733	 * We are returning to a context with interrupts soft disabled.
 734	 *
 735	 * However, we may also be about to hard enable, so we need to
 736	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
 737	 * or that bit can get out of sync and bad things will happen.
 738	 */
 739restore_irq_off:
 740	ld	r3,_MSR(r1)
 741	lbz	r7,PACAIRQHAPPENED(r13)
 742	andi.	r0,r3,MSR_EE
 743	beq	1f
 744	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
 745	stb	r7,PACAIRQHAPPENED(r13)
 7461:	li	r0,0
 747	stb	r0,PACASOFTIRQEN(r13);
 748	TRACE_DISABLE_INTS
 749	b	do_restore
 750
 751	/*
 752	 * Something did happen, check if a re-emit is needed
 753	 * (this also clears paca->irq_happened)
 754	 */
 755restore_check_irq_replay:
 756	/* XXX: We could implement a fast path here where we check
 757	 * for irq_happened being just 0x01, in which case we can
 758	 * clear it and return. That means that we would potentially
 759	 * miss a decrementer having wrapped all the way around.
 760	 *
 761	 * Still, this might be useful for things like hash_page
 762	 */
 763	bl	.__check_irq_replay
 764	cmpwi	cr0,r3,0
 765 	beq	restore_no_replay
 766 
 767	/*
 768	 * We need to re-emit an interrupt. We do so by re-using our
 769	 * existing exception frame. We first change the trap value,
 770	 * but we need to ensure we preserve the low nibble of it
 771	 */
 772	ld	r4,_TRAP(r1)
 773	clrldi	r4,r4,60
 774	or	r4,r4,r3
 775	std	r4,_TRAP(r1)
 776
 777	/*
 778	 * Then find the right handler and call it. Interrupts are
 779	 * still soft-disabled and we keep them that way.
 780	*/
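	/*
	 * Editor's note: r3 here is the vector number returned by
	 * __check_irq_replay (0x500 external interrupt, 0x900 decrementer,
	 * 0x280 doorbell on Book3E), which the comparisons below use to
	 * pick the handler to call.
	 */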
 781	cmpwi	cr0,r3,0x500
 782	bne	1f
 783	addi	r3,r1,STACK_FRAME_OVERHEAD;
 784 	bl	.do_IRQ
 785	b	.ret_from_except
 7861:	cmpwi	cr0,r3,0x900
 787	bne	1f
 788	addi	r3,r1,STACK_FRAME_OVERHEAD;
 789	bl	.timer_interrupt
 790	b	.ret_from_except
 791#ifdef CONFIG_PPC_BOOK3E
 7921:	cmpwi	cr0,r3,0x280
 793	bne	1f
 794	addi	r3,r1,STACK_FRAME_OVERHEAD;
 795	bl	.doorbell_exception
 796	b	.ret_from_except
 797#endif /* CONFIG_PPC_BOOK3E */
 7981:	b	.ret_from_except /* What else to do here ? */
 799 
 800unrecov_restore:
 801	addi	r3,r1,STACK_FRAME_OVERHEAD
 802	bl	.unrecoverable_exception
 803	b	unrecov_restore
 804
 805#ifdef CONFIG_PPC_RTAS
 806/*
 807 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 808 * called with the MMU off.
 809 *
 810 * In addition, we need to be in 32b mode, at least for now.
 811 * 
 812 * Note: r3 is an input parameter to rtas, so don't trash it...
 813 */
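/*
 * Editor's sketch of the expected C-side call (an assumption, not taken
 * from this file): the argument block is the kernel's global rtas_args
 * structure and enter_rtas() is handed its physical address, roughly
 *
 *	enter_rtas(__pa(&rtas.args));
 *
 * which is consistent with the code below dropping to real mode
 * (MSR_IR/MSR_DR cleared) before branching to the firmware entry point.
 */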
 814_GLOBAL(enter_rtas)
 815	mflr	r0
 816	std	r0,16(r1)
 817        stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */
 818
 819	/* Because RTAS is running in 32b mode, it clobbers the high order half
 820	 * of all registers that it saves.  We therefore save those registers
 821	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
 822   	 */
 823	SAVE_GPR(2, r1)			/* Save the TOC */
 824	SAVE_GPR(13, r1)		/* Save paca */
 825	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
 826	SAVE_10GPRS(22, r1)		/* ditto */
 827
 828	mfcr	r4
 829	std	r4,_CCR(r1)
 830	mfctr	r5
 831	std	r5,_CTR(r1)
 832	mfspr	r6,SPRN_XER
 833	std	r6,_XER(r1)
 834	mfdar	r7
 835	std	r7,_DAR(r1)
 836	mfdsisr	r8
 837	std	r8,_DSISR(r1)
 838
 839	/* Temporary workaround to clear CR until RTAS can be modified to
 840	 * ignore all bits.
 841	 */
 842	li	r0,0
 843	mtcr	r0
 844
 845#ifdef CONFIG_BUG	
 846	/* There is no way it is acceptable to get here with interrupts enabled,
 847	 * check it with the asm equivalent of WARN_ON
 848	 */
 849	lbz	r0,PACASOFTIRQEN(r13)
 8501:	tdnei	r0,0
 851	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
 852#endif
 853	
 854	/* Hard-disable interrupts */
 855	mfmsr	r6
 856	rldicl	r7,r6,48,1
 857	rotldi	r7,r7,16
 858	mtmsrd	r7,1
 859
 860	/* Unfortunately, the stack pointer and the MSR are also clobbered,
 861	 * so they are saved in the PACA which allows us to restore
 862	 * our original state after RTAS returns.
 863         */
 864	std	r1,PACAR1(r13)
 865        std	r6,PACASAVEDMSR(r13)
 866
 867	/* Setup our real return addr */	
 868	LOAD_REG_ADDR(r4,.rtas_return_loc)
 869	clrldi	r4,r4,2			/* convert to realmode address */
 870       	mtlr	r4
 871
 872	li	r0,0
 873	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
 874	andc	r0,r6,r0
 875	
 876        li      r9,1
 877        rldicr  r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
 878	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
 879	andc	r6,r0,r9
 880	sync				/* disable interrupts so SRR0/1 */
 881	mtmsrd	r0			/* don't get trashed */
 882
 883	LOAD_REG_ADDR(r4, rtas)
 884	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
 885	ld	r4,RTASBASE(r4)		/* get the rtas->base value */
 886	
 887	mtspr	SPRN_SRR0,r5
 888	mtspr	SPRN_SRR1,r6
 889	rfid
 890	b	.	/* prevent speculative execution */
 891
 892_STATIC(rtas_return_loc)
 893	/* relocation is off at this point */
 894	GET_PACA(r4)
 895	clrldi	r4,r4,2			/* convert to realmode address */
 896
 897	bcl	20,31,$+4
 8980:	mflr	r3
 899	ld	r3,(1f-0b)(r3)		/* get &.rtas_restore_regs */
 900
 901	mfmsr   r6
 902	li	r0,MSR_RI
 903	andc	r6,r6,r0
 904	sync	
 905	mtmsrd  r6
 906        
 907        ld	r1,PACAR1(r4)           /* Restore our SP */
 908        ld	r4,PACASAVEDMSR(r4)     /* Restore our MSR */
 909
 910	mtspr	SPRN_SRR0,r3
 911	mtspr	SPRN_SRR1,r4
 912	rfid
 913	b	.	/* prevent speculative execution */
 914
 915	.align	3
 9161:	.llong	.rtas_restore_regs
 917
 918_STATIC(rtas_restore_regs)
 919	/* relocation is on at this point */
 920	REST_GPR(2, r1)			/* Restore the TOC */
 921	REST_GPR(13, r1)		/* Restore paca */
 922	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
 923	REST_10GPRS(22, r1)		/* ditto */
 924
 925	GET_PACA(r13)
 926
 927	ld	r4,_CCR(r1)
 928	mtcr	r4
 929	ld	r5,_CTR(r1)
 930	mtctr	r5
 931	ld	r6,_XER(r1)
 932	mtspr	SPRN_XER,r6
 933	ld	r7,_DAR(r1)
 934	mtdar	r7
 935	ld	r8,_DSISR(r1)
 936	mtdsisr	r8
 937
 938        addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
 939	ld	r0,16(r1)		/* get return address */
 940
 941	mtlr    r0
 942        blr				/* return to caller */
 943
 944#endif /* CONFIG_PPC_RTAS */
 945
 946_GLOBAL(enter_prom)
 947	mflr	r0
 948	std	r0,16(r1)
 949        stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */
 950
 951	/* Because PROM is running in 32b mode, it clobbers the high order half
 952	 * of all registers that it saves.  We therefore save those registers
 953	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
 954   	 */
 955	SAVE_GPR(2, r1)
 956	SAVE_GPR(13, r1)
 957	SAVE_8GPRS(14, r1)
 958	SAVE_10GPRS(22, r1)
 959	mfcr	r10
 960	mfmsr	r11
 961	std	r10,_CCR(r1)
 962	std	r11,_MSR(r1)
 963
 964	/* Get the PROM entrypoint */
 965	mtlr	r4
 966
 967	/* Switch the MSR to 32-bit mode
 968	 */
 969#ifdef CONFIG_PPC_BOOK3E
 970	rlwinm	r11,r11,0,1,31
 971	mtmsr	r11
 972#else /* CONFIG_PPC_BOOK3E */
 973        mfmsr   r11
 974        li      r12,1
 975        rldicr  r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
 976        andc    r11,r11,r12
 977        li      r12,1
 978        rldicr  r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
 979        andc    r11,r11,r12
 980        mtmsrd  r11
 981#endif /* CONFIG_PPC_BOOK3E */
 982        isync
 983
 984	/* Enter PROM here... */
 985	blrl
 986
 987	/* Just make sure that the top 32 bits of r1 didn't get
 988	 * corrupted by OF
 989	 */
 990	rldicl	r1,r1,0,32
 991
 992	/* Restore the MSR (back to 64 bits) */
 993	ld	r0,_MSR(r1)
 994	MTMSRD(r0)
 995        isync
 996
 997	/* Restore other registers */
 998	REST_GPR(2, r1)
 999	REST_GPR(13, r1)
1000	REST_8GPRS(14, r1)
1001	REST_10GPRS(22, r1)
1002	ld	r4,_CCR(r1)
1003	mtcr	r4
1004	
1005        addi	r1,r1,PROM_FRAME_SIZE
1006	ld	r0,16(r1)
1007	mtlr    r0
1008        blr
1009
1010#ifdef CONFIG_FUNCTION_TRACER
1011#ifdef CONFIG_DYNAMIC_FTRACE
1012_GLOBAL(mcount)
1013_GLOBAL(_mcount)
1014	blr
1015
1016_GLOBAL(ftrace_caller)
1017	/* Taken from output of objdump from lib64/glibc */
1018	mflr	r3
1019	ld	r11, 0(r1)
1020	stdu	r1, -112(r1)
1021	std	r3, 128(r1)
1022	ld	r4, 16(r11)
1023	subi	r3, r3, MCOUNT_INSN_SIZE
1024.globl ftrace_call
1025ftrace_call:
1026	bl	ftrace_stub
1027	nop
1028#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1029.globl ftrace_graph_call
1030ftrace_graph_call:
1031	b	ftrace_graph_stub
1032_GLOBAL(ftrace_graph_stub)
1033#endif
1034	ld	r0, 128(r1)
1035	mtlr	r0
1036	addi	r1, r1, 112
1037_GLOBAL(ftrace_stub)
1038	blr
1039#else
1040_GLOBAL(mcount)
1041	blr
1042
1043_GLOBAL(_mcount)
1044	/* Taken from output of objdump from lib64/glibc */
1045	mflr	r3
1046	ld	r11, 0(r1)
1047	stdu	r1, -112(r1)
1048	std	r3, 128(r1)
1049	ld	r4, 16(r11)
1050
1051	subi	r3, r3, MCOUNT_INSN_SIZE
1052	LOAD_REG_ADDR(r5,ftrace_trace_function)
1053	ld	r5,0(r5)
1054	ld	r5,0(r5)
1055	mtctr	r5
1056	bctrl
1057	nop
1058
1059
1060#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1061	b	ftrace_graph_caller
1062#endif
1063	ld	r0, 128(r1)
1064	mtlr	r0
1065	addi	r1, r1, 112
1066_GLOBAL(ftrace_stub)
1067	blr
1068
1069#endif /* CONFIG_DYNAMIC_FTRACE */
1070
1071#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1072_GLOBAL(ftrace_graph_caller)
1073	/* load r4 with local address */
1074	ld	r4, 128(r1)
1075	subi	r4, r4, MCOUNT_INSN_SIZE
1076
1077	/* get the parent address */
1078	ld	r11, 112(r1)
1079	addi	r3, r11, 16
1080
1081	bl	.prepare_ftrace_return
1082	nop
1083
1084	ld	r0, 128(r1)
1085	mtlr	r0
1086	addi	r1, r1, 112
1087	blr
1088
1089_GLOBAL(return_to_handler)
1090	/* need to save return values */
1091	std	r4,  -24(r1)
1092	std	r3,  -16(r1)
1093	std	r31, -8(r1)
1094	mr	r31, r1
1095	stdu	r1, -112(r1)
1096
1097	bl	.ftrace_return_to_handler
1098	nop
1099
1100	/* return value has real return address */
1101	mtlr	r3
1102
1103	ld	r1, 0(r1)
1104	ld	r4,  -24(r1)
1105	ld	r3,  -16(r1)
1106	ld	r31, -8(r1)
1107
1108	/* Jump back to real return address */
1109	blr
1110
1111_GLOBAL(mod_return_to_handler)
1112	/* need to save return values */
1113	std	r4,  -32(r1)
1114	std	r3,  -24(r1)
1115	/* save TOC */
1116	std	r2,  -16(r1)
1117	std	r31, -8(r1)
1118	mr	r31, r1
1119	stdu	r1, -112(r1)
1120
1121	/*
1122	 * We are in a module using the module's TOC.
1123	 * Switch to our TOC to run inside the core kernel.
1124	 */
1125	ld	r2, PACATOC(r13)
1126
1127	bl	.ftrace_return_to_handler
1128	nop
1129
1130	/* return value has real return address */
1131	mtlr	r3
1132
1133	ld	r1, 0(r1)
1134	ld	r4,  -32(r1)
1135	ld	r3,  -24(r1)
1136	ld	r2,  -16(r1)
1137	ld	r31, -8(r1)
1138
1139	/* Jump back to real return address */
1140	blr
1141#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1142#endif /* CONFIG_FUNCTION_TRACER */