v6.8
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 *  PowerPC version
  4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  5 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
  6 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
  7 *  Adapted for Power Macintosh by Paul Mackerras.
  8 *  Low-level exception handlers and MMU support
  9 *  rewritten by Paul Mackerras.
 10 *    Copyright (C) 1996 Paul Mackerras.
 11 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 12 *
 13 *  This file contains the system call entry code, context switch
 14 *  code, and exception/interrupt return code for PowerPC.
 15 */
 16
 17#include <linux/errno.h>
 18#include <linux/err.h>
 19#include <linux/sys.h>
 20#include <linux/threads.h>
 21#include <linux/linkage.h>
 22
 23#include <asm/reg.h>
 24#include <asm/page.h>
 25#include <asm/mmu.h>
 26#include <asm/cputable.h>
 27#include <asm/thread_info.h>
 28#include <asm/ppc_asm.h>
 29#include <asm/asm-offsets.h>
 30#include <asm/unistd.h>
 31#include <asm/ptrace.h>
 32#include <asm/feature-fixups.h>
 33#include <asm/barrier.h>
 34#include <asm/kup.h>
 35#include <asm/bug.h>
 36#include <asm/interrupt.h>
 37
 38#include "head_32.h"
 39
 40/*
 41 * powerpc relies on return from interrupt/syscall being context synchronising
 42 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 43 * synchronisation instructions.
 44 */
 45
 46/*
 47 * Align to 4k to ensure that all functions modifying srr0/srr1
 48 * fit into one page, so that a TLB miss cannot occur between the
 49 * modification of srr0/srr1 and the associated rfi.
 50 */
 51	.align	12
 52
 53#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500)
 54	.globl	prepare_transfer_to_handler
 55prepare_transfer_to_handler:
 56	/* if from kernel, check interrupted DOZE/NAP mode */
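	/* TLF_NAPPING/TLF_SLEEPING are thread-local flags set by the idle/sleep
	 * code before the CPU naps or sleeps; if one is set, this interrupt woke
	 * us from that state and is handled specially at 4:/7: below. */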
 57	lwz	r12,TI_LOCAL_FLAGS(r2)
 58	mtcrf	0x01,r12
 59	bt-	31-TLF_NAPPING,4f
 60	bt-	31-TLF_SLEEPING,7f
 61	blr
 62
 634:	rlwinm	r12,r12,0,~_TLF_NAPPING
 64	stw	r12,TI_LOCAL_FLAGS(r2)
 65	b	power_save_ppc32_restore
 66
 677:	rlwinm	r12,r12,0,~_TLF_SLEEPING
 68	stw	r12,TI_LOCAL_FLAGS(r2)
 69	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
 70	rlwinm	r9,r9,0,~MSR_EE
 71	lwz	r12,_LINK(r11)		/* and return to address in LR */
 72	REST_GPR(2, r11)
 73	b	fast_exception_return
 74_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
 75#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */
 76
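/*
 * CONFIG_PPC_KUEP is Kernel Userspace Execution Prevention.  On book3s/32 the
 * helpers below implement it through the No-Execute (SR_NX) bit of the user
 * segment registers: __kuep_lock applies the thread's saved segment value on
 * kernel entry, and __kuep_unlock clears SR_NX again before returning to
 * userspace.
 */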
 77#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
 78SYM_FUNC_START(__kuep_lock)
 79	lwz	r9, THREAD+THSR0(r2)
 80	update_user_segments_by_4 r9, r10, r11, r12
 81	blr
 82SYM_FUNC_END(__kuep_lock)
 83
 84SYM_FUNC_START_LOCAL(__kuep_unlock)
 85	lwz	r9, THREAD+THSR0(r2)
 86	rlwinm  r9,r9,0,~SR_NX
 87	update_user_segments_by_4 r9, r10, r11, r12
 88	blr
 89SYM_FUNC_END(__kuep_unlock)
 90
 91.macro	kuep_lock
 92	bl	__kuep_lock
 93.endm
 94.macro	kuep_unlock
 95	bl	__kuep_unlock
 96.endm
 97#else
 98.macro	kuep_lock
 99.endm
100.macro	kuep_unlock
101.endm
102#endif
103
104	.globl	transfer_to_syscall
105transfer_to_syscall:
106	stw	r3, ORIG_GPR3(r1)
107	stw	r11, GPR1(r1)
108	stw	r11, 0(r1)
109	mflr	r12
110	stw	r12, _LINK(r1)
111#ifdef CONFIG_BOOKE_OR_40x
112	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
113#endif
114	lis	r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
115	SAVE_GPR(2, r1)
116	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
117	stw	r9,_MSR(r1)
118	li	r2, INTERRUPT_SYSCALL
119	stw	r12,STACK_INT_FRAME_MARKER(r1)
120	stw	r2,_TRAP(r1)
121	SAVE_GPR(0, r1)
122	SAVE_GPRS(3, 8, r1)
123	addi	r2,r10,-THREAD
124	SAVE_NVGPRS(r1)
125	kuep_lock
126
127	/* Calling convention has r3 = regs, r4 = orig r0 */
128	addi	r3,r1,STACK_INT_FRAME_REGS
129	mr	r4,r0
130	bl	system_call_exception
131
132ret_from_syscall:
133	addi    r4,r1,STACK_INT_FRAME_REGS
134	li	r5,0
135	bl	syscall_exit_prepare
136#ifdef CONFIG_PPC_47x
137	lis	r4,icache_44x_need_flush@ha
138	lwz	r5,icache_44x_need_flush@l(r4)
139	cmplwi	cr0,r5,0
140	bne-	.L44x_icache_flush
141#endif /* CONFIG_PPC_47x */
142.L44x_icache_flush_return:
143	kuep_unlock
144	lwz	r4,_LINK(r1)
145	lwz	r5,_CCR(r1)
146	mtlr	r4
147	lwz	r7,_NIP(r1)
148	lwz	r8,_MSR(r1)
149	cmpwi	r3,0
150	REST_GPR(3, r1)
151syscall_exit_finish:
152	mtspr	SPRN_SRR0,r7
153	mtspr	SPRN_SRR1,r8
154
155	bne	3f
156	mtcr	r5
157
1581:	REST_GPR(2, r1)
159	REST_GPR(1, r1)
160	rfi
161#ifdef CONFIG_40x
162	b .	/* Prevent prefetch past rfi */
163#endif
164
1653:	mtcr	r5
166	lwz	r4,_CTR(r1)
167	lwz	r5,_XER(r1)
168	REST_NVGPRS(r1)
169	mtctr	r4
170	mtxer	r5
171	REST_GPR(0, r1)
172	REST_GPRS(3, 12, r1)
173	b	1b
174
175#ifdef CONFIG_44x
176.L44x_icache_flush:
177	li	r7,0
178	iccci	r0,r0
179	stw	r7,icache_44x_need_flush@l(r4)
180	b	.L44x_icache_flush_return
181#endif  /* CONFIG_44x */
182
183	.globl	ret_from_fork
184ret_from_fork:
185	REST_NVGPRS(r1)
186	bl	schedule_tail
187	li	r3,0	/* fork() return value */
188	b	ret_from_syscall
189
190	.globl	ret_from_kernel_user_thread
191ret_from_kernel_user_thread:
192	bl	schedule_tail
193	mtctr	r14
194	mr	r3,r15
195	PPC440EP_ERR42
196	bctrl
197	li	r3,0
198	b	ret_from_syscall
199
200	.globl	start_kernel_thread
201start_kernel_thread:
202	bl	schedule_tail
203	mtctr	r14
204	mr	r3,r15
205	PPC440EP_ERR42
206	bctrl
207	/*
208	 * This must not return. We actually want to BUG here, not WARN,
209	 * because BUG will exit the process, which is what the kernel thread
210	 * should have done and which may give some hope of the system continuing.
211	 */
212100:	trap
213	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
214
215	.globl	fast_exception_return
216fast_exception_return:
217#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
218	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
219	beq	3f			/* if not, we've got problems */
220#endif
221
2222:	lwz	r10,_CCR(r11)
223	REST_GPRS(1, 6, r11)
224	mtcr	r10
225	lwz	r10,_LINK(r11)
226	mtlr	r10
227	/* Clear the exception marker on the stack to avoid confusing stacktrace */
228	li	r10, 0
229	stw	r10, 8(r11)
230	REST_GPR(10, r11)
231#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
232	mtspr	SPRN_NRI, r0
233#endif
234	mtspr	SPRN_SRR1,r9
235	mtspr	SPRN_SRR0,r12
236	REST_GPR(9, r11)
237	REST_GPR(12, r11)
238	REST_GPR(11, r11)
239	rfi
240#ifdef CONFIG_40x
241	b .	/* Prevent prefetch past rfi */
242#endif
243_ASM_NOKPROBE_SYMBOL(fast_exception_return)
244
245/* aargh, a nonrecoverable interrupt, panic */
246/* aargh, we don't know which trap this is */
2473:
248	li	r10,-1
249	stw	r10,_TRAP(r11)
250	prepare_transfer_to_handler
251	bl	unrecoverable_exception
252	trap	/* should not get here */
253
254	.globl interrupt_return
255interrupt_return:
256	lwz	r4,_MSR(r1)
257	addi	r3,r1,STACK_INT_FRAME_REGS
258	andi.	r0,r4,MSR_PR
259	beq	.Lkernel_interrupt_return
260	bl	interrupt_exit_user_prepare
261	cmpwi	r3,0
262	kuep_unlock
263	bne-	.Lrestore_nvgprs
264
265.Lfast_user_interrupt_return:
266	lwz	r11,_NIP(r1)
267	lwz	r12,_MSR(r1)
268	mtspr	SPRN_SRR0,r11
269	mtspr	SPRN_SRR1,r12
270
271BEGIN_FTR_SECTION
272	stwcx.	r0,0,r1		/* to clear the reservation */
273FTR_SECTION_ELSE
274	lwarx	r0,0,r1
275ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
276
277	lwz	r3,_CCR(r1)
278	lwz	r4,_LINK(r1)
279	lwz	r5,_CTR(r1)
280	lwz	r6,_XER(r1)
281	li	r0,0
282
283	/*
284	 * Leaving a stale exception marker on the stack can confuse
285	 * the reliable stack unwinder later on. Clear it.
286	 */
287	stw	r0,8(r1)
288	REST_GPRS(7, 12, r1)
289
290	mtcr	r3
291	mtlr	r4
292	mtctr	r5
293	mtspr	SPRN_XER,r6
294
295	REST_GPRS(2, 6, r1)
296	REST_GPR(0, r1)
297	REST_GPR(1, r1)
298	rfi
299#ifdef CONFIG_40x
300	b .	/* Prevent prefetch past rfi */
301#endif
302
303.Lrestore_nvgprs:
304	REST_NVGPRS(r1)
305	b	.Lfast_user_interrupt_return
306
307.Lkernel_interrupt_return:
308	bl	interrupt_exit_kernel_prepare
309
310.Lfast_kernel_interrupt_return:
311	cmpwi	cr1,r3,0
312	lwz	r11,_NIP(r1)
313	lwz	r12,_MSR(r1)
314	mtspr	SPRN_SRR0,r11
315	mtspr	SPRN_SRR1,r12
316
317BEGIN_FTR_SECTION
318	stwcx.	r0,0,r1		/* to clear the reservation */
319FTR_SECTION_ELSE
320	lwarx	r0,0,r1
321ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
322
323	lwz	r3,_LINK(r1)
324	lwz	r4,_CTR(r1)
325	lwz	r5,_XER(r1)
326	lwz	r6,_CCR(r1)
327	li	r0,0
328
329	REST_GPRS(7, 12, r1)
330
331	mtlr	r3
332	mtctr	r4
333	mtspr	SPRN_XER,r5
334
335	/*
336	 * Leaving a stale exception marker on the stack can confuse
337	 * the reliable stack unwinder later on. Clear it.
338	 */
339	stw	r0,8(r1)
340
341	REST_GPRS(2, 5, r1)
342
343	bne-	cr1,1f /* emulate stack store */
344	mtcr	r6
345	REST_GPR(6, r1)
346	REST_GPR(0, r1)
347	REST_GPR(1, r1)
348	rfi
349#ifdef CONFIG_40x
350	b .	/* Prevent prefetch past rfi */
351#endif
352
3531:	/*
354	 * Emulate stack store with update. New r1 value was already calculated
355	 * and updated in our interrupt regs by emulate_loadstore, but we can't
356	 * store the previous value of r1 to the stack before re-loading our
357	 * registers from it, otherwise they could be clobbered.  Use
358	 * SPRG Scratch0 as temporary storage to hold the store
359	 * data, as interrupts are disabled here so it won't be clobbered.
360	 */
361	mtcr	r6
362#ifdef CONFIG_BOOKE
363	mtspr	SPRN_SPRG_WSCRATCH0, r9
364#else
365	mtspr	SPRN_SPRG_SCRATCH0, r9
366#endif
367	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
368	REST_GPR(6, r1)
369	REST_GPR(0, r1)
370	REST_GPR(1, r1)
371	stw	r9,0(r1) /* perform store component of stwu */
372#ifdef CONFIG_BOOKE
373	mfspr	r9, SPRN_SPRG_RSCRATCH0
374#else
375	mfspr	r9, SPRN_SPRG_SCRATCH0
376#endif
377	rfi
378#ifdef CONFIG_40x
379	b .	/* Prevent prefetch past rfi */
380#endif
381_ASM_NOKPROBE_SYMBOL(interrupt_return)
382
383#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
384
385/*
386 * Returning from a critical interrupt in user mode doesn't need
387 * to be any different from a normal exception.  For a critical
388 * interrupt in the kernel, we just return (without checking for
389 * preemption) since the interrupt may have happened at some crucial
390 * place (e.g. inside the TLB miss handler), and because we will be
391 * running with r1 pointing into critical_stack, not the current
392 * process's kernel stack (and therefore current_thread_info() will
393 * give the wrong answer).
394 * We have to restore various SPRs that may have been in use at the
395 * time of the critical interrupt.
396 *
397 */
398#ifdef CONFIG_40x
399#define PPC_40x_TURN_OFF_MSR_DR						    \
400	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
401	 * assume the instructions here are mapped by a pinned TLB entry */ \
402	li	r10,MSR_IR;						    \
403	mtmsr	r10;							    \
404	isync;								    \
405	tophys(r1, r1);
406#else
407#define PPC_40x_TURN_OFF_MSR_DR
408#endif
409
410#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
411	REST_NVGPRS(r1);						\
412	lwz	r3,_MSR(r1);						\
413	andi.	r3,r3,MSR_PR;						\
414	bne	interrupt_return;					\
415	REST_GPR(0, r1);						\
416	REST_GPRS(2, 8, r1);						\
417	lwz	r10,_XER(r1);						\
418	lwz	r11,_CTR(r1);						\
419	mtspr	SPRN_XER,r10;						\
420	mtctr	r11;							\
421	stwcx.	r0,0,r1;		/* to clear the reservation */	\
422	lwz	r11,_LINK(r1);						\
423	mtlr	r11;							\
424	lwz	r10,_CCR(r1);						\
425	mtcrf	0xff,r10;						\
426	PPC_40x_TURN_OFF_MSR_DR;					\
427	lwz	r9,_DEAR(r1);						\
428	lwz	r10,_ESR(r1);						\
429	mtspr	SPRN_DEAR,r9;						\
430	mtspr	SPRN_ESR,r10;						\
431	lwz	r11,_NIP(r1);						\
432	lwz	r12,_MSR(r1);						\
433	mtspr	exc_lvl_srr0,r11;					\
434	mtspr	exc_lvl_srr1,r12;					\
435	REST_GPRS(9, 12, r1);						\
436	REST_GPR(1, r1);						\
437	exc_lvl_rfi;							\
438	b	.;		/* prevent prefetch past exc_lvl_rfi */
439
440#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
441	lwz	r9,_##exc_lvl_srr0(r1);					\
442	lwz	r10,_##exc_lvl_srr1(r1);				\
443	mtspr	SPRN_##exc_lvl_srr0,r9;					\
444	mtspr	SPRN_##exc_lvl_srr1,r10;
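/*
 * For example, RESTORE_xSRR(CSRR0,CSRR1) expands (via ## token pasting) to
 * reload SPRN_CSRR0/SPRN_CSRR1 from the _CSRR0/_CSRR1 slots of the frame.
 */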
445
446#if defined(CONFIG_PPC_E500)
447#ifdef CONFIG_PHYS_64BIT
448#define	RESTORE_MAS7							\
449	lwz	r11,MAS7(r1);						\
450	mtspr	SPRN_MAS7,r11;
451#else
452#define	RESTORE_MAS7
453#endif /* CONFIG_PHYS_64BIT */
454#define RESTORE_MMU_REGS						\
455	lwz	r9,MAS0(r1);						\
456	lwz	r10,MAS1(r1);						\
457	lwz	r11,MAS2(r1);						\
458	mtspr	SPRN_MAS0,r9;						\
459	lwz	r9,MAS3(r1);						\
460	mtspr	SPRN_MAS1,r10;						\
461	lwz	r10,MAS6(r1);						\
462	mtspr	SPRN_MAS2,r11;						\
463	mtspr	SPRN_MAS3,r9;						\
464	mtspr	SPRN_MAS6,r10;						\
465	RESTORE_MAS7;
466#elif defined(CONFIG_44x)
467#define RESTORE_MMU_REGS						\
468	lwz	r9,MMUCR(r1);						\
469	mtspr	SPRN_MMUCR,r9;
470#else
471#define RESTORE_MMU_REGS
472#endif
473
474#ifdef CONFIG_40x
475	.globl	ret_from_crit_exc
476ret_from_crit_exc:
477	lis	r9,crit_srr0@ha;
478	lwz	r9,crit_srr0@l(r9);
479	lis	r10,crit_srr1@ha;
480	lwz	r10,crit_srr1@l(r10);
481	mtspr	SPRN_SRR0,r9;
482	mtspr	SPRN_SRR1,r10;
483	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
484_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
485#endif /* CONFIG_40x */
486
487#ifdef CONFIG_BOOKE
488	.globl	ret_from_crit_exc
489ret_from_crit_exc:
490	RESTORE_xSRR(SRR0,SRR1);
491	RESTORE_MMU_REGS;
492	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
493_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
494
495	.globl	ret_from_debug_exc
496ret_from_debug_exc:
497	RESTORE_xSRR(SRR0,SRR1);
498	RESTORE_xSRR(CSRR0,CSRR1);
499	RESTORE_MMU_REGS;
500	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
501_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)
502
503	.globl	ret_from_mcheck_exc
504ret_from_mcheck_exc:
505	RESTORE_xSRR(SRR0,SRR1);
506	RESTORE_xSRR(CSRR0,CSRR1);
507	RESTORE_xSRR(DSRR0,DSRR1);
508	RESTORE_MMU_REGS;
509	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
510_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
511#endif /* CONFIG_BOOKE */
512#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
v3.15
 
   1/*
   2 *  PowerPC version
   3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
   4 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
   5 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
   6 *  Adapted for Power Macintosh by Paul Mackerras.
   7 *  Low-level exception handlers and MMU support
   8 *  rewritten by Paul Mackerras.
   9 *    Copyright (C) 1996 Paul Mackerras.
  10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
  11 *
  12 *  This file contains the system call entry code, context switch
  13 *  code, and exception/interrupt return code for PowerPC.
  14 *
  15 *  This program is free software; you can redistribute it and/or
  16 *  modify it under the terms of the GNU General Public License
  17 *  as published by the Free Software Foundation; either version
  18 *  2 of the License, or (at your option) any later version.
  19 *
  20 */
  21
  22#include <linux/errno.h>
  23#include <linux/sys.h>
  24#include <linux/threads.h>
  25#include <asm/reg.h>
  26#include <asm/page.h>
  27#include <asm/mmu.h>
  28#include <asm/cputable.h>
  29#include <asm/thread_info.h>
  30#include <asm/ppc_asm.h>
  31#include <asm/asm-offsets.h>
  32#include <asm/unistd.h>
  33#include <asm/ftrace.h>
  34#include <asm/ptrace.h>
  35
  36#undef SHOW_SYSCALLS
  37#undef SHOW_SYSCALLS_TASK
  38
  39/*
  40 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
  41 */
  42#if MSR_KERNEL >= 0x10000
  43#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
  44#else
  45#define LOAD_MSR_KERNEL(r, x)	li r,(x)
  46#endif
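/*
 * For illustration: li only takes a 16-bit signed immediate, so when
 * MSR_KERNEL >= 0x10000, LOAD_MSR_KERNEL(r10, MSR_KERNEL) expands to
 *	lis	r10,MSR_KERNEL@h
 *	ori	r10,r10,MSR_KERNEL@l
 * rather than a single "li r10,MSR_KERNEL".
 */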
  47
  48#ifdef CONFIG_BOOKE
  49	.globl	mcheck_transfer_to_handler
  50mcheck_transfer_to_handler:
  51	mfspr	r0,SPRN_DSRR0
  52	stw	r0,_DSRR0(r11)
  53	mfspr	r0,SPRN_DSRR1
  54	stw	r0,_DSRR1(r11)
  55	/* fall through */
  56
  57	.globl	debug_transfer_to_handler
  58debug_transfer_to_handler:
  59	mfspr	r0,SPRN_CSRR0
  60	stw	r0,_CSRR0(r11)
  61	mfspr	r0,SPRN_CSRR1
  62	stw	r0,_CSRR1(r11)
  63	/* fall through */
  64
  65	.globl	crit_transfer_to_handler
  66crit_transfer_to_handler:
  67#ifdef CONFIG_PPC_BOOK3E_MMU
  68	mfspr	r0,SPRN_MAS0
  69	stw	r0,MAS0(r11)
  70	mfspr	r0,SPRN_MAS1
  71	stw	r0,MAS1(r11)
  72	mfspr	r0,SPRN_MAS2
  73	stw	r0,MAS2(r11)
  74	mfspr	r0,SPRN_MAS3
  75	stw	r0,MAS3(r11)
  76	mfspr	r0,SPRN_MAS6
  77	stw	r0,MAS6(r11)
  78#ifdef CONFIG_PHYS_64BIT
  79	mfspr	r0,SPRN_MAS7
  80	stw	r0,MAS7(r11)
  81#endif /* CONFIG_PHYS_64BIT */
  82#endif /* CONFIG_PPC_BOOK3E_MMU */
  83#ifdef CONFIG_44x
  84	mfspr	r0,SPRN_MMUCR
  85	stw	r0,MMUCR(r11)
  86#endif
  87	mfspr	r0,SPRN_SRR0
  88	stw	r0,_SRR0(r11)
  89	mfspr	r0,SPRN_SRR1
  90	stw	r0,_SRR1(r11)
  91
  92	/* set the stack limit to the current stack
  93	 * and set the limit to protect the thread_info
  94	 * struct
  95	 */
  96	mfspr	r8,SPRN_SPRG_THREAD
  97	lwz	r0,KSP_LIMIT(r8)
  98	stw	r0,SAVED_KSP_LIMIT(r11)
  99	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
 100	stw	r0,KSP_LIMIT(r8)
 101	/* fall through */
 102#endif
 103
 104#ifdef CONFIG_40x
 105	.globl	crit_transfer_to_handler
 106crit_transfer_to_handler:
 107	lwz	r0,crit_r10@l(0)
 108	stw	r0,GPR10(r11)
 109	lwz	r0,crit_r11@l(0)
 110	stw	r0,GPR11(r11)
 111	mfspr	r0,SPRN_SRR0
 112	stw	r0,crit_srr0@l(0)
 113	mfspr	r0,SPRN_SRR1
 114	stw	r0,crit_srr1@l(0)
 115
 116	/* set the stack limit to the current stack
 117	 * and set the limit to protect the thread_info
 118	 * struct
 119	 */
 120	mfspr	r8,SPRN_SPRG_THREAD
 121	lwz	r0,KSP_LIMIT(r8)
 122	stw	r0,saved_ksp_limit@l(0)
 123	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
 124	stw	r0,KSP_LIMIT(r8)
 125	/* fall through */
 126#endif
 127
 128/*
 129 * This code finishes saving the registers to the exception frame
 130 * and jumps to the appropriate handler for the exception, turning
 131 * on address translation.
 132 * Note that we rely on the caller having set cr0.eq iff the exception
 133 * occurred in kernel mode (i.e. MSR:PR = 0).
 134 */
 135	.globl	transfer_to_handler_full
 136transfer_to_handler_full:
 137	SAVE_NVGPRS(r11)
 138	/* fall through */
 139
 140	.globl	transfer_to_handler
 141transfer_to_handler:
 142	stw	r2,GPR2(r11)
 143	stw	r12,_NIP(r11)
 144	stw	r9,_MSR(r11)
 145	andi.	r2,r9,MSR_PR
 146	mfctr	r12
 147	mfspr	r2,SPRN_XER
 148	stw	r12,_CTR(r11)
 149	stw	r2,_XER(r11)
 150	mfspr	r12,SPRN_SPRG_THREAD
 151	addi	r2,r12,-THREAD
 152	tovirt(r2,r2)			/* set r2 to current */
 153	beq	2f			/* if from user, fix up THREAD.regs */
 154	addi	r11,r1,STACK_FRAME_OVERHEAD
 155	stw	r11,PT_REGS(r12)
 156#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
 157	/* Check to see if the dbcr0 register is set up to debug.  Use the
 158	   internal debug mode bit to do this. */
 159	lwz	r12,THREAD_DBCR0(r12)
 160	andis.	r12,r12,DBCR0_IDM@h
 161	beq+	3f
 162	/* From user and task is ptraced - load up global dbcr0 */
 163	li	r12,-1			/* clear all pending debug events */
 164	mtspr	SPRN_DBSR,r12
 165	lis	r11,global_dbcr0@ha
 166	tophys(r11,r11)
 167	addi	r11,r11,global_dbcr0@l
 168#ifdef CONFIG_SMP
 169	CURRENT_THREAD_INFO(r9, r1)
 170	lwz	r9,TI_CPU(r9)
 171	slwi	r9,r9,3
 172	add	r11,r11,r9
 173#endif
 174	lwz	r12,0(r11)
 175	mtspr	SPRN_DBCR0,r12
 176	lwz	r12,4(r11)
 177	addi	r12,r12,-1
 178	stw	r12,4(r11)
 179#endif
 180	b	3f
 181
 1822:	/* if from kernel, check interrupted DOZE/NAP mode and
 183         * check for stack overflow
 184         */
 185	lwz	r9,KSP_LIMIT(r12)
 186	cmplw	r1,r9			/* if r1 <= ksp_limit */
 187	ble-	stack_ovf		/* then the kernel stack overflowed */
 1885:
 189#if defined(CONFIG_6xx) || defined(CONFIG_E500)
 190	CURRENT_THREAD_INFO(r9, r1)
 191	tophys(r9,r9)			/* check local flags */
 192	lwz	r12,TI_LOCAL_FLAGS(r9)
 193	mtcrf	0x01,r12
 194	bt-	31-TLF_NAPPING,4f
 195	bt-	31-TLF_SLEEPING,7f
 196#endif /* CONFIG_6xx || CONFIG_E500 */
 197	.globl transfer_to_handler_cont
 198transfer_to_handler_cont:
 1993:
 200	mflr	r9
 201	lwz	r11,0(r9)		/* virtual address of handler */
 202	lwz	r9,4(r9)		/* where to go when done */
 203#ifdef CONFIG_TRACE_IRQFLAGS
 204	lis	r12,reenable_mmu@h
 205	ori	r12,r12,reenable_mmu@l
 206	mtspr	SPRN_SRR0,r12
 207	mtspr	SPRN_SRR1,r10
 208	SYNC
 209	RFI
 210reenable_mmu:				/* re-enable mmu so we can */
 211	mfmsr	r10
 212	lwz	r12,_MSR(r1)
 213	xor	r10,r10,r12
 214	andi.	r10,r10,MSR_EE		/* Did EE change? */
 215	beq	1f
 216
 217	/*
 218	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
 219	 * If from user mode there is only one stack frame on the stack, and
 220 * accessing CALLER_ADDR1 will cause an oops. So we need to create a dummy
 221	 * stack frame to make trace_hardirqs_off happy.
 222	 *
 223	 * This is handy because we also need to save a bunch of GPRs,
 224	 * r3 can be different from GPR3(r1) at this point, r9 and r11
 225 * contain the old MSR and handler address respectively,
 226	 * r4 & r5 can contain page fault arguments that need to be passed
 227	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
 228	 * they aren't useful past this point (aren't syscall arguments),
 229	 * the rest is restored from the exception frame.
 230	 */
 231	stwu	r1,-32(r1)
 232	stw	r9,8(r1)
 233	stw	r11,12(r1)
 234	stw	r3,16(r1)
 235	stw	r4,20(r1)
 236	stw	r5,24(r1)
 237	bl	trace_hardirqs_off
 238	lwz	r5,24(r1)
 239	lwz	r4,20(r1)
 240	lwz	r3,16(r1)
 241	lwz	r11,12(r1)
 242	lwz	r9,8(r1)
 243	addi	r1,r1,32
 244	lwz	r0,GPR0(r1)
 245	lwz	r6,GPR6(r1)
 246	lwz	r7,GPR7(r1)
 247	lwz	r8,GPR8(r1)
 2481:	mtctr	r11
 249	mtlr	r9
 250	bctr				/* jump to handler */
 251#else /* CONFIG_TRACE_IRQFLAGS */
 252	mtspr	SPRN_SRR0,r11
 253	mtspr	SPRN_SRR1,r10
 254	mtlr	r9
 255	SYNC
 256	RFI				/* jump to handler, enable MMU */
 257#endif /* CONFIG_TRACE_IRQFLAGS */
 258
 259#if defined (CONFIG_6xx) || defined(CONFIG_E500)
 2604:	rlwinm	r12,r12,0,~_TLF_NAPPING
 261	stw	r12,TI_LOCAL_FLAGS(r9)
 262	b	power_save_ppc32_restore
 263
 2647:	rlwinm	r12,r12,0,~_TLF_SLEEPING
 265	stw	r12,TI_LOCAL_FLAGS(r9)
 266	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
 267	rlwinm	r9,r9,0,~MSR_EE
 268	lwz	r12,_LINK(r11)		/* and return to address in LR */
 269	b	fast_exception_return
 270#endif
 271
 272/*
 273 * On kernel stack overflow, load up an initial stack pointer
 274 * and call StackOverflow(regs), which should not return.
 275 */
 276stack_ovf:
 277	/* sometimes we use a statically-allocated stack, which is OK. */
 278	lis	r12,_end@h
 279	ori	r12,r12,_end@l
 280	cmplw	r1,r12
 281	ble	5b			/* r1 <= &_end is OK */
 282	SAVE_NVGPRS(r11)
 283	addi	r3,r1,STACK_FRAME_OVERHEAD
 284	lis	r1,init_thread_union@ha
 285	addi	r1,r1,init_thread_union@l
 286	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
 287	lis	r9,StackOverflow@ha
 288	addi	r9,r9,StackOverflow@l
 289	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 290	FIX_SRR1(r10,r12)
 291	mtspr	SPRN_SRR0,r9
 292	mtspr	SPRN_SRR1,r10
 293	SYNC
 294	RFI
 295
 296/*
 297 * Handle a system call.
 298 */
 299	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
 300	.stabs	"entry_32.S",N_SO,0,0,0f
 3010:
 302
 303_GLOBAL(DoSyscall)
 304	stw	r3,ORIG_GPR3(r1)
 305	li	r12,0
 306	stw	r12,RESULT(r1)
 307	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
 308	rlwinm	r11,r11,0,4,2
 309	stw	r11,_CCR(r1)
 310#ifdef SHOW_SYSCALLS
 311	bl	do_show_syscall
 312#endif /* SHOW_SYSCALLS */
 313#ifdef CONFIG_TRACE_IRQFLAGS
 314	/* Return from syscalls can (and generally will) hard enable
 315	 * interrupts. You aren't supposed to call a syscall with
 316	 * interrupts disabled in the first place. However, to ensure
 317	 * that we get it right vs. lockdep if it happens, we force
 318	 * that hard enable here with appropriate tracing if we see
 319	 * that we have been called with interrupts off
 320	 */
 321	mfmsr	r11
 322	andi.	r12,r11,MSR_EE
 323	bne+	1f
 324	/* We came in with interrupts disabled, we enable them now */
 325	bl	trace_hardirqs_on
 326	mfmsr	r11
 327	lwz	r0,GPR0(r1)
 328	lwz	r3,GPR3(r1)
 329	lwz	r4,GPR4(r1)
 330	ori	r11,r11,MSR_EE
 331	lwz	r5,GPR5(r1)
 332	lwz	r6,GPR6(r1)
 333	lwz	r7,GPR7(r1)
 334	lwz	r8,GPR8(r1)
 335	mtmsr	r11
 3361:
 337#endif /* CONFIG_TRACE_IRQFLAGS */
 338	CURRENT_THREAD_INFO(r10, r1)
 339	lwz	r11,TI_FLAGS(r10)
 340	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
 341	bne-	syscall_dotrace
 342syscall_dotrace_cont:
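	/* r0 holds the syscall number; sys_call_table entries are 4 bytes on
	 * ppc32, hence the shift left by 2 before the indexed load below. */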
 343	cmplwi	0,r0,NR_syscalls
 344	lis	r10,sys_call_table@h
 345	ori	r10,r10,sys_call_table@l
 346	slwi	r0,r0,2
 347	bge-	66f
 348	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
 349	mtlr	r10
 350	addi	r9,r1,STACK_FRAME_OVERHEAD
 351	PPC440EP_ERR42
 352	blrl			/* Call handler */
 353	.globl	ret_from_syscall
 354ret_from_syscall:
 355#ifdef SHOW_SYSCALLS
 356	bl	do_show_syscall_exit
 357#endif
 358	mr	r6,r3
 359	CURRENT_THREAD_INFO(r12, r1)
 360	/* disable interrupts so current_thread_info()->flags can't change */
 361	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
 362	/* Note: We don't bother telling lockdep about it */
 363	SYNC
 364	MTMSRD(r10)
 365	lwz	r9,TI_FLAGS(r12)
 366	li	r8,-_LAST_ERRNO
 367	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 368	bne-	syscall_exit_work
 369	cmplw	0,r3,r8
 370	blt+	syscall_exit_cont
 371	lwz	r11,_CCR(r1)			/* Load CR */
 372	neg	r3,r3
 373	oris	r11,r11,0x1000	/* Set SO bit in CR */
 374	stw	r11,_CCR(r1)
 375syscall_exit_cont:
 376	lwz	r8,_MSR(r1)
 377#ifdef CONFIG_TRACE_IRQFLAGS
 378	/* If we are going to return from the syscall with interrupts
 379	 * off, we trace that here. It shouldn't happen, but we
 380	 * want to catch the bugger if it does, right?
 381	 */
 382	andi.	r10,r8,MSR_EE
 383	bne+	1f
 384	stw	r3,GPR3(r1)
 385	bl      trace_hardirqs_off
 386	lwz	r3,GPR3(r1)
 3871:
 388#endif /* CONFIG_TRACE_IRQFLAGS */
 389#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 390	/* If the process has its own DBCR0 value, load it up.  The internal
 391	   debug mode bit tells us that dbcr0 should be loaded. */
 392	lwz	r0,THREAD+THREAD_DBCR0(r2)
 393	andis.	r10,r0,DBCR0_IDM@h
 394	bnel-	load_dbcr0
 395#endif
 396#ifdef CONFIG_44x
 397BEGIN_MMU_FTR_SECTION
 398	lis	r4,icache_44x_need_flush@ha
 399	lwz	r5,icache_44x_need_flush@l(r4)
 400	cmplwi	cr0,r5,0
 401	bne-	2f
 4021:
 403END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
 404#endif /* CONFIG_44x */
 405BEGIN_FTR_SECTION
 406	lwarx	r7,0,r1
 407END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 408	stwcx.	r0,0,r1			/* to clear the reservation */
 409	lwz	r4,_LINK(r1)
 410	lwz	r5,_CCR(r1)
 411	mtlr	r4
 412	mtcr	r5
 413	lwz	r7,_NIP(r1)
 414	FIX_SRR1(r8, r0)
 415	lwz	r2,GPR2(r1)
 416	lwz	r1,GPR1(r1)
 417	mtspr	SPRN_SRR0,r7
 418	mtspr	SPRN_SRR1,r8
 419	SYNC
 420	RFI
 421#ifdef CONFIG_44x
 4222:	li	r7,0
 423	iccci	r0,r0
 424	stw	r7,icache_44x_need_flush@l(r4)
 425	b	1b
 426#endif  /* CONFIG_44x */
 427
 42866:	li	r3,-ENOSYS
 429	b	ret_from_syscall
 430
 431	.globl	ret_from_fork
 432ret_from_fork:
 433	REST_NVGPRS(r1)
 434	bl	schedule_tail
 435	li	r3,0
 436	b	ret_from_syscall
 437
 438	.globl	ret_from_kernel_thread
 439ret_from_kernel_thread:
 440	REST_NVGPRS(r1)
 441	bl	schedule_tail
 442	mtlr	r14
 443	mr	r3,r15
 444	PPC440EP_ERR42
 445	blrl
 446	li	r3,0
 447	b	ret_from_syscall
 448
 449/* Traced system call support */
 450syscall_dotrace:
 451	SAVE_NVGPRS(r1)
 452	li	r0,0xc00
 453	stw	r0,_TRAP(r1)
 454	addi	r3,r1,STACK_FRAME_OVERHEAD
 455	bl	do_syscall_trace_enter
 456	/*
 457	 * Restore argument registers possibly just changed.
 458	 * We use the return value of do_syscall_trace_enter
 459	 * for call number to look up in the table (r0).
 460	 */
 461	mr	r0,r3
 462	lwz	r3,GPR3(r1)
 463	lwz	r4,GPR4(r1)
 464	lwz	r5,GPR5(r1)
 465	lwz	r6,GPR6(r1)
 466	lwz	r7,GPR7(r1)
 467	lwz	r8,GPR8(r1)
 468	REST_NVGPRS(r1)
 469	b	syscall_dotrace_cont
 470
 471syscall_exit_work:
 472	andi.	r0,r9,_TIF_RESTOREALL
 473	beq+	0f
 474	REST_NVGPRS(r1)
 475	b	2f
 4760:	cmplw	0,r3,r8
 477	blt+	1f
 478	andi.	r0,r9,_TIF_NOERROR
 479	bne-	1f
 480	lwz	r11,_CCR(r1)			/* Load CR */
 481	neg	r3,r3
 482	oris	r11,r11,0x1000	/* Set SO bit in CR */
 483	stw	r11,_CCR(r1)
 484
 4851:	stw	r6,RESULT(r1)	/* Save result */
 486	stw	r3,GPR3(r1)	/* Update return value */
 4872:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
 488	beq	4f
 489
 490	/* Clear per-syscall TIF flags if any are set.  */
 491
 492	li	r11,_TIF_PERSYSCALL_MASK
 493	addi	r12,r12,TI_FLAGS
 4943:	lwarx	r8,0,r12
 495	andc	r8,r8,r11
 496#ifdef CONFIG_IBM405_ERR77
 497	dcbt	0,r12
 498#endif
 499	stwcx.	r8,0,r12
 500	bne-	3b
 501	subi	r12,r12,TI_FLAGS
 502	
 5034:	/* Anything which requires enabling interrupts? */
 504	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
 505	beq	ret_from_except
 506
 507	/* Re-enable interrupts. There is no need to trace that with
 508	 * lockdep as we are supposed to have IRQs on at this point
 509	 */
 510	ori	r10,r10,MSR_EE
 511	SYNC
 512	MTMSRD(r10)
 513
 514	/* Save NVGPRS if they're not saved already */
 515	lwz	r4,_TRAP(r1)
 516	andi.	r4,r4,1
 517	beq	5f
 518	SAVE_NVGPRS(r1)
 519	li	r4,0xc00
 520	stw	r4,_TRAP(r1)
 5215:
 522	addi	r3,r1,STACK_FRAME_OVERHEAD
 523	bl	do_syscall_trace_leave
 524	b	ret_from_except_full
 525
 526#ifdef SHOW_SYSCALLS
 527do_show_syscall:
 528#ifdef SHOW_SYSCALLS_TASK
 529	lis	r11,show_syscalls_task@ha
 530	lwz	r11,show_syscalls_task@l(r11)
 531	cmp	0,r2,r11
 532	bnelr
 533#endif
 534	stw	r31,GPR31(r1)
 535	mflr	r31
 536	lis	r3,7f@ha
 537	addi	r3,r3,7f@l
 538	lwz	r4,GPR0(r1)
 539	lwz	r5,GPR3(r1)
 540	lwz	r6,GPR4(r1)
 541	lwz	r7,GPR5(r1)
 542	lwz	r8,GPR6(r1)
 543	lwz	r9,GPR7(r1)
 544	bl	printk
 545	lis	r3,77f@ha
 546	addi	r3,r3,77f@l
 547	lwz	r4,GPR8(r1)
 548	mr	r5,r2
 549	bl	printk
 550	lwz	r0,GPR0(r1)
 551	lwz	r3,GPR3(r1)
 552	lwz	r4,GPR4(r1)
 553	lwz	r5,GPR5(r1)
 554	lwz	r6,GPR6(r1)
 555	lwz	r7,GPR7(r1)
 556	lwz	r8,GPR8(r1)
 557	mtlr	r31
 558	lwz	r31,GPR31(r1)
 559	blr
 560
 561do_show_syscall_exit:
 562#ifdef SHOW_SYSCALLS_TASK
 563	lis	r11,show_syscalls_task@ha
 564	lwz	r11,show_syscalls_task@l(r11)
 565	cmp	0,r2,r11
 566	bnelr
 567#endif
 568	stw	r31,GPR31(r1)
 569	mflr	r31
 570	stw	r3,RESULT(r1)	/* Save result */
 571	mr	r4,r3
 572	lis	r3,79f@ha
 573	addi	r3,r3,79f@l
 574	bl	printk
 575	lwz	r3,RESULT(r1)
 576	mtlr	r31
 577	lwz	r31,GPR31(r1)
 578	blr
 579
 5807:	.string	"syscall %d(%x, %x, %x, %x, %x, "
 58177:	.string	"%x), current=%p\n"
 58279:	.string	" -> %x\n"
 583	.align	2,0
 584
 585#ifdef SHOW_SYSCALLS_TASK
 586	.data
 587	.globl	show_syscalls_task
 588show_syscalls_task:
 589	.long	-1
 590	.text
 591#endif
 592#endif /* SHOW_SYSCALLS */
 593
 594/*
 595 * The fork/clone functions need to copy the full register set into
 596 * the child process. Therefore we need to save all the nonvolatile
 597 * registers (r13 - r31) before calling the C code.
 598 */
 599	.globl	ppc_fork
 600ppc_fork:
 601	SAVE_NVGPRS(r1)
 602	lwz	r0,_TRAP(r1)
 603	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 604	stw	r0,_TRAP(r1)		/* register set saved */
 605	b	sys_fork
 606
 607	.globl	ppc_vfork
 608ppc_vfork:
 609	SAVE_NVGPRS(r1)
 610	lwz	r0,_TRAP(r1)
 611	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 612	stw	r0,_TRAP(r1)		/* register set saved */
 613	b	sys_vfork
 614
 615	.globl	ppc_clone
 616ppc_clone:
 617	SAVE_NVGPRS(r1)
 618	lwz	r0,_TRAP(r1)
 619	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 620	stw	r0,_TRAP(r1)		/* register set saved */
 621	b	sys_clone
 622
 623	.globl	ppc_swapcontext
 624ppc_swapcontext:
 625	SAVE_NVGPRS(r1)
 626	lwz	r0,_TRAP(r1)
 627	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
 628	stw	r0,_TRAP(r1)		/* register set saved */
 629	b	sys_swapcontext
 630
 631/*
 632 * Top-level page fault handling.
 633 * This is in assembler because if do_page_fault tells us that
 634 * it is a bad kernel page fault, we want to save the non-volatile
 635 * registers before calling bad_page_fault.
 636 */
 637	.globl	handle_page_fault
 638handle_page_fault:
 639	stw	r4,_DAR(r1)
 640	addi	r3,r1,STACK_FRAME_OVERHEAD
 641	bl	do_page_fault
 642	cmpwi	r3,0
 643	beq+	ret_from_except
 644	SAVE_NVGPRS(r1)
 645	lwz	r0,_TRAP(r1)
 646	clrrwi	r0,r0,1
 647	stw	r0,_TRAP(r1)
 648	mr	r5,r3
 649	addi	r3,r1,STACK_FRAME_OVERHEAD
 650	lwz	r4,_DAR(r1)
 651	bl	bad_page_fault
 652	b	ret_from_except_full
 653
 654/*
 655 * This routine switches between two different tasks.  The process
 656 * state of one is saved on its kernel stack.  Then the state
 657 * of the other is restored from its kernel stack.  The memory
 658 * management hardware is updated to the second process's state.
 659 * Finally, we can return to the second process.
 660 * On entry, r3 points to the THREAD for the current task, r4
 661 * points to the THREAD for the new task.
 662 *
 663 * This routine is always called with interrupts disabled.
 664 *
 665 * Note: there are two ways to get to the "going out" portion
 666 * of this code; either by coming in via the entry (_switch)
 667 * or via "fork" which must set up an environment equivalent
 668 * to the "_switch" path.  If you change this, you'll have to
 669 * change the fork code also.
 670 *
 671 * The code which creates the new task context is in 'copy_thread'
 672 * in arch/ppc/kernel/process.c
 673 */
 674_GLOBAL(_switch)
 675	stwu	r1,-INT_FRAME_SIZE(r1)
 676	mflr	r0
 677	stw	r0,INT_FRAME_SIZE+4(r1)
 678	/* r3-r12 are caller saved -- Cort */
 679	SAVE_NVGPRS(r1)
 680	stw	r0,_NIP(r1)	/* Return to switch caller */
 681	mfmsr	r11
 682	li	r0,MSR_FP	/* Disable floating-point */
 683#ifdef CONFIG_ALTIVEC
 684BEGIN_FTR_SECTION
 685	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
 686	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
 687	stw	r12,THREAD+THREAD_VRSAVE(r2)
 688END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 689#endif /* CONFIG_ALTIVEC */
 690#ifdef CONFIG_SPE
 691BEGIN_FTR_SECTION
 692	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
 693	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
 694	stw	r12,THREAD+THREAD_SPEFSCR(r2)
 695END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 696#endif /* CONFIG_SPE */
 697	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
 698	beq+	1f
 699	andc	r11,r11,r0
 700	MTMSRD(r11)
 701	isync
 7021:	stw	r11,_MSR(r1)
 703	mfcr	r10
 704	stw	r10,_CCR(r1)
 705	stw	r1,KSP(r3)	/* Set old stack pointer */
 706
 707#ifdef CONFIG_SMP
 708	/* We need a sync somewhere here to make sure that if the
 709	 * previous task gets rescheduled on another CPU, it sees all
 710	 * stores it has performed on this one.
 711	 */
 712	sync
 713#endif /* CONFIG_SMP */
 714
 715	tophys(r0,r4)
 716	CLR_TOP32(r0)
 717	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
 718	lwz	r1,KSP(r4)	/* Load new stack pointer */
 719
 720	/* save the old current 'last' for return value */
 721	mr	r3,r2
 722	addi	r2,r4,-THREAD	/* Update current */
 723
 724#ifdef CONFIG_ALTIVEC
 725BEGIN_FTR_SECTION
 726	lwz	r0,THREAD+THREAD_VRSAVE(r2)
 727	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
 728END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 729#endif /* CONFIG_ALTIVEC */
 730#ifdef CONFIG_SPE
 731BEGIN_FTR_SECTION
 732	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
 733	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
 734END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 735#endif /* CONFIG_SPE */
 736
 737	lwz	r0,_CCR(r1)
 738	mtcrf	0xFF,r0
 739	/* r3-r12 are destroyed -- Cort */
 740	REST_NVGPRS(r1)
 741
 742	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
 743	mtlr	r4
 744	addi	r1,r1,INT_FRAME_SIZE
 745	blr
 746
 747	.globl	fast_exception_return
 748fast_exception_return:
 749#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 750	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
 751	beq	1f			/* if not, we've got problems */
 752#endif
 753
 7542:	REST_4GPRS(3, r11)
 755	lwz	r10,_CCR(r11)
 756	REST_GPR(1, r11)
 757	mtcr	r10
 758	lwz	r10,_LINK(r11)
 759	mtlr	r10
 760	REST_GPR(10, r11)
 761	mtspr	SPRN_SRR1,r9
 762	mtspr	SPRN_SRR0,r12
 763	REST_GPR(9, r11)
 764	REST_GPR(12, r11)
 765	lwz	r11,GPR11(r11)
 766	SYNC
 767	RFI
 768
 769#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 770/* check if the exception happened in a restartable section */
 7711:	lis	r3,exc_exit_restart_end@ha
 772	addi	r3,r3,exc_exit_restart_end@l
 773	cmplw	r12,r3
 774	bge	3f
 775	lis	r4,exc_exit_restart@ha
 776	addi	r4,r4,exc_exit_restart@l
 777	cmplw	r12,r4
 778	blt	3f
 779	lis	r3,fee_restarts@ha
 780	tophys(r3,r3)
 781	lwz	r5,fee_restarts@l(r3)
 782	addi	r5,r5,1
 783	stw	r5,fee_restarts@l(r3)
 784	mr	r12,r4		/* restart at exc_exit_restart */
 785	b	2b
 786
 787	.section .bss
 788	.align	2
 789fee_restarts:
 790	.space	4
 791	.previous
 792
 793/* aargh, a nonrecoverable interrupt, panic */
 794/* aargh, we don't know which trap this is */
 795/* but the 601 doesn't implement the RI bit, so assume it's OK */
 7963:
 797BEGIN_FTR_SECTION
 798	b	2b
 799END_FTR_SECTION_IFSET(CPU_FTR_601)
 800	li	r10,-1
 801	stw	r10,_TRAP(r11)
 802	addi	r3,r1,STACK_FRAME_OVERHEAD
 803	lis	r10,MSR_KERNEL@h
 804	ori	r10,r10,MSR_KERNEL@l
 805	bl	transfer_to_handler_full
 806	.long	nonrecoverable_exception
 807	.long	ret_from_except
 808#endif
 809
 810	.globl	ret_from_except_full
 811ret_from_except_full:
 812	REST_NVGPRS(r1)
 813	/* fall through */
 814
 815	.globl	ret_from_except
 816ret_from_except:
 817	/* Hard-disable interrupts so that current_thread_info()->flags
 818	 * can't change between when we test it and when we return
 819	 * from the interrupt. */
 820	/* Note: We don't bother telling lockdep about it */
 821	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 822	SYNC			/* Some chip revs have problems here... */
 823	MTMSRD(r10)		/* disable interrupts */
 824
 825	lwz	r3,_MSR(r1)	/* Returning to user mode? */
 826	andi.	r0,r3,MSR_PR
 827	beq	resume_kernel
 828
 829user_exc_return:		/* r10 contains MSR_KERNEL here */
 830	/* Check current_thread_info()->flags */
 831	CURRENT_THREAD_INFO(r9, r1)
 832	lwz	r9,TI_FLAGS(r9)
 833	andi.	r0,r9,_TIF_USER_WORK_MASK
 834	bne	do_work
 835
 836restore_user:
 837#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 838	/* Check whether this process has its own DBCR0 value.  The internal
 839	   debug mode bit tells us that dbcr0 should be loaded. */
 840	lwz	r0,THREAD+THREAD_DBCR0(r2)
 841	andis.	r10,r0,DBCR0_IDM@h
 842	bnel-	load_dbcr0
 843#endif
 844
 845	b	restore
 846
 847/* N.B. the only way to get here is from the beq following ret_from_except. */
 848resume_kernel:
 849	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
 850	CURRENT_THREAD_INFO(r9, r1)
 851	lwz	r8,TI_FLAGS(r9)
 852	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
 853	beq+	1f
 854
 855	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
 856
 857	lwz	r3,GPR1(r1)
 858	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
 859	mr	r4,r1			/* src:  current exception frame */
 860	mr	r1,r3			/* Reroute the trampoline frame to r1 */
 861
 862	/* Copy from the original to the trampoline. */
 863	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
 864	li	r6,0			/* start offset: 0 */
 865	mtctr	r5
 8662:	lwzx	r0,r6,r4
 867	stwx	r0,r6,r3
 868	addi	r6,r6,4
 869	bdnz	2b
 870
 871	/* Do real store operation to complete stwu */
 872	lwz	r5,GPR1(r1)
 873	stw	r8,0(r5)
 874
 875	/* Clear _TIF_EMULATE_STACK_STORE flag */
 876	lis	r11,_TIF_EMULATE_STACK_STORE@h
 877	addi	r5,r9,TI_FLAGS
 8780:	lwarx	r8,0,r5
 879	andc	r8,r8,r11
 880#ifdef CONFIG_IBM405_ERR77
 881	dcbt	0,r5
 882#endif
 883	stwcx.	r8,0,r5
 884	bne-	0b
 8851:
 886
 887#ifdef CONFIG_PREEMPT
 888	/* check current_thread_info->preempt_count */
 889	lwz	r0,TI_PREEMPT(r9)
 890	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
 891	bne	restore
 892	andi.	r8,r8,_TIF_NEED_RESCHED
 893	beq+	restore
 894	lwz	r3,_MSR(r1)
 895	andi.	r0,r3,MSR_EE	/* interrupts off? */
 896	beq	restore		/* don't schedule if so */
 897#ifdef CONFIG_TRACE_IRQFLAGS
 898	/* Lockdep thinks irqs are enabled, we need to call
 899	 * preempt_schedule_irq with IRQs off, so we inform lockdep
 900	 * now that we -did- turn them off already
 901	 */
 902	bl	trace_hardirqs_off
 903#endif
 9041:	bl	preempt_schedule_irq
 905	CURRENT_THREAD_INFO(r9, r1)
 906	lwz	r3,TI_FLAGS(r9)
 907	andi.	r0,r3,_TIF_NEED_RESCHED
 908	bne-	1b
 909#ifdef CONFIG_TRACE_IRQFLAGS
 910	/* And now, to properly rebalance the above, we tell lockdep they
 911	 * are being turned back on, which will happen when we return
 912	 */
 913	bl	trace_hardirqs_on
 914#endif
 915#endif /* CONFIG_PREEMPT */
 916
 917	/* interrupts are hard-disabled at this point */
 918restore:
 919#ifdef CONFIG_44x
 920BEGIN_MMU_FTR_SECTION
 921	b	1f
 922END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
 923	lis	r4,icache_44x_need_flush@ha
 924	lwz	r5,icache_44x_need_flush@l(r4)
 925	cmplwi	cr0,r5,0
 926	beq+	1f
 927	li	r6,0
 928	iccci	r0,r0
 929	stw	r6,icache_44x_need_flush@l(r4)
 9301:
 931#endif  /* CONFIG_44x */
 932
 933	lwz	r9,_MSR(r1)
 934#ifdef CONFIG_TRACE_IRQFLAGS
 935	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
 936	 * off in this assembly code while peeking at TI_FLAGS() and such. However
 937	 * we need to inform it if the exception turned interrupts off, and we
 938	 * are about to turn them back on.
 939	 *
 940	 * The problem here sadly is that we don't know whether the exception was
 941	 * one that turned interrupts off or not. So we always tell lockdep about
 942	 * turning them on here when we go back to wherever we came from with EE
 943	 * on, even if that may mean some redundant calls being tracked. Maybe later
 944	 * we could encode what the exception did somewhere or test the exception
 945	 * type in the pt_regs but that sounds overkill
 946	 */
 947	andi.	r10,r9,MSR_EE
 948	beq	1f
 949	/*
 950	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
 951	 * which is the stack frame here, we need to force a stack frame
 952	 * in case we came from user space.
 953	 */
 954	stwu	r1,-32(r1)
 955	mflr	r0
 956	stw	r0,4(r1)
 957	stwu	r1,-32(r1)
 958	bl	trace_hardirqs_on
 959	lwz	r1,0(r1)
 960	lwz	r1,0(r1)
 961	lwz	r9,_MSR(r1)
 9621:
 963#endif /* CONFIG_TRACE_IRQFLAGS */
 964
 965	lwz	r0,GPR0(r1)
 966	lwz	r2,GPR2(r1)
 967	REST_4GPRS(3, r1)
 968	REST_2GPRS(7, r1)
 969
 970	lwz	r10,_XER(r1)
 971	lwz	r11,_CTR(r1)
 972	mtspr	SPRN_XER,r10
 973	mtctr	r11
 974
 975	PPC405_ERR77(0,r1)
 976BEGIN_FTR_SECTION
 977	lwarx	r11,0,r1
 978END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 979	stwcx.	r0,0,r1			/* to clear the reservation */
 980
 981#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 982	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
 983	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
 984
 985	lwz	r10,_CCR(r1)
 986	lwz	r11,_LINK(r1)
 987	mtcrf	0xFF,r10
 988	mtlr	r11
 989
 990	/*
 991	 * Once we put values in SRR0 and SRR1, we are in a state
 992	 * where exceptions are not recoverable, since taking an
 993	 * exception will trash SRR0 and SRR1.  Therefore we clear the
 994	 * MSR:RI bit to indicate this.  If we do take an exception,
 995	 * we can't return to the point of the exception but we
 996	 * can restart the exception exit path at the label
 997	 * exc_exit_restart below.  -- paulus
 998	 */
 999	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
1000	SYNC
1001	MTMSRD(r10)		/* clear the RI bit */
1002	.globl exc_exit_restart
1003exc_exit_restart:
1004	lwz	r12,_NIP(r1)
1005	FIX_SRR1(r9,r10)
1006	mtspr	SPRN_SRR0,r12
1007	mtspr	SPRN_SRR1,r9
1008	REST_4GPRS(9, r1)
1009	lwz	r1,GPR1(r1)
1010	.globl exc_exit_restart_end
1011exc_exit_restart_end:
1012	SYNC
1013	RFI
1014
1015#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
1016	/*
1017	 * This is a bit different on 4xx/Book-E because it doesn't have
1018	 * the RI bit in the MSR.
1019	 * The TLB miss handler checks if we have interrupted
1020	 * the exception exit path and restarts it if so
1021	 * (well maybe one day it will... :).
1022	 */
1023	lwz	r11,_LINK(r1)
1024	mtlr	r11
1025	lwz	r10,_CCR(r1)
1026	mtcrf	0xff,r10
1027	REST_2GPRS(9, r1)
1028	.globl exc_exit_restart
1029exc_exit_restart:
1030	lwz	r11,_NIP(r1)
1031	lwz	r12,_MSR(r1)
1032exc_exit_start:
1033	mtspr	SPRN_SRR0,r11
1034	mtspr	SPRN_SRR1,r12
1035	REST_2GPRS(11, r1)
1036	lwz	r1,GPR1(r1)
1037	.globl exc_exit_restart_end
1038exc_exit_restart_end:
1039	PPC405_ERR77_SYNC
1040	rfi
1041	b	.			/* prevent prefetch past rfi */
1042
1043/*
1044 * Returning from a critical interrupt in user mode doesn't need
1045 * to be any different from a normal exception.  For a critical
1046 * interrupt in the kernel, we just return (without checking for
1047 * preemption) since the interrupt may have happened at some crucial
1048 * place (e.g. inside the TLB miss handler), and because we will be
1049 * running with r1 pointing into critical_stack, not the current
1050 * process's kernel stack (and therefore current_thread_info() will
1051 * give the wrong answer).
1052 * We have to restore various SPRs that may have been in use at the
1053 * time of the critical interrupt.
1054 *
1055 */
1056#ifdef CONFIG_40x
1057#define PPC_40x_TURN_OFF_MSR_DR						    \
1058	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
1059	 * assume the instructions here are mapped by a pinned TLB entry */ \
1060	li	r10,MSR_IR;						    \
1061	mtmsr	r10;							    \
1062	isync;								    \
1063	tophys(r1, r1);
1064#else
1065#define PPC_40x_TURN_OFF_MSR_DR
1066#endif
1067
1068#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
1069	REST_NVGPRS(r1);						\
1070	lwz	r3,_MSR(r1);						\
1071	andi.	r3,r3,MSR_PR;						\
1072	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
1073	bne	user_exc_return;					\
1074	lwz	r0,GPR0(r1);						\
1075	lwz	r2,GPR2(r1);						\
1076	REST_4GPRS(3, r1);						\
1077	REST_2GPRS(7, r1);						\
1078	lwz	r10,_XER(r1);						\
1079	lwz	r11,_CTR(r1);						\
1080	mtspr	SPRN_XER,r10;						\
1081	mtctr	r11;							\
1082	PPC405_ERR77(0,r1);						\
1083	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1084	lwz	r11,_LINK(r1);						\
1085	mtlr	r11;							\
1086	lwz	r10,_CCR(r1);						\
1087	mtcrf	0xff,r10;						\
1088	PPC_40x_TURN_OFF_MSR_DR;					\
1089	lwz	r9,_DEAR(r1);						\
1090	lwz	r10,_ESR(r1);						\
1091	mtspr	SPRN_DEAR,r9;						\
1092	mtspr	SPRN_ESR,r10;						\
1093	lwz	r11,_NIP(r1);						\
1094	lwz	r12,_MSR(r1);						\
1095	mtspr	exc_lvl_srr0,r11;					\
1096	mtspr	exc_lvl_srr1,r12;					\
1097	lwz	r9,GPR9(r1);						\
1098	lwz	r12,GPR12(r1);						\
1099	lwz	r10,GPR10(r1);						\
1100	lwz	r11,GPR11(r1);						\
1101	lwz	r1,GPR1(r1);						\
1102	PPC405_ERR77_SYNC;						\
1103	exc_lvl_rfi;							\
1104	b	.;		/* prevent prefetch past exc_lvl_rfi */
1105
1106#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1107	lwz	r9,_##exc_lvl_srr0(r1);					\
1108	lwz	r10,_##exc_lvl_srr1(r1);				\
1109	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1110	mtspr	SPRN_##exc_lvl_srr1,r10;
1111
1112#if defined(CONFIG_PPC_BOOK3E_MMU)
1113#ifdef CONFIG_PHYS_64BIT
1114#define	RESTORE_MAS7							\
1115	lwz	r11,MAS7(r1);						\
1116	mtspr	SPRN_MAS7,r11;
1117#else
1118#define	RESTORE_MAS7
1119#endif /* CONFIG_PHYS_64BIT */
1120#define RESTORE_MMU_REGS						\
1121	lwz	r9,MAS0(r1);						\
1122	lwz	r10,MAS1(r1);						\
1123	lwz	r11,MAS2(r1);						\
1124	mtspr	SPRN_MAS0,r9;						\
1125	lwz	r9,MAS3(r1);						\
1126	mtspr	SPRN_MAS1,r10;						\
1127	lwz	r10,MAS6(r1);						\
1128	mtspr	SPRN_MAS2,r11;						\
1129	mtspr	SPRN_MAS3,r9;						\
1130	mtspr	SPRN_MAS6,r10;						\
1131	RESTORE_MAS7;
1132#elif defined(CONFIG_44x)
1133#define RESTORE_MMU_REGS						\
1134	lwz	r9,MMUCR(r1);						\
1135	mtspr	SPRN_MMUCR,r9;
1136#else
1137#define RESTORE_MMU_REGS
1138#endif
1139
1140#ifdef CONFIG_40x
1141	.globl	ret_from_crit_exc
1142ret_from_crit_exc:
1143	mfspr	r9,SPRN_SPRG_THREAD
1144	lis	r10,saved_ksp_limit@ha;
1145	lwz	r10,saved_ksp_limit@l(r10);
1146	tovirt(r9,r9);
1147	stw	r10,KSP_LIMIT(r9)
1148	lis	r9,crit_srr0@ha;
1149	lwz	r9,crit_srr0@l(r9);
1150	lis	r10,crit_srr1@ha;
1151	lwz	r10,crit_srr1@l(r10);
1152	mtspr	SPRN_SRR0,r9;
1153	mtspr	SPRN_SRR1,r10;
1154	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1155#endif /* CONFIG_40x */
1156
1157#ifdef CONFIG_BOOKE
1158	.globl	ret_from_crit_exc
1159ret_from_crit_exc:
1160	mfspr	r9,SPRN_SPRG_THREAD
1161	lwz	r10,SAVED_KSP_LIMIT(r1)
1162	stw	r10,KSP_LIMIT(r9)
1163	RESTORE_xSRR(SRR0,SRR1);
1164	RESTORE_MMU_REGS;
1165	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1166
1167	.globl	ret_from_debug_exc
1168ret_from_debug_exc:
1169	mfspr	r9,SPRN_SPRG_THREAD
1170	lwz	r10,SAVED_KSP_LIMIT(r1)
1171	stw	r10,KSP_LIMIT(r9)
1172	lwz	r9,THREAD_INFO-THREAD(r9)
1173	CURRENT_THREAD_INFO(r10, r1)
1174	lwz	r10,TI_PREEMPT(r10)
1175	stw	r10,TI_PREEMPT(r9)
1176	RESTORE_xSRR(SRR0,SRR1);
1177	RESTORE_xSRR(CSRR0,CSRR1);
1178	RESTORE_MMU_REGS;
1179	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1180
1181	.globl	ret_from_mcheck_exc
1182ret_from_mcheck_exc:
1183	mfspr	r9,SPRN_SPRG_THREAD
1184	lwz	r10,SAVED_KSP_LIMIT(r1)
1185	stw	r10,KSP_LIMIT(r9)
1186	RESTORE_xSRR(SRR0,SRR1);
1187	RESTORE_xSRR(CSRR0,CSRR1);
1188	RESTORE_xSRR(DSRR0,DSRR1);
1189	RESTORE_MMU_REGS;
1190	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1191#endif /* CONFIG_BOOKE */
1192
1193/*
1194 * Load the DBCR0 value for a task that is being ptraced,
1195 * having first saved away the global DBCR0.  Note that r0
1196 * has the dbcr0 value to set upon entry to this.
1197 */
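/*
 * global_dbcr0 (defined in .bss below) is an 8-byte per-CPU slot: the saved
 * DBCR0 value at offset 0 and a counter at offset 4, which is incremented
 * here and decremented again in transfer_to_handler.
 */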
1198load_dbcr0:
1199	mfmsr	r10		/* first disable debug exceptions */
1200	rlwinm	r10,r10,0,~MSR_DE
1201	mtmsr	r10
1202	isync
1203	mfspr	r10,SPRN_DBCR0
1204	lis	r11,global_dbcr0@ha
1205	addi	r11,r11,global_dbcr0@l
1206#ifdef CONFIG_SMP
1207	CURRENT_THREAD_INFO(r9, r1)
1208	lwz	r9,TI_CPU(r9)
1209	slwi	r9,r9,3
1210	add	r11,r11,r9
1211#endif
1212	stw	r10,0(r11)
1213	mtspr	SPRN_DBCR0,r0
1214	lwz	r10,4(r11)
1215	addi	r10,r10,1
1216	stw	r10,4(r11)
1217	li	r11,-1
1218	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1219	blr
1220
1221	.section .bss
1222	.align	4
1223global_dbcr0:
1224	.space	8*NR_CPUS
1225	.previous
1226#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1227
1228do_work:			/* r10 contains MSR_KERNEL here */
1229	andi.	r0,r9,_TIF_NEED_RESCHED
1230	beq	do_user_signal
1231
1232do_resched:			/* r10 contains MSR_KERNEL here */
1233	/* Note: We don't need to inform lockdep that we are enabling
1234	 * interrupts here. As far as it knows, they are already enabled
1235	 */
1236	ori	r10,r10,MSR_EE
1237	SYNC
1238	MTMSRD(r10)		/* hard-enable interrupts */
1239	bl	schedule
1240recheck:
1241	/* Note: And we don't tell it we are disabling them again
1242	 * either. Those disable/enable cycles used to peek at
1243	 * TI_FLAGS aren't advertised.
1244	 */
1245	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1246	SYNC
1247	MTMSRD(r10)		/* disable interrupts */
1248	CURRENT_THREAD_INFO(r9, r1)
1249	lwz	r9,TI_FLAGS(r9)
1250	andi.	r0,r9,_TIF_NEED_RESCHED
1251	bne-	do_resched
1252	andi.	r0,r9,_TIF_USER_WORK_MASK
1253	beq	restore_user
1254do_user_signal:			/* r10 contains MSR_KERNEL here */
1255	ori	r10,r10,MSR_EE
1256	SYNC
1257	MTMSRD(r10)		/* hard-enable interrupts */
1258	/* save r13-r31 in the exception frame, if not already done */
1259	lwz	r3,_TRAP(r1)
1260	andi.	r0,r3,1
1261	beq	2f
1262	SAVE_NVGPRS(r1)
1263	rlwinm	r3,r3,0,0,30
1264	stw	r3,_TRAP(r1)
12652:	addi	r3,r1,STACK_FRAME_OVERHEAD
1266	mr	r4,r9
1267	bl	do_notify_resume
1268	REST_NVGPRS(r1)
1269	b	recheck
1270
1271/*
1272 * We come here when we are at the end of handling an exception
1273 * that occurred at a place where taking an exception will lose
1274 * state information, such as the contents of SRR0 and SRR1.
1275 */
1276nonrecoverable:
1277	lis	r10,exc_exit_restart_end@ha
1278	addi	r10,r10,exc_exit_restart_end@l
1279	cmplw	r12,r10
1280	bge	3f
1281	lis	r11,exc_exit_restart@ha
1282	addi	r11,r11,exc_exit_restart@l
1283	cmplw	r12,r11
1284	blt	3f
1285	lis	r10,ee_restarts@ha
1286	lwz	r12,ee_restarts@l(r10)
1287	addi	r12,r12,1
1288	stw	r12,ee_restarts@l(r10)
1289	mr	r12,r11		/* restart at exc_exit_restart */
1290	blr
12913:	/* OK, we can't recover, kill this process */
1292	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1293BEGIN_FTR_SECTION
1294	blr
1295END_FTR_SECTION_IFSET(CPU_FTR_601)
1296	lwz	r3,_TRAP(r1)
1297	andi.	r0,r3,1
1298	beq	4f
1299	SAVE_NVGPRS(r1)
1300	rlwinm	r3,r3,0,0,30
1301	stw	r3,_TRAP(r1)
13024:	addi	r3,r1,STACK_FRAME_OVERHEAD
1303	bl	nonrecoverable_exception
1304	/* shouldn't return */
1305	b	4b
1306
1307	.section .bss
1308	.align	2
1309ee_restarts:
1310	.space	4
1311	.previous
1312
1313/*
1314 * PROM code for specific machines follows.  Put it
1315 * here so it's easy to add arch-specific sections later.
1316 * -- Cort
1317 */
1318#ifdef CONFIG_PPC_RTAS
1319/*
1320 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1321 * called with the MMU off.
1322 */
1323_GLOBAL(enter_rtas)
1324	stwu	r1,-INT_FRAME_SIZE(r1)
1325	mflr	r0
1326	stw	r0,INT_FRAME_SIZE+4(r1)
1327	LOAD_REG_ADDR(r4, rtas)
1328	lis	r6,1f@ha	/* physical return address for rtas */
1329	addi	r6,r6,1f@l
1330	tophys(r6,r6)
1331	tophys(r7,r1)
1332	lwz	r8,RTASENTRY(r4)
1333	lwz	r4,RTASBASE(r4)
1334	mfmsr	r9
1335	stw	r9,8(r1)
1336	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1337	SYNC			/* disable interrupts so SRR0/1 */
1338	MTMSRD(r0)		/* don't get trashed */
1339	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1340	mtlr	r6
1341	mtspr	SPRN_SPRG_RTAS,r7
1342	mtspr	SPRN_SRR0,r8
1343	mtspr	SPRN_SRR1,r9
1344	RFI
13451:	tophys(r9,r1)
1346	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1347	lwz	r9,8(r9)	/* original msr value */
1348	FIX_SRR1(r9,r0)
1349	addi	r1,r1,INT_FRAME_SIZE
1350	li	r0,0
1351	mtspr	SPRN_SPRG_RTAS,r0
1352	mtspr	SPRN_SRR0,r8
1353	mtspr	SPRN_SRR1,r9
1354	RFI			/* return to caller */
1355
1356	.globl	machine_check_in_rtas
1357machine_check_in_rtas:
1358	twi	31,0,0
1359	/* XXX load up BATs and panic */
1360
1361#endif /* CONFIG_PPC_RTAS */
1362
1363#ifdef CONFIG_FUNCTION_TRACER
1364#ifdef CONFIG_DYNAMIC_FTRACE
1365_GLOBAL(mcount)
1366_GLOBAL(_mcount)
1367	/*
1368	 * It is required that _mcount on PPC32 must preserve the
1369	 * link register. But we have r0 to play with. We use r0
1370	 * to push the return address back to the caller of mcount
1371	 * into the ctr register, restore the link register and
1372	 * then jump back using the ctr register.
1373	 */
1374	mflr	r0
1375	mtctr	r0
1376	lwz	r0, 4(r1)
1377	mtlr	r0
1378	bctr
1379
1380_GLOBAL(ftrace_caller)
1381	MCOUNT_SAVE_FRAME
1382	/* r3 ends up with link register */
1383	subi	r3, r3, MCOUNT_INSN_SIZE
1384.globl ftrace_call
1385ftrace_call:
1386	bl	ftrace_stub
1387	nop
1388#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1389.globl ftrace_graph_call
1390ftrace_graph_call:
1391	b	ftrace_graph_stub
1392_GLOBAL(ftrace_graph_stub)
1393#endif
1394	MCOUNT_RESTORE_FRAME
1395	/* old link register ends up in ctr reg */
1396	bctr
1397#else
1398_GLOBAL(mcount)
1399_GLOBAL(_mcount)
1400
1401	MCOUNT_SAVE_FRAME
1402
1403	subi	r3, r3, MCOUNT_INSN_SIZE
1404	LOAD_REG_ADDR(r5, ftrace_trace_function)
1405	lwz	r5,0(r5)
1406
1407	mtctr	r5
1408	bctrl
1409	nop
1410
1411#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1412	b	ftrace_graph_caller
1413#endif
1414	MCOUNT_RESTORE_FRAME
1415	bctr
1416#endif
1417
1418_GLOBAL(ftrace_stub)
1419	blr
1420
1421#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1422_GLOBAL(ftrace_graph_caller)
1423	/* load r4 with local address */
1424	lwz	r4, 44(r1)
1425	subi	r4, r4, MCOUNT_INSN_SIZE
1426
1427	/* get the parent address */
1428	addi	r3, r1, 52
1429
1430	bl	prepare_ftrace_return
1431	nop
1432
1433	MCOUNT_RESTORE_FRAME
1434	/* old link register ends up in ctr reg */
1435	bctr
1436
1437_GLOBAL(return_to_handler)
1438	/* need to save return values */
1439	stwu	r1, -32(r1)
1440	stw	r3, 20(r1)
1441	stw	r4, 16(r1)
1442	stw	r31, 12(r1)
1443	mr	r31, r1
1444
1445	bl	ftrace_return_to_handler
1446	nop
1447
1448	/* return value has real return address */
1449	mtlr	r3
1450
1451	lwz	r3, 20(r1)
1452	lwz	r4, 16(r1)
1453	lwz	r31,12(r1)
1454	lwz	r1, 0(r1)
1455
1456	/* Jump back to real return address */
1457	blr
1458#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1459
1460#endif /* CONFIG_FUNCTION_TRACER */