Loading...
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * PowerPC version
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
6 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
7 * Adapted for Power Macintosh by Paul Mackerras.
8 * Low-level exception handlers and MMU support
9 * rewritten by Paul Mackerras.
10 * Copyright (C) 1996 Paul Mackerras.
11 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
12 *
13 * This file contains the system call entry code, context switch
14 * code, and exception/interrupt return code for PowerPC.
15 */
16
17#include <linux/errno.h>
18#include <linux/err.h>
19#include <linux/sys.h>
20#include <linux/threads.h>
21#include <asm/reg.h>
22#include <asm/page.h>
23#include <asm/mmu.h>
24#include <asm/cputable.h>
25#include <asm/thread_info.h>
26#include <asm/ppc_asm.h>
27#include <asm/asm-offsets.h>
28#include <asm/unistd.h>
29#include <asm/ptrace.h>
30#include <asm/export.h>
31#include <asm/feature-fixups.h>
32#include <asm/barrier.h>
33#include <asm/kup.h>
34#include <asm/bug.h>
35
36#include "head_32.h"
37
38/*
39 * powerpc relies on return from interrupt/syscall being context synchronising
40 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
41 * synchronisation instructions.
42 */
43
44/*
45 * Align to 4k in order to ensure that all functions modyfing srr0/srr1
46 * fit into one page in order to not encounter a TLB miss between the
47 * modification of srr0/srr1 and the associated rfi.
48 */
49 .align 12
50
51#ifdef CONFIG_BOOKE
52 .globl mcheck_transfer_to_handler
53mcheck_transfer_to_handler:
54 mfspr r0,SPRN_DSRR0
55 stw r0,_DSRR0(r11)
56 mfspr r0,SPRN_DSRR1
57 stw r0,_DSRR1(r11)
58 /* fall through */
59_ASM_NOKPROBE_SYMBOL(mcheck_transfer_to_handler)
60
61 .globl debug_transfer_to_handler
62debug_transfer_to_handler:
63 mfspr r0,SPRN_CSRR0
64 stw r0,_CSRR0(r11)
65 mfspr r0,SPRN_CSRR1
66 stw r0,_CSRR1(r11)
67 /* fall through */
68_ASM_NOKPROBE_SYMBOL(debug_transfer_to_handler)
69
70 .globl crit_transfer_to_handler
71crit_transfer_to_handler:
72#ifdef CONFIG_PPC_BOOK3E_MMU
73 mfspr r0,SPRN_MAS0
74 stw r0,MAS0(r11)
75 mfspr r0,SPRN_MAS1
76 stw r0,MAS1(r11)
77 mfspr r0,SPRN_MAS2
78 stw r0,MAS2(r11)
79 mfspr r0,SPRN_MAS3
80 stw r0,MAS3(r11)
81 mfspr r0,SPRN_MAS6
82 stw r0,MAS6(r11)
83#ifdef CONFIG_PHYS_64BIT
84 mfspr r0,SPRN_MAS7
85 stw r0,MAS7(r11)
86#endif /* CONFIG_PHYS_64BIT */
87#endif /* CONFIG_PPC_BOOK3E_MMU */
88#ifdef CONFIG_44x
89 mfspr r0,SPRN_MMUCR
90 stw r0,MMUCR(r11)
91#endif
92 mfspr r0,SPRN_SRR0
93 stw r0,_SRR0(r11)
94 mfspr r0,SPRN_SRR1
95 stw r0,_SRR1(r11)
96
97 /* set the stack limit to the current stack */
98 mfspr r8,SPRN_SPRG_THREAD
99 lwz r0,KSP_LIMIT(r8)
100 stw r0,SAVED_KSP_LIMIT(r11)
101 rlwinm r0,r1,0,0,(31 - THREAD_SHIFT)
102 stw r0,KSP_LIMIT(r8)
103 /* fall through */
104_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
105#endif
106
107#ifdef CONFIG_40x
108 .globl crit_transfer_to_handler
109crit_transfer_to_handler:
110 lwz r0,crit_r10@l(0)
111 stw r0,GPR10(r11)
112 lwz r0,crit_r11@l(0)
113 stw r0,GPR11(r11)
114 mfspr r0,SPRN_SRR0
115 stw r0,crit_srr0@l(0)
116 mfspr r0,SPRN_SRR1
117 stw r0,crit_srr1@l(0)
118
119 /* set the stack limit to the current stack */
120 mfspr r8,SPRN_SPRG_THREAD
121 lwz r0,KSP_LIMIT(r8)
122 stw r0,saved_ksp_limit@l(0)
123 rlwinm r0,r1,0,0,(31 - THREAD_SHIFT)
124 stw r0,KSP_LIMIT(r8)
125 /* fall through */
126_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
127#endif
128
129/*
130 * This code finishes saving the registers to the exception frame
131 * and jumps to the appropriate handler for the exception, turning
132 * on address translation.
133 * Note that we rely on the caller having set cr0.eq iff the exception
134 * occurred in kernel mode (i.e. MSR:PR = 0).
135 */
136 .globl transfer_to_handler_full
137transfer_to_handler_full:
138 SAVE_NVGPRS(r11)
139_ASM_NOKPROBE_SYMBOL(transfer_to_handler_full)
140 /* fall through */
141
142 .globl transfer_to_handler
143transfer_to_handler:
144 stw r2,GPR2(r11)
145 stw r12,_NIP(r11)
146 stw r9,_MSR(r11)
147 andi. r2,r9,MSR_PR
148 mfctr r12
149 mfspr r2,SPRN_XER
150 stw r12,_CTR(r11)
151 stw r2,_XER(r11)
152 mfspr r12,SPRN_SPRG_THREAD
153 tovirt_vmstack r12, r12
154 beq 2f /* if from user, fix up THREAD.regs */
155 addi r2, r12, -THREAD
156 addi r11,r1,STACK_FRAME_OVERHEAD
157 stw r11,PT_REGS(r12)
158#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
159 /* Check to see if the dbcr0 register is set up to debug. Use the
160 internal debug mode bit to do this. */
161 lwz r12,THREAD_DBCR0(r12)
162 andis. r12,r12,DBCR0_IDM@h
163#endif
164 ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
165#ifdef CONFIG_PPC_BOOK3S_32
166 kuep_lock r11, r12
167#endif
168#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
169 beq+ 3f
170 /* From user and task is ptraced - load up global dbcr0 */
171 li r12,-1 /* clear all pending debug events */
172 mtspr SPRN_DBSR,r12
173 lis r11,global_dbcr0@ha
174 tophys(r11,r11)
175 addi r11,r11,global_dbcr0@l
176#ifdef CONFIG_SMP
177 lwz r9,TASK_CPU(r2)
178 slwi r9,r9,3
179 add r11,r11,r9
180#endif
181 lwz r12,0(r11)
182 mtspr SPRN_DBCR0,r12
183 lwz r12,4(r11)
184 addi r12,r12,-1
185 stw r12,4(r11)
186#endif
187
188 b 3f
189
1902: /* if from kernel, check interrupted DOZE/NAP mode and
191 * check for stack overflow
192 */
193 kuap_save_and_lock r11, r12, r9, r2, r6
194 addi r2, r12, -THREAD
195#ifndef CONFIG_VMAP_STACK
196 lwz r9,KSP_LIMIT(r12)
197 cmplw r1,r9 /* if r1 <= ksp_limit */
198 ble- stack_ovf /* then the kernel stack overflowed */
199#endif
2005:
201#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
202 lwz r12,TI_LOCAL_FLAGS(r2)
203 mtcrf 0x01,r12
204 bt- 31-TLF_NAPPING,4f
205 bt- 31-TLF_SLEEPING,7f
206#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
207 .globl transfer_to_handler_cont
208transfer_to_handler_cont:
2093:
210 mflr r9
211 tovirt_novmstack r2, r2 /* set r2 to current */
212 tovirt_vmstack r9, r9
213 lwz r11,0(r9) /* virtual address of handler */
214 lwz r9,4(r9) /* where to go when done */
215#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
216 mtspr SPRN_NRI, r0
217#endif
218#ifdef CONFIG_TRACE_IRQFLAGS
219 /*
220 * When tracing IRQ state (lockdep) we enable the MMU before we call
221 * the IRQ tracing functions as they might access vmalloc space or
222 * perform IOs for console output.
223 *
224 * To speed up the syscall path where interrupts stay on, let's check
225 * first if we are changing the MSR value at all.
226 */
227 tophys_novmstack r12, r1
228 lwz r12,_MSR(r12)
229 andi. r12,r12,MSR_EE
230 bne 1f
231
232 /* MSR isn't changing, just transition directly */
233#endif
234 mtspr SPRN_SRR0,r11
235 mtspr SPRN_SRR1,r10
236 mtlr r9
237 SYNC
238 RFI /* jump to handler, enable MMU */
239
240#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
2414: rlwinm r12,r12,0,~_TLF_NAPPING
242 stw r12,TI_LOCAL_FLAGS(r2)
243 b power_save_ppc32_restore
244
2457: rlwinm r12,r12,0,~_TLF_SLEEPING
246 stw r12,TI_LOCAL_FLAGS(r2)
247 lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
248 rlwinm r9,r9,0,~MSR_EE
249 lwz r12,_LINK(r11) /* and return to address in LR */
250 kuap_restore r11, r2, r3, r4, r5
251 lwz r2, GPR2(r11)
252 b fast_exception_return
253#endif
254_ASM_NOKPROBE_SYMBOL(transfer_to_handler)
255_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont)
256
257#ifdef CONFIG_TRACE_IRQFLAGS
2581: /* MSR is changing, re-enable MMU so we can notify lockdep. We need to
259 * keep interrupts disabled at this point otherwise we might risk
260 * taking an interrupt before we tell lockdep they are enabled.
261 */
262 lis r12,reenable_mmu@h
263 ori r12,r12,reenable_mmu@l
264 LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
265 mtspr SPRN_SRR0,r12
266 mtspr SPRN_SRR1,r0
267 SYNC
268 RFI
269
270reenable_mmu:
271 /*
272 * We save a bunch of GPRs,
273 * r3 can be different from GPR3(r1) at this point, r9 and r11
274 * contains the old MSR and handler address respectively,
275 * r4 & r5 can contain page fault arguments that need to be passed
276 * along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left
277 * clobbered as they aren't useful past this point.
278 */
279
280 stwu r1,-32(r1)
281 stw r9,8(r1)
282 stw r11,12(r1)
283 stw r3,16(r1)
284 stw r4,20(r1)
285 stw r5,24(r1)
286
287 /* If we are disabling interrupts (normal case), simply log it with
288 * lockdep
289 */
2901: bl trace_hardirqs_off
291 lwz r5,24(r1)
292 lwz r4,20(r1)
293 lwz r3,16(r1)
294 lwz r11,12(r1)
295 lwz r9,8(r1)
296 addi r1,r1,32
297 mtctr r11
298 mtlr r9
299 bctr /* jump to handler */
300#endif /* CONFIG_TRACE_IRQFLAGS */
301
302#ifndef CONFIG_VMAP_STACK
303/*
304 * On kernel stack overflow, load up an initial stack pointer
305 * and call StackOverflow(regs), which should not return.
306 */
307stack_ovf:
308 /* sometimes we use a statically-allocated stack, which is OK. */
309 lis r12,_end@h
310 ori r12,r12,_end@l
311 cmplw r1,r12
312 ble 5b /* r1 <= &_end is OK */
313 SAVE_NVGPRS(r11)
314 addi r3,r1,STACK_FRAME_OVERHEAD
315 lis r1,init_thread_union@ha
316 addi r1,r1,init_thread_union@l
317 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
318 lis r9,StackOverflow@ha
319 addi r9,r9,StackOverflow@l
320 LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
321#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
322 mtspr SPRN_NRI, r0
323#endif
324 mtspr SPRN_SRR0,r9
325 mtspr SPRN_SRR1,r10
326 SYNC
327 RFI
328_ASM_NOKPROBE_SYMBOL(stack_ovf)
329#endif
330
331#ifdef CONFIG_TRACE_IRQFLAGS
332trace_syscall_entry_irq_off:
333 /*
334 * Syscall shouldn't happen while interrupts are disabled,
335 * so let's do a warning here.
336 */
3370: trap
338 EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
339 bl trace_hardirqs_on
340
341 /* Now enable for real */
342 LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
343 mtmsr r10
344
345 REST_GPR(0, r1)
346 REST_4GPRS(3, r1)
347 REST_2GPRS(7, r1)
348 b DoSyscall
349#endif /* CONFIG_TRACE_IRQFLAGS */
350
351 .globl transfer_to_syscall
352transfer_to_syscall:
353#ifdef CONFIG_TRACE_IRQFLAGS
354 andi. r12,r9,MSR_EE
355 beq- trace_syscall_entry_irq_off
356#endif /* CONFIG_TRACE_IRQFLAGS */
357
358/*
359 * Handle a system call.
360 */
361 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
362 .stabs "entry_32.S",N_SO,0,0,0f
3630:
364
365_GLOBAL(DoSyscall)
366 stw r3,ORIG_GPR3(r1)
367 li r12,0
368 stw r12,RESULT(r1)
369#ifdef CONFIG_TRACE_IRQFLAGS
370 /* Make sure interrupts are enabled */
371 mfmsr r11
372 andi. r12,r11,MSR_EE
373 /* We came in with interrupts disabled, we WARN and mark them enabled
374 * for lockdep now */
3750: tweqi r12, 0
376 EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
377#endif /* CONFIG_TRACE_IRQFLAGS */
378 lwz r11,TI_FLAGS(r2)
379 andi. r11,r11,_TIF_SYSCALL_DOTRACE
380 bne- syscall_dotrace
381syscall_dotrace_cont:
382 cmplwi 0,r0,NR_syscalls
383 lis r10,sys_call_table@h
384 ori r10,r10,sys_call_table@l
385 slwi r0,r0,2
386 bge- 66f
387
388 barrier_nospec_asm
389 /*
390 * Prevent the load of the handler below (based on the user-passed
391 * system call number) being speculatively executed until the test
392 * against NR_syscalls and branch to .66f above has
393 * committed.
394 */
395
396 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
397 mtlr r10
398 addi r9,r1,STACK_FRAME_OVERHEAD
399 PPC440EP_ERR42
400 blrl /* Call handler */
401 .globl ret_from_syscall
402ret_from_syscall:
403#ifdef CONFIG_DEBUG_RSEQ
404 /* Check whether the syscall is issued inside a restartable sequence */
405 stw r3,GPR3(r1)
406 addi r3,r1,STACK_FRAME_OVERHEAD
407 bl rseq_syscall
408 lwz r3,GPR3(r1)
409#endif
410 mr r6,r3
411 /* disable interrupts so current_thread_info()->flags can't change */
412 LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) /* doesn't include MSR_EE */
413 /* Note: We don't bother telling lockdep about it */
414 SYNC
415 mtmsr r10
416 lwz r9,TI_FLAGS(r2)
417 li r8,-MAX_ERRNO
418 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
419 bne- syscall_exit_work
420 cmplw 0,r3,r8
421 blt+ syscall_exit_cont
422 lwz r11,_CCR(r1) /* Load CR */
423 neg r3,r3
424 oris r11,r11,0x1000 /* Set SO bit in CR */
425 stw r11,_CCR(r1)
426syscall_exit_cont:
427 lwz r8,_MSR(r1)
428#ifdef CONFIG_TRACE_IRQFLAGS
429 /* If we are going to return from the syscall with interrupts
430 * off, we trace that here. It shouldn't normally happen.
431 */
432 andi. r10,r8,MSR_EE
433 bne+ 1f
434 stw r3,GPR3(r1)
435 bl trace_hardirqs_off
436 lwz r3,GPR3(r1)
4371:
438#endif /* CONFIG_TRACE_IRQFLAGS */
439#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
440 /* If the process has its own DBCR0 value, load it up. The internal
441 debug mode bit tells us that dbcr0 should be loaded. */
442 lwz r0,THREAD+THREAD_DBCR0(r2)
443 andis. r10,r0,DBCR0_IDM@h
444 bnel- load_dbcr0
445#endif
446#ifdef CONFIG_44x
447BEGIN_MMU_FTR_SECTION
448 lis r4,icache_44x_need_flush@ha
449 lwz r5,icache_44x_need_flush@l(r4)
450 cmplwi cr0,r5,0
451 bne- 2f
4521:
453END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
454#endif /* CONFIG_44x */
455BEGIN_FTR_SECTION
456 lwarx r7,0,r1
457END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
458 stwcx. r0,0,r1 /* to clear the reservation */
459 ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
460#ifdef CONFIG_PPC_BOOK3S_32
461 kuep_unlock r5, r7
462#endif
463 kuap_check r2, r4
464 lwz r4,_LINK(r1)
465 lwz r5,_CCR(r1)
466 mtlr r4
467 mtcr r5
468 lwz r7,_NIP(r1)
469 lwz r2,GPR2(r1)
470 lwz r1,GPR1(r1)
471syscall_exit_finish:
472#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
473 mtspr SPRN_NRI, r0
474#endif
475 mtspr SPRN_SRR0,r7
476 mtspr SPRN_SRR1,r8
477 SYNC
478 RFI
479_ASM_NOKPROBE_SYMBOL(syscall_exit_finish)
480#ifdef CONFIG_44x
4812: li r7,0
482 iccci r0,r0
483 stw r7,icache_44x_need_flush@l(r4)
484 b 1b
485#endif /* CONFIG_44x */
486
48766: li r3,-ENOSYS
488 b ret_from_syscall
489
490 .globl ret_from_fork
491ret_from_fork:
492 REST_NVGPRS(r1)
493 bl schedule_tail
494 li r3,0
495 b ret_from_syscall
496
497 .globl ret_from_kernel_thread
498ret_from_kernel_thread:
499 REST_NVGPRS(r1)
500 bl schedule_tail
501 mtlr r14
502 mr r3,r15
503 PPC440EP_ERR42
504 blrl
505 li r3,0
506 b ret_from_syscall
507
508/* Traced system call support */
509syscall_dotrace:
510 SAVE_NVGPRS(r1)
511 li r0,0xc00
512 stw r0,_TRAP(r1)
513 addi r3,r1,STACK_FRAME_OVERHEAD
514 bl do_syscall_trace_enter
515 /*
516 * Restore argument registers possibly just changed.
517 * We use the return value of do_syscall_trace_enter
518 * for call number to look up in the table (r0).
519 */
520 mr r0,r3
521 lwz r3,GPR3(r1)
522 lwz r4,GPR4(r1)
523 lwz r5,GPR5(r1)
524 lwz r6,GPR6(r1)
525 lwz r7,GPR7(r1)
526 lwz r8,GPR8(r1)
527 REST_NVGPRS(r1)
528
529 cmplwi r0,NR_syscalls
530 /* Return code is already in r3 thanks to do_syscall_trace_enter() */
531 bge- ret_from_syscall
532 b syscall_dotrace_cont
533
534syscall_exit_work:
535 andi. r0,r9,_TIF_RESTOREALL
536 beq+ 0f
537 REST_NVGPRS(r1)
538 b 2f
5390: cmplw 0,r3,r8
540 blt+ 1f
541 andi. r0,r9,_TIF_NOERROR
542 bne- 1f
543 lwz r11,_CCR(r1) /* Load CR */
544 neg r3,r3
545 oris r11,r11,0x1000 /* Set SO bit in CR */
546 stw r11,_CCR(r1)
547
5481: stw r6,RESULT(r1) /* Save result */
549 stw r3,GPR3(r1) /* Update return value */
5502: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
551 beq 4f
552
553 /* Clear per-syscall TIF flags if any are set. */
554
555 li r11,_TIF_PERSYSCALL_MASK
556 addi r12,r2,TI_FLAGS
5573: lwarx r8,0,r12
558 andc r8,r8,r11
559 stwcx. r8,0,r12
560 bne- 3b
561
5624: /* Anything which requires enabling interrupts? */
563 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
564 beq ret_from_except
565
566 /* Re-enable interrupts. There is no need to trace that with
567 * lockdep as we are supposed to have IRQs on at this point
568 */
569 ori r10,r10,MSR_EE
570 SYNC
571 mtmsr r10
572
573 /* Save NVGPRS if they're not saved already */
574 lwz r4,_TRAP(r1)
575 andi. r4,r4,1
576 beq 5f
577 SAVE_NVGPRS(r1)
578 li r4,0xc00
579 stw r4,_TRAP(r1)
5805:
581 addi r3,r1,STACK_FRAME_OVERHEAD
582 bl do_syscall_trace_leave
583 b ret_from_except_full
584
585 /*
586 * System call was called from kernel. We get here with SRR1 in r9.
587 * Mark the exception as recoverable once we have retrieved SRR0,
588 * trap a warning and return ENOSYS with CR[SO] set.
589 */
590 .globl ret_from_kernel_syscall
591ret_from_kernel_syscall:
592 mfspr r9, SPRN_SRR0
593 mfspr r10, SPRN_SRR1
594#if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE)
595 LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR|MSR_DR))
596 mtmsr r11
597#endif
598
5990: trap
600 EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
601
602 li r3, ENOSYS
603 crset so
604#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
605 mtspr SPRN_NRI, r0
606#endif
607 mtspr SPRN_SRR0, r9
608 mtspr SPRN_SRR1, r10
609 SYNC
610 RFI
611_ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall)
612
613/*
614 * The fork/clone functions need to copy the full register set into
615 * the child process. Therefore we need to save all the nonvolatile
616 * registers (r13 - r31) before calling the C code.
617 */
618 .globl ppc_fork
619ppc_fork:
620 SAVE_NVGPRS(r1)
621 lwz r0,_TRAP(r1)
622 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
623 stw r0,_TRAP(r1) /* register set saved */
624 b sys_fork
625
626 .globl ppc_vfork
627ppc_vfork:
628 SAVE_NVGPRS(r1)
629 lwz r0,_TRAP(r1)
630 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
631 stw r0,_TRAP(r1) /* register set saved */
632 b sys_vfork
633
634 .globl ppc_clone
635ppc_clone:
636 SAVE_NVGPRS(r1)
637 lwz r0,_TRAP(r1)
638 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
639 stw r0,_TRAP(r1) /* register set saved */
640 b sys_clone
641
642 .globl ppc_clone3
643ppc_clone3:
644 SAVE_NVGPRS(r1)
645 lwz r0,_TRAP(r1)
646 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
647 stw r0,_TRAP(r1) /* register set saved */
648 b sys_clone3
649
650 .globl ppc_swapcontext
651ppc_swapcontext:
652 SAVE_NVGPRS(r1)
653 lwz r0,_TRAP(r1)
654 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
655 stw r0,_TRAP(r1) /* register set saved */
656 b sys_swapcontext
657
658/*
659 * Top-level page fault handling.
660 * This is in assembler because if do_page_fault tells us that
661 * it is a bad kernel page fault, we want to save the non-volatile
662 * registers before calling bad_page_fault.
663 */
664 .globl handle_page_fault
665handle_page_fault:
666 addi r3,r1,STACK_FRAME_OVERHEAD
667#ifdef CONFIG_PPC_BOOK3S_32
668 andis. r0,r5,DSISR_DABRMATCH@h
669 bne- handle_dabr_fault
670#endif
671 bl do_page_fault
672 cmpwi r3,0
673 beq+ ret_from_except
674 SAVE_NVGPRS(r1)
675 lwz r0,_TRAP(r1)
676 clrrwi r0,r0,1
677 stw r0,_TRAP(r1)
678 mr r5,r3
679 addi r3,r1,STACK_FRAME_OVERHEAD
680 lwz r4,_DAR(r1)
681 bl bad_page_fault
682 b ret_from_except_full
683
684#ifdef CONFIG_PPC_BOOK3S_32
685 /* We have a data breakpoint exception - handle it */
686handle_dabr_fault:
687 SAVE_NVGPRS(r1)
688 lwz r0,_TRAP(r1)
689 clrrwi r0,r0,1
690 stw r0,_TRAP(r1)
691 bl do_break
692 b ret_from_except_full
693#endif
694
695/*
696 * This routine switches between two different tasks. The process
697 * state of one is saved on its kernel stack. Then the state
698 * of the other is restored from its kernel stack. The memory
699 * management hardware is updated to the second process's state.
700 * Finally, we can return to the second process.
701 * On entry, r3 points to the THREAD for the current task, r4
702 * points to the THREAD for the new task.
703 *
704 * This routine is always called with interrupts disabled.
705 *
706 * Note: there are two ways to get to the "going out" portion
707 * of this code; either by coming in via the entry (_switch)
708 * or via "fork" which must set up an environment equivalent
709 * to the "_switch" path. If you change this , you'll have to
710 * change the fork code also.
711 *
712 * The code which creates the new task context is in 'copy_thread'
713 * in arch/ppc/kernel/process.c
714 */
715_GLOBAL(_switch)
716 stwu r1,-INT_FRAME_SIZE(r1)
717 mflr r0
718 stw r0,INT_FRAME_SIZE+4(r1)
719 /* r3-r12 are caller saved -- Cort */
720 SAVE_NVGPRS(r1)
721 stw r0,_NIP(r1) /* Return to switch caller */
722 mfmsr r11
723 li r0,MSR_FP /* Disable floating-point */
724#ifdef CONFIG_ALTIVEC
725BEGIN_FTR_SECTION
726 oris r0,r0,MSR_VEC@h /* Disable altivec */
727 mfspr r12,SPRN_VRSAVE /* save vrsave register value */
728 stw r12,THREAD+THREAD_VRSAVE(r2)
729END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
730#endif /* CONFIG_ALTIVEC */
731#ifdef CONFIG_SPE
732BEGIN_FTR_SECTION
733 oris r0,r0,MSR_SPE@h /* Disable SPE */
734 mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
735 stw r12,THREAD+THREAD_SPEFSCR(r2)
736END_FTR_SECTION_IFSET(CPU_FTR_SPE)
737#endif /* CONFIG_SPE */
738 and. r0,r0,r11 /* FP or altivec or SPE enabled? */
739 beq+ 1f
740 andc r11,r11,r0
741 mtmsr r11
742 isync
7431: stw r11,_MSR(r1)
744 mfcr r10
745 stw r10,_CCR(r1)
746 stw r1,KSP(r3) /* Set old stack pointer */
747
748 kuap_check r2, r0
749#ifdef CONFIG_SMP
750 /* We need a sync somewhere here to make sure that if the
751 * previous task gets rescheduled on another CPU, it sees all
752 * stores it has performed on this one.
753 */
754 sync
755#endif /* CONFIG_SMP */
756
757 tophys(r0,r4)
758 mtspr SPRN_SPRG_THREAD,r0 /* Update current THREAD phys addr */
759 lwz r1,KSP(r4) /* Load new stack pointer */
760
761 /* save the old current 'last' for return value */
762 mr r3,r2
763 addi r2,r4,-THREAD /* Update current */
764
765#ifdef CONFIG_ALTIVEC
766BEGIN_FTR_SECTION
767 lwz r0,THREAD+THREAD_VRSAVE(r2)
768 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
769END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
770#endif /* CONFIG_ALTIVEC */
771#ifdef CONFIG_SPE
772BEGIN_FTR_SECTION
773 lwz r0,THREAD+THREAD_SPEFSCR(r2)
774 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
775END_FTR_SECTION_IFSET(CPU_FTR_SPE)
776#endif /* CONFIG_SPE */
777
778 lwz r0,_CCR(r1)
779 mtcrf 0xFF,r0
780 /* r3-r12 are destroyed -- Cort */
781 REST_NVGPRS(r1)
782
783 lwz r4,_NIP(r1) /* Return to _switch caller in new task */
784 mtlr r4
785 addi r1,r1,INT_FRAME_SIZE
786 blr
787
788 .globl fast_exception_return
789fast_exception_return:
790#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
791 andi. r10,r9,MSR_RI /* check for recoverable interrupt */
792 beq 1f /* if not, we've got problems */
793#endif
794
7952: REST_4GPRS(3, r11)
796 lwz r10,_CCR(r11)
797 REST_GPR(1, r11)
798 mtcr r10
799 lwz r10,_LINK(r11)
800 mtlr r10
801 /* Clear the exception_marker on the stack to avoid confusing stacktrace */
802 li r10, 0
803 stw r10, 8(r11)
804 REST_GPR(10, r11)
805#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
806 mtspr SPRN_NRI, r0
807#endif
808 mtspr SPRN_SRR1,r9
809 mtspr SPRN_SRR0,r12
810 REST_GPR(9, r11)
811 REST_GPR(12, r11)
812 lwz r11,GPR11(r11)
813 SYNC
814 RFI
815_ASM_NOKPROBE_SYMBOL(fast_exception_return)
816
817#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
818/* check if the exception happened in a restartable section */
8191: lis r3,exc_exit_restart_end@ha
820 addi r3,r3,exc_exit_restart_end@l
821 cmplw r12,r3
822#ifdef CONFIG_PPC_BOOK3S_601
823 bge 2b
824#else
825 bge 3f
826#endif
827 lis r4,exc_exit_restart@ha
828 addi r4,r4,exc_exit_restart@l
829 cmplw r12,r4
830#ifdef CONFIG_PPC_BOOK3S_601
831 blt 2b
832#else
833 blt 3f
834#endif
835 lis r3,fee_restarts@ha
836 tophys(r3,r3)
837 lwz r5,fee_restarts@l(r3)
838 addi r5,r5,1
839 stw r5,fee_restarts@l(r3)
840 mr r12,r4 /* restart at exc_exit_restart */
841 b 2b
842
843 .section .bss
844 .align 2
845fee_restarts:
846 .space 4
847 .previous
848
849/* aargh, a nonrecoverable interrupt, panic */
850/* aargh, we don't know which trap this is */
851/* but the 601 doesn't implement the RI bit, so assume it's OK */
8523:
853 li r10,-1
854 stw r10,_TRAP(r11)
855 addi r3,r1,STACK_FRAME_OVERHEAD
856 lis r10,MSR_KERNEL@h
857 ori r10,r10,MSR_KERNEL@l
858 bl transfer_to_handler_full
859 .long unrecoverable_exception
860 .long ret_from_except
861#endif
862
863 .globl ret_from_except_full
864ret_from_except_full:
865 REST_NVGPRS(r1)
866 /* fall through */
867
868 .globl ret_from_except
869ret_from_except:
870 /* Hard-disable interrupts so that current_thread_info()->flags
871 * can't change between when we test it and when we return
872 * from the interrupt. */
873 /* Note: We don't bother telling lockdep about it */
874 LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
875 SYNC /* Some chip revs have problems here... */
876 mtmsr r10 /* disable interrupts */
877
878 lwz r3,_MSR(r1) /* Returning to user mode? */
879 andi. r0,r3,MSR_PR
880 beq resume_kernel
881
882user_exc_return: /* r10 contains MSR_KERNEL here */
883 /* Check current_thread_info()->flags */
884 lwz r9,TI_FLAGS(r2)
885 andi. r0,r9,_TIF_USER_WORK_MASK
886 bne do_work
887
888restore_user:
889#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
890 /* Check whether this process has its own DBCR0 value. The internal
891 debug mode bit tells us that dbcr0 should be loaded. */
892 lwz r0,THREAD+THREAD_DBCR0(r2)
893 andis. r10,r0,DBCR0_IDM@h
894 bnel- load_dbcr0
895#endif
896 ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
897#ifdef CONFIG_PPC_BOOK3S_32
898 kuep_unlock r10, r11
899#endif
900
901 b restore
902
903/* N.B. the only way to get here is from the beq following ret_from_except. */
904resume_kernel:
905 /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
906 lwz r8,TI_FLAGS(r2)
907 andis. r0,r8,_TIF_EMULATE_STACK_STORE@h
908 beq+ 1f
909
910 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
911
912 lwz r3,GPR1(r1)
913 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
914 mr r4,r1 /* src: current exception frame */
915 mr r1,r3 /* Reroute the trampoline frame to r1 */
916
917 /* Copy from the original to the trampoline. */
918 li r5,INT_FRAME_SIZE/4 /* size: INT_FRAME_SIZE */
919 li r6,0 /* start offset: 0 */
920 mtctr r5
9212: lwzx r0,r6,r4
922 stwx r0,r6,r3
923 addi r6,r6,4
924 bdnz 2b
925
926 /* Do real store operation to complete stwu */
927 lwz r5,GPR1(r1)
928 stw r8,0(r5)
929
930 /* Clear _TIF_EMULATE_STACK_STORE flag */
931 lis r11,_TIF_EMULATE_STACK_STORE@h
932 addi r5,r2,TI_FLAGS
9330: lwarx r8,0,r5
934 andc r8,r8,r11
935 stwcx. r8,0,r5
936 bne- 0b
9371:
938
939#ifdef CONFIG_PREEMPTION
940 /* check current_thread_info->preempt_count */
941 lwz r0,TI_PREEMPT(r2)
942 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
943 bne restore_kuap
944 andi. r8,r8,_TIF_NEED_RESCHED
945 beq+ restore_kuap
946 lwz r3,_MSR(r1)
947 andi. r0,r3,MSR_EE /* interrupts off? */
948 beq restore_kuap /* don't schedule if so */
949#ifdef CONFIG_TRACE_IRQFLAGS
950 /* Lockdep thinks irqs are enabled, we need to call
951 * preempt_schedule_irq with IRQs off, so we inform lockdep
952 * now that we -did- turn them off already
953 */
954 bl trace_hardirqs_off
955#endif
956 bl preempt_schedule_irq
957#ifdef CONFIG_TRACE_IRQFLAGS
958 /* And now, to properly rebalance the above, we tell lockdep they
959 * are being turned back on, which will happen when we return
960 */
961 bl trace_hardirqs_on
962#endif
963#endif /* CONFIG_PREEMPTION */
964restore_kuap:
965 kuap_restore r1, r2, r9, r10, r0
966
967 /* interrupts are hard-disabled at this point */
968restore:
969#ifdef CONFIG_44x
970BEGIN_MMU_FTR_SECTION
971 b 1f
972END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
973 lis r4,icache_44x_need_flush@ha
974 lwz r5,icache_44x_need_flush@l(r4)
975 cmplwi cr0,r5,0
976 beq+ 1f
977 li r6,0
978 iccci r0,r0
979 stw r6,icache_44x_need_flush@l(r4)
9801:
981#endif /* CONFIG_44x */
982
983 lwz r9,_MSR(r1)
984#ifdef CONFIG_TRACE_IRQFLAGS
985 /* Lockdep doesn't know about the fact that IRQs are temporarily turned
986 * off in this assembly code while peeking at TI_FLAGS() and such. However
987 * we need to inform it if the exception turned interrupts off, and we
988 * are about to trun them back on.
989 */
990 andi. r10,r9,MSR_EE
991 beq 1f
992 stwu r1,-32(r1)
993 mflr r0
994 stw r0,4(r1)
995 bl trace_hardirqs_on
996 addi r1, r1, 32
997 lwz r9,_MSR(r1)
9981:
999#endif /* CONFIG_TRACE_IRQFLAGS */
1000
1001 lwz r0,GPR0(r1)
1002 lwz r2,GPR2(r1)
1003 REST_4GPRS(3, r1)
1004 REST_2GPRS(7, r1)
1005
1006 lwz r10,_XER(r1)
1007 lwz r11,_CTR(r1)
1008 mtspr SPRN_XER,r10
1009 mtctr r11
1010
1011BEGIN_FTR_SECTION
1012 lwarx r11,0,r1
1013END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
1014 stwcx. r0,0,r1 /* to clear the reservation */
1015
1016#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
1017 andi. r10,r9,MSR_RI /* check if this exception occurred */
1018 beql nonrecoverable /* at a bad place (MSR:RI = 0) */
1019
1020 lwz r10,_CCR(r1)
1021 lwz r11,_LINK(r1)
1022 mtcrf 0xFF,r10
1023 mtlr r11
1024
1025 /* Clear the exception_marker on the stack to avoid confusing stacktrace */
1026 li r10, 0
1027 stw r10, 8(r1)
1028 /*
1029 * Once we put values in SRR0 and SRR1, we are in a state
1030 * where exceptions are not recoverable, since taking an
1031 * exception will trash SRR0 and SRR1. Therefore we clear the
1032 * MSR:RI bit to indicate this. If we do take an exception,
1033 * we can't return to the point of the exception but we
1034 * can restart the exception exit path at the label
1035 * exc_exit_restart below. -- paulus
1036 */
1037 LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
1038 SYNC
1039 mtmsr r10 /* clear the RI bit */
1040 .globl exc_exit_restart
1041exc_exit_restart:
1042 lwz r12,_NIP(r1)
1043 mtspr SPRN_SRR0,r12
1044 mtspr SPRN_SRR1,r9
1045 REST_4GPRS(9, r1)
1046 lwz r1,GPR1(r1)
1047 .globl exc_exit_restart_end
1048exc_exit_restart_end:
1049 SYNC
1050 RFI
1051_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
1052_ASM_NOKPROBE_SYMBOL(exc_exit_restart_end)
1053
1054#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
1055 /*
1056 * This is a bit different on 4xx/Book-E because it doesn't have
1057 * the RI bit in the MSR.
1058 * The TLB miss handler checks if we have interrupted
1059 * the exception exit path and restarts it if so
1060 * (well maybe one day it will... :).
1061 */
1062 lwz r11,_LINK(r1)
1063 mtlr r11
1064 lwz r10,_CCR(r1)
1065 mtcrf 0xff,r10
1066 /* Clear the exception_marker on the stack to avoid confusing stacktrace */
1067 li r10, 0
1068 stw r10, 8(r1)
1069 REST_2GPRS(9, r1)
1070 .globl exc_exit_restart
1071exc_exit_restart:
1072 lwz r11,_NIP(r1)
1073 lwz r12,_MSR(r1)
1074 mtspr SPRN_SRR0,r11
1075 mtspr SPRN_SRR1,r12
1076 REST_2GPRS(11, r1)
1077 lwz r1,GPR1(r1)
1078 .globl exc_exit_restart_end
1079exc_exit_restart_end:
1080 rfi
1081 b . /* prevent prefetch past rfi */
1082_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
1083
1084/*
1085 * Returning from a critical interrupt in user mode doesn't need
1086 * to be any different from a normal exception. For a critical
1087 * interrupt in the kernel, we just return (without checking for
1088 * preemption) since the interrupt may have happened at some crucial
1089 * place (e.g. inside the TLB miss handler), and because we will be
1090 * running with r1 pointing into critical_stack, not the current
1091 * process's kernel stack (and therefore current_thread_info() will
1092 * give the wrong answer).
1093 * We have to restore various SPRs that may have been in use at the
1094 * time of the critical interrupt.
1095 *
1096 */
1097#ifdef CONFIG_40x
1098#define PPC_40x_TURN_OFF_MSR_DR \
1099 /* avoid any possible TLB misses here by turning off MSR.DR, we \
1100 * assume the instructions here are mapped by a pinned TLB entry */ \
1101 li r10,MSR_IR; \
1102 mtmsr r10; \
1103 isync; \
1104 tophys(r1, r1);
1105#else
1106#define PPC_40x_TURN_OFF_MSR_DR
1107#endif
1108
1109#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
1110 REST_NVGPRS(r1); \
1111 lwz r3,_MSR(r1); \
1112 andi. r3,r3,MSR_PR; \
1113 LOAD_REG_IMMEDIATE(r10,MSR_KERNEL); \
1114 bne user_exc_return; \
1115 lwz r0,GPR0(r1); \
1116 lwz r2,GPR2(r1); \
1117 REST_4GPRS(3, r1); \
1118 REST_2GPRS(7, r1); \
1119 lwz r10,_XER(r1); \
1120 lwz r11,_CTR(r1); \
1121 mtspr SPRN_XER,r10; \
1122 mtctr r11; \
1123 stwcx. r0,0,r1; /* to clear the reservation */ \
1124 lwz r11,_LINK(r1); \
1125 mtlr r11; \
1126 lwz r10,_CCR(r1); \
1127 mtcrf 0xff,r10; \
1128 PPC_40x_TURN_OFF_MSR_DR; \
1129 lwz r9,_DEAR(r1); \
1130 lwz r10,_ESR(r1); \
1131 mtspr SPRN_DEAR,r9; \
1132 mtspr SPRN_ESR,r10; \
1133 lwz r11,_NIP(r1); \
1134 lwz r12,_MSR(r1); \
1135 mtspr exc_lvl_srr0,r11; \
1136 mtspr exc_lvl_srr1,r12; \
1137 lwz r9,GPR9(r1); \
1138 lwz r12,GPR12(r1); \
1139 lwz r10,GPR10(r1); \
1140 lwz r11,GPR11(r1); \
1141 lwz r1,GPR1(r1); \
1142 exc_lvl_rfi; \
1143 b .; /* prevent prefetch past exc_lvl_rfi */
1144
1145#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1) \
1146 lwz r9,_##exc_lvl_srr0(r1); \
1147 lwz r10,_##exc_lvl_srr1(r1); \
1148 mtspr SPRN_##exc_lvl_srr0,r9; \
1149 mtspr SPRN_##exc_lvl_srr1,r10;
1150
1151#if defined(CONFIG_PPC_BOOK3E_MMU)
1152#ifdef CONFIG_PHYS_64BIT
1153#define RESTORE_MAS7 \
1154 lwz r11,MAS7(r1); \
1155 mtspr SPRN_MAS7,r11;
1156#else
1157#define RESTORE_MAS7
1158#endif /* CONFIG_PHYS_64BIT */
1159#define RESTORE_MMU_REGS \
1160 lwz r9,MAS0(r1); \
1161 lwz r10,MAS1(r1); \
1162 lwz r11,MAS2(r1); \
1163 mtspr SPRN_MAS0,r9; \
1164 lwz r9,MAS3(r1); \
1165 mtspr SPRN_MAS1,r10; \
1166 lwz r10,MAS6(r1); \
1167 mtspr SPRN_MAS2,r11; \
1168 mtspr SPRN_MAS3,r9; \
1169 mtspr SPRN_MAS6,r10; \
1170 RESTORE_MAS7;
1171#elif defined(CONFIG_44x)
1172#define RESTORE_MMU_REGS \
1173 lwz r9,MMUCR(r1); \
1174 mtspr SPRN_MMUCR,r9;
1175#else
1176#define RESTORE_MMU_REGS
1177#endif
1178
1179#ifdef CONFIG_40x
1180 .globl ret_from_crit_exc
1181ret_from_crit_exc:
1182 mfspr r9,SPRN_SPRG_THREAD
1183 lis r10,saved_ksp_limit@ha;
1184 lwz r10,saved_ksp_limit@l(r10);
1185 tovirt(r9,r9);
1186 stw r10,KSP_LIMIT(r9)
1187 lis r9,crit_srr0@ha;
1188 lwz r9,crit_srr0@l(r9);
1189 lis r10,crit_srr1@ha;
1190 lwz r10,crit_srr1@l(r10);
1191 mtspr SPRN_SRR0,r9;
1192 mtspr SPRN_SRR1,r10;
1193 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1194_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
1195#endif /* CONFIG_40x */
1196
1197#ifdef CONFIG_BOOKE
1198 .globl ret_from_crit_exc
1199ret_from_crit_exc:
1200 mfspr r9,SPRN_SPRG_THREAD
1201 lwz r10,SAVED_KSP_LIMIT(r1)
1202 stw r10,KSP_LIMIT(r9)
1203 RESTORE_xSRR(SRR0,SRR1);
1204 RESTORE_MMU_REGS;
1205 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1206_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
1207
1208 .globl ret_from_debug_exc
1209ret_from_debug_exc:
1210 mfspr r9,SPRN_SPRG_THREAD
1211 lwz r10,SAVED_KSP_LIMIT(r1)
1212 stw r10,KSP_LIMIT(r9)
1213 RESTORE_xSRR(SRR0,SRR1);
1214 RESTORE_xSRR(CSRR0,CSRR1);
1215 RESTORE_MMU_REGS;
1216 RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1217_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)
1218
1219 .globl ret_from_mcheck_exc
1220ret_from_mcheck_exc:
1221 mfspr r9,SPRN_SPRG_THREAD
1222 lwz r10,SAVED_KSP_LIMIT(r1)
1223 stw r10,KSP_LIMIT(r9)
1224 RESTORE_xSRR(SRR0,SRR1);
1225 RESTORE_xSRR(CSRR0,CSRR1);
1226 RESTORE_xSRR(DSRR0,DSRR1);
1227 RESTORE_MMU_REGS;
1228 RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1229_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
1230#endif /* CONFIG_BOOKE */
1231
1232/*
1233 * Load the DBCR0 value for a task that is being ptraced,
1234 * having first saved away the global DBCR0. Note that r0
1235 * has the dbcr0 value to set upon entry to this.
1236 */
1237load_dbcr0:
1238 mfmsr r10 /* first disable debug exceptions */
1239 rlwinm r10,r10,0,~MSR_DE
1240 mtmsr r10
1241 isync
1242 mfspr r10,SPRN_DBCR0
1243 lis r11,global_dbcr0@ha
1244 addi r11,r11,global_dbcr0@l
1245#ifdef CONFIG_SMP
1246 lwz r9,TASK_CPU(r2)
1247 slwi r9,r9,3
1248 add r11,r11,r9
1249#endif
1250 stw r10,0(r11)
1251 mtspr SPRN_DBCR0,r0
1252 lwz r10,4(r11)
1253 addi r10,r10,1
1254 stw r10,4(r11)
1255 li r11,-1
1256 mtspr SPRN_DBSR,r11 /* clear all pending debug events */
1257 blr
1258
1259 .section .bss
1260 .align 4
1261 .global global_dbcr0
1262global_dbcr0:
1263 .space 8*NR_CPUS
1264 .previous
1265#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1266
1267do_work: /* r10 contains MSR_KERNEL here */
1268 andi. r0,r9,_TIF_NEED_RESCHED
1269 beq do_user_signal
1270
1271do_resched: /* r10 contains MSR_KERNEL here */
1272#ifdef CONFIG_TRACE_IRQFLAGS
1273 bl trace_hardirqs_on
1274 mfmsr r10
1275#endif
1276 ori r10,r10,MSR_EE
1277 SYNC
1278 mtmsr r10 /* hard-enable interrupts */
1279 bl schedule
1280recheck:
1281 /* Note: And we don't tell it we are disabling them again
1282 * neither. Those disable/enable cycles used to peek at
1283 * TI_FLAGS aren't advertised.
1284 */
1285 LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
1286 SYNC
1287 mtmsr r10 /* disable interrupts */
1288 lwz r9,TI_FLAGS(r2)
1289 andi. r0,r9,_TIF_NEED_RESCHED
1290 bne- do_resched
1291 andi. r0,r9,_TIF_USER_WORK_MASK
1292 beq restore_user
1293do_user_signal: /* r10 contains MSR_KERNEL here */
1294 ori r10,r10,MSR_EE
1295 SYNC
1296 mtmsr r10 /* hard-enable interrupts */
1297 /* save r13-r31 in the exception frame, if not already done */
1298 lwz r3,_TRAP(r1)
1299 andi. r0,r3,1
1300 beq 2f
1301 SAVE_NVGPRS(r1)
1302 rlwinm r3,r3,0,0,30
1303 stw r3,_TRAP(r1)
13042: addi r3,r1,STACK_FRAME_OVERHEAD
1305 mr r4,r9
1306 bl do_notify_resume
1307 REST_NVGPRS(r1)
1308 b recheck
1309
1310/*
1311 * We come here when we are at the end of handling an exception
1312 * that occurred at a place where taking an exception will lose
1313 * state information, such as the contents of SRR0 and SRR1.
1314 */
1315nonrecoverable:
1316 lis r10,exc_exit_restart_end@ha
1317 addi r10,r10,exc_exit_restart_end@l
1318 cmplw r12,r10
1319#ifdef CONFIG_PPC_BOOK3S_601
1320 bgelr
1321#else
1322 bge 3f
1323#endif
1324 lis r11,exc_exit_restart@ha
1325 addi r11,r11,exc_exit_restart@l
1326 cmplw r12,r11
1327#ifdef CONFIG_PPC_BOOK3S_601
1328 bltlr
1329#else
1330 blt 3f
1331#endif
1332 lis r10,ee_restarts@ha
1333 lwz r12,ee_restarts@l(r10)
1334 addi r12,r12,1
1335 stw r12,ee_restarts@l(r10)
1336 mr r12,r11 /* restart at exc_exit_restart */
1337 blr
13383: /* OK, we can't recover, kill this process */
1339 /* but the 601 doesn't implement the RI bit, so assume it's OK */
1340 lwz r3,_TRAP(r1)
1341 andi. r0,r3,1
1342 beq 5f
1343 SAVE_NVGPRS(r1)
1344 rlwinm r3,r3,0,0,30
1345 stw r3,_TRAP(r1)
13465: mfspr r2,SPRN_SPRG_THREAD
1347 addi r2,r2,-THREAD
1348 tovirt(r2,r2) /* set back r2 to current */
13494: addi r3,r1,STACK_FRAME_OVERHEAD
1350 bl unrecoverable_exception
1351 /* shouldn't return */
1352 b 4b
1353_ASM_NOKPROBE_SYMBOL(nonrecoverable)
1354
1355 .section .bss
1356 .align 2
1357ee_restarts:
1358 .space 4
1359 .previous
1360
1361/*
1362 * PROM code for specific machines follows. Put it
1363 * here so it's easy to add arch-specific sections later.
1364 * -- Cort
1365 */
1366#ifdef CONFIG_PPC_RTAS
1367/*
1368 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1369 * called with the MMU off.
1370 */
1371_GLOBAL(enter_rtas)
1372 stwu r1,-INT_FRAME_SIZE(r1)
1373 mflr r0
1374 stw r0,INT_FRAME_SIZE+4(r1)
1375 LOAD_REG_ADDR(r4, rtas)
1376 lis r6,1f@ha /* physical return address for rtas */
1377 addi r6,r6,1f@l
1378 tophys(r6,r6)
1379 tophys_novmstack r7, r1
1380 lwz r8,RTASENTRY(r4)
1381 lwz r4,RTASBASE(r4)
1382 mfmsr r9
1383 stw r9,8(r1)
1384 LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
1385 SYNC /* disable interrupts so SRR0/1 */
1386 mtmsr r0 /* don't get trashed */
1387 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1388 mtlr r6
1389 stw r7, THREAD + RTAS_SP(r2)
1390 mtspr SPRN_SRR0,r8
1391 mtspr SPRN_SRR1,r9
1392 RFI
13931: tophys_novmstack r9, r1
1394#ifdef CONFIG_VMAP_STACK
1395 li r0, MSR_KERNEL & ~MSR_IR /* can take DTLB miss */
1396 mtmsr r0
1397 isync
1398#endif
1399 lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
1400 lwz r9,8(r9) /* original msr value */
1401 addi r1,r1,INT_FRAME_SIZE
1402 li r0,0
1403 tophys_novmstack r7, r2
1404 stw r0, THREAD + RTAS_SP(r7)
1405 mtspr SPRN_SRR0,r8
1406 mtspr SPRN_SRR1,r9
1407 RFI /* return to caller */
1408_ASM_NOKPROBE_SYMBOL(enter_rtas)
1409#endif /* CONFIG_PPC_RTAS */
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/sys.h>
24#include <linux/threads.h>
25#include <asm/reg.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/cputable.h>
29#include <asm/thread_info.h>
30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h>
32#include <asm/unistd.h>
33#include <asm/ftrace.h>
34#include <asm/ptrace.h>
35
36#undef SHOW_SYSCALLS
37#undef SHOW_SYSCALLS_TASK
38
39/*
40 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
41 */
42#if MSR_KERNEL >= 0x10000
43#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
44#else
45#define LOAD_MSR_KERNEL(r, x) li r,(x)
46#endif
47
48#ifdef CONFIG_BOOKE
49 .globl mcheck_transfer_to_handler
50mcheck_transfer_to_handler:
51 mfspr r0,SPRN_DSRR0
52 stw r0,_DSRR0(r11)
53 mfspr r0,SPRN_DSRR1
54 stw r0,_DSRR1(r11)
55 /* fall through */
56
57 .globl debug_transfer_to_handler
58debug_transfer_to_handler:
59 mfspr r0,SPRN_CSRR0
60 stw r0,_CSRR0(r11)
61 mfspr r0,SPRN_CSRR1
62 stw r0,_CSRR1(r11)
63 /* fall through */
64
65 .globl crit_transfer_to_handler
66crit_transfer_to_handler:
67#ifdef CONFIG_PPC_BOOK3E_MMU
68 mfspr r0,SPRN_MAS0
69 stw r0,MAS0(r11)
70 mfspr r0,SPRN_MAS1
71 stw r0,MAS1(r11)
72 mfspr r0,SPRN_MAS2
73 stw r0,MAS2(r11)
74 mfspr r0,SPRN_MAS3
75 stw r0,MAS3(r11)
76 mfspr r0,SPRN_MAS6
77 stw r0,MAS6(r11)
78#ifdef CONFIG_PHYS_64BIT
79 mfspr r0,SPRN_MAS7
80 stw r0,MAS7(r11)
81#endif /* CONFIG_PHYS_64BIT */
82#endif /* CONFIG_PPC_BOOK3E_MMU */
83#ifdef CONFIG_44x
84 mfspr r0,SPRN_MMUCR
85 stw r0,MMUCR(r11)
86#endif
87 mfspr r0,SPRN_SRR0
88 stw r0,_SRR0(r11)
89 mfspr r0,SPRN_SRR1
90 stw r0,_SRR1(r11)
91
92 mfspr r8,SPRN_SPRG_THREAD
93 lwz r0,KSP_LIMIT(r8)
94 stw r0,SAVED_KSP_LIMIT(r11)
95 rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
96 stw r0,KSP_LIMIT(r8)
97 /* fall through */
98#endif
99
100#ifdef CONFIG_40x
101 .globl crit_transfer_to_handler
102crit_transfer_to_handler:
103 lwz r0,crit_r10@l(0)
104 stw r0,GPR10(r11)
105 lwz r0,crit_r11@l(0)
106 stw r0,GPR11(r11)
107 mfspr r0,SPRN_SRR0
108 stw r0,crit_srr0@l(0)
109 mfspr r0,SPRN_SRR1
110 stw r0,crit_srr1@l(0)
111
112 mfspr r8,SPRN_SPRG_THREAD
113 lwz r0,KSP_LIMIT(r8)
114 stw r0,saved_ksp_limit@l(0)
115 rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
116 stw r0,KSP_LIMIT(r8)
117 /* fall through */
118#endif
119
120/*
121 * This code finishes saving the registers to the exception frame
122 * and jumps to the appropriate handler for the exception, turning
123 * on address translation.
124 * Note that we rely on the caller having set cr0.eq iff the exception
125 * occurred in kernel mode (i.e. MSR:PR = 0).
126 */
127 .globl transfer_to_handler_full
128transfer_to_handler_full:
129 SAVE_NVGPRS(r11)
130 /* fall through */
131
132 .globl transfer_to_handler
133transfer_to_handler:
134 stw r2,GPR2(r11)
135 stw r12,_NIP(r11)
136 stw r9,_MSR(r11)
137 andi. r2,r9,MSR_PR
138 mfctr r12
139 mfspr r2,SPRN_XER
140 stw r12,_CTR(r11)
141 stw r2,_XER(r11)
142 mfspr r12,SPRN_SPRG_THREAD
143 addi r2,r12,-THREAD
144 tovirt(r2,r2) /* set r2 to current */
145 beq 2f /* if from user, fix up THREAD.regs */
146 addi r11,r1,STACK_FRAME_OVERHEAD
147 stw r11,PT_REGS(r12)
148#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
149 /* Check to see if the dbcr0 register is set up to debug. Use the
150 internal debug mode bit to do this. */
151 lwz r12,THREAD_DBCR0(r12)
152 andis. r12,r12,DBCR0_IDM@h
153 beq+ 3f
154 /* From user and task is ptraced - load up global dbcr0 */
155 li r12,-1 /* clear all pending debug events */
156 mtspr SPRN_DBSR,r12
157 lis r11,global_dbcr0@ha
158 tophys(r11,r11)
159 addi r11,r11,global_dbcr0@l
160#ifdef CONFIG_SMP
161 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
162 lwz r9,TI_CPU(r9)
163 slwi r9,r9,3
164 add r11,r11,r9
165#endif
166 lwz r12,0(r11)
167 mtspr SPRN_DBCR0,r12
168 lwz r12,4(r11)
169 addi r12,r12,-1
170 stw r12,4(r11)
171#endif
172 b 3f
173
1742: /* if from kernel, check interrupted DOZE/NAP mode and
175 * check for stack overflow
176 */
177 lwz r9,KSP_LIMIT(r12)
178 cmplw r1,r9 /* if r1 <= ksp_limit */
179 ble- stack_ovf /* then the kernel stack overflowed */
1805:
181#if defined(CONFIG_6xx) || defined(CONFIG_E500)
182 rlwinm r9,r1,0,0,31-THREAD_SHIFT
183 tophys(r9,r9) /* check local flags */
184 lwz r12,TI_LOCAL_FLAGS(r9)
185 mtcrf 0x01,r12
186 bt- 31-TLF_NAPPING,4f
187 bt- 31-TLF_SLEEPING,7f
188#endif /* CONFIG_6xx || CONFIG_E500 */
189 .globl transfer_to_handler_cont
190transfer_to_handler_cont:
1913:
192 mflr r9
193 lwz r11,0(r9) /* virtual address of handler */
194 lwz r9,4(r9) /* where to go when done */
195#ifdef CONFIG_TRACE_IRQFLAGS
196 lis r12,reenable_mmu@h
197 ori r12,r12,reenable_mmu@l
198 mtspr SPRN_SRR0,r12
199 mtspr SPRN_SRR1,r10
200 SYNC
201 RFI
202reenable_mmu: /* re-enable mmu so we can */
203 mfmsr r10
204 lwz r12,_MSR(r1)
205 xor r10,r10,r12
206 andi. r10,r10,MSR_EE /* Did EE change? */
207 beq 1f
208
209 /*
210 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
211 * If from user mode there is only one stack frame on the stack, and
212 * accessing CALLER_ADDR1 will cause oops. So we need create a dummy
213 * stack frame to make trace_hardirqs_off happy.
214 *
215 * This is handy because we also need to save a bunch of GPRs,
216 * r3 can be different from GPR3(r1) at this point, r9 and r11
217 * contains the old MSR and handler address respectively,
218 * r4 & r5 can contain page fault arguments that need to be passed
219 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
220 * they aren't useful past this point (aren't syscall arguments),
221 * the rest is restored from the exception frame.
222 */
223 stwu r1,-32(r1)
224 stw r9,8(r1)
225 stw r11,12(r1)
226 stw r3,16(r1)
227 stw r4,20(r1)
228 stw r5,24(r1)
229 andi. r12,r12,MSR_PR
230 b 11f
231 bl trace_hardirqs_off
232 b 12f
23311:
234 bl trace_hardirqs_off
23512:
236 lwz r5,24(r1)
237 lwz r4,20(r1)
238 lwz r3,16(r1)
239 lwz r11,12(r1)
240 lwz r9,8(r1)
241 addi r1,r1,32
242 lwz r0,GPR0(r1)
243 lwz r6,GPR6(r1)
244 lwz r7,GPR7(r1)
245 lwz r8,GPR8(r1)
2461: mtctr r11
247 mtlr r9
248 bctr /* jump to handler */
249#else /* CONFIG_TRACE_IRQFLAGS */
250 mtspr SPRN_SRR0,r11
251 mtspr SPRN_SRR1,r10
252 mtlr r9
253 SYNC
254 RFI /* jump to handler, enable MMU */
255#endif /* CONFIG_TRACE_IRQFLAGS */
256
257#if defined (CONFIG_6xx) || defined(CONFIG_E500)
2584: rlwinm r12,r12,0,~_TLF_NAPPING
259 stw r12,TI_LOCAL_FLAGS(r9)
260 b power_save_ppc32_restore
261
2627: rlwinm r12,r12,0,~_TLF_SLEEPING
263 stw r12,TI_LOCAL_FLAGS(r9)
264 lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
265 rlwinm r9,r9,0,~MSR_EE
266 lwz r12,_LINK(r11) /* and return to address in LR */
267 b fast_exception_return
268#endif
269
270/*
271 * On kernel stack overflow, load up an initial stack pointer
272 * and call StackOverflow(regs), which should not return.
273 */
274stack_ovf:
275 /* sometimes we use a statically-allocated stack, which is OK. */
276 lis r12,_end@h
277 ori r12,r12,_end@l
278 cmplw r1,r12
279 ble 5b /* r1 <= &_end is OK */
280 SAVE_NVGPRS(r11)
281 addi r3,r1,STACK_FRAME_OVERHEAD
282 lis r1,init_thread_union@ha
283 addi r1,r1,init_thread_union@l
284 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
285 lis r9,StackOverflow@ha
286 addi r9,r9,StackOverflow@l
287 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
288 FIX_SRR1(r10,r12)
289 mtspr SPRN_SRR0,r9
290 mtspr SPRN_SRR1,r10
291 SYNC
292 RFI
293
294/*
295 * Handle a system call.
296 */
297 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
298 .stabs "entry_32.S",N_SO,0,0,0f
2990:
300
301_GLOBAL(DoSyscall)
302 stw r3,ORIG_GPR3(r1)
303 li r12,0
304 stw r12,RESULT(r1)
305 lwz r11,_CCR(r1) /* Clear SO bit in CR */
306 rlwinm r11,r11,0,4,2
307 stw r11,_CCR(r1)
308#ifdef SHOW_SYSCALLS
309 bl do_show_syscall
310#endif /* SHOW_SYSCALLS */
311#ifdef CONFIG_TRACE_IRQFLAGS
312 /* Return from syscalls can (and generally will) hard enable
313 * interrupts. You aren't supposed to call a syscall with
314 * interrupts disabled in the first place. However, to ensure
315 * that we get it right vs. lockdep if it happens, we force
316 * that hard enable here with appropriate tracing if we see
317 * that we have been called with interrupts off
318 */
319 mfmsr r11
320 andi. r12,r11,MSR_EE
321 bne+ 1f
322 /* We came in with interrupts disabled, we enable them now */
323 bl trace_hardirqs_on
324 mfmsr r11
325 lwz r0,GPR0(r1)
326 lwz r3,GPR3(r1)
327 lwz r4,GPR4(r1)
328 ori r11,r11,MSR_EE
329 lwz r5,GPR5(r1)
330 lwz r6,GPR6(r1)
331 lwz r7,GPR7(r1)
332 lwz r8,GPR8(r1)
333 mtmsr r11
3341:
335#endif /* CONFIG_TRACE_IRQFLAGS */
336 rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
337 lwz r11,TI_FLAGS(r10)
338 andi. r11,r11,_TIF_SYSCALL_T_OR_A
339 bne- syscall_dotrace
340syscall_dotrace_cont:
341 cmplwi 0,r0,NR_syscalls
342 lis r10,sys_call_table@h
343 ori r10,r10,sys_call_table@l
344 slwi r0,r0,2
345 bge- 66f
346 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
347 mtlr r10
348 addi r9,r1,STACK_FRAME_OVERHEAD
349 PPC440EP_ERR42
350 blrl /* Call handler */
351 .globl ret_from_syscall
352ret_from_syscall:
353#ifdef SHOW_SYSCALLS
354 bl do_show_syscall_exit
355#endif
356 mr r6,r3
357 rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
358 /* disable interrupts so current_thread_info()->flags can't change */
359 LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
360 /* Note: We don't bother telling lockdep about it */
361 SYNC
362 MTMSRD(r10)
363 lwz r9,TI_FLAGS(r12)
364 li r8,-_LAST_ERRNO
365 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
366 bne- syscall_exit_work
367 cmplw 0,r3,r8
368 blt+ syscall_exit_cont
369 lwz r11,_CCR(r1) /* Load CR */
370 neg r3,r3
371 oris r11,r11,0x1000 /* Set SO bit in CR */
372 stw r11,_CCR(r1)
373syscall_exit_cont:
374 lwz r8,_MSR(r1)
375#ifdef CONFIG_TRACE_IRQFLAGS
376 /* If we are going to return from the syscall with interrupts
377 * off, we trace that here. It shouldn't happen though but we
378 * want to catch the bugger if it does right ?
379 */
380 andi. r10,r8,MSR_EE
381 bne+ 1f
382 stw r3,GPR3(r1)
383 bl trace_hardirqs_off
384 lwz r3,GPR3(r1)
3851:
386#endif /* CONFIG_TRACE_IRQFLAGS */
387#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
388 /* If the process has its own DBCR0 value, load it up. The internal
389 debug mode bit tells us that dbcr0 should be loaded. */
390 lwz r0,THREAD+THREAD_DBCR0(r2)
391 andis. r10,r0,DBCR0_IDM@h
392 bnel- load_dbcr0
393#endif
394#ifdef CONFIG_44x
395BEGIN_MMU_FTR_SECTION
396 lis r4,icache_44x_need_flush@ha
397 lwz r5,icache_44x_need_flush@l(r4)
398 cmplwi cr0,r5,0
399 bne- 2f
4001:
401END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
402#endif /* CONFIG_44x */
403BEGIN_FTR_SECTION
404 lwarx r7,0,r1
405END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
406 stwcx. r0,0,r1 /* to clear the reservation */
407 lwz r4,_LINK(r1)
408 lwz r5,_CCR(r1)
409 mtlr r4
410 mtcr r5
411 lwz r7,_NIP(r1)
412 FIX_SRR1(r8, r0)
413 lwz r2,GPR2(r1)
414 lwz r1,GPR1(r1)
415 mtspr SPRN_SRR0,r7
416 mtspr SPRN_SRR1,r8
417 SYNC
418 RFI
419#ifdef CONFIG_44x
4202: li r7,0
421 iccci r0,r0
422 stw r7,icache_44x_need_flush@l(r4)
423 b 1b
424#endif /* CONFIG_44x */
425
42666: li r3,-ENOSYS
427 b ret_from_syscall
428
429 .globl ret_from_fork
430ret_from_fork:
431 REST_NVGPRS(r1)
432 bl schedule_tail
433 li r3,0
434 b ret_from_syscall
435
436/* Traced system call support */
437syscall_dotrace:
438 SAVE_NVGPRS(r1)
439 li r0,0xc00
440 stw r0,_TRAP(r1)
441 addi r3,r1,STACK_FRAME_OVERHEAD
442 bl do_syscall_trace_enter
443 /*
444 * Restore argument registers possibly just changed.
445 * We use the return value of do_syscall_trace_enter
446 * for call number to look up in the table (r0).
447 */
448 mr r0,r3
449 lwz r3,GPR3(r1)
450 lwz r4,GPR4(r1)
451 lwz r5,GPR5(r1)
452 lwz r6,GPR6(r1)
453 lwz r7,GPR7(r1)
454 lwz r8,GPR8(r1)
455 REST_NVGPRS(r1)
456 b syscall_dotrace_cont
457
458syscall_exit_work:
459 andi. r0,r9,_TIF_RESTOREALL
460 beq+ 0f
461 REST_NVGPRS(r1)
462 b 2f
4630: cmplw 0,r3,r8
464 blt+ 1f
465 andi. r0,r9,_TIF_NOERROR
466 bne- 1f
467 lwz r11,_CCR(r1) /* Load CR */
468 neg r3,r3
469 oris r11,r11,0x1000 /* Set SO bit in CR */
470 stw r11,_CCR(r1)
471
4721: stw r6,RESULT(r1) /* Save result */
473 stw r3,GPR3(r1) /* Update return value */
4742: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
475 beq 4f
476
477 /* Clear per-syscall TIF flags if any are set. */
478
479 li r11,_TIF_PERSYSCALL_MASK
480 addi r12,r12,TI_FLAGS
4813: lwarx r8,0,r12
482 andc r8,r8,r11
483#ifdef CONFIG_IBM405_ERR77
484 dcbt 0,r12
485#endif
486 stwcx. r8,0,r12
487 bne- 3b
488 subi r12,r12,TI_FLAGS
489
4904: /* Anything which requires enabling interrupts? */
491 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
492 beq ret_from_except
493
494 /* Re-enable interrupts. There is no need to trace that with
495 * lockdep as we are supposed to have IRQs on at this point
496 */
497 ori r10,r10,MSR_EE
498 SYNC
499 MTMSRD(r10)
500
501 /* Save NVGPRS if they're not saved already */
502 lwz r4,_TRAP(r1)
503 andi. r4,r4,1
504 beq 5f
505 SAVE_NVGPRS(r1)
506 li r4,0xc00
507 stw r4,_TRAP(r1)
5085:
509 addi r3,r1,STACK_FRAME_OVERHEAD
510 bl do_syscall_trace_leave
511 b ret_from_except_full
512
513#ifdef SHOW_SYSCALLS
514do_show_syscall:
515#ifdef SHOW_SYSCALLS_TASK
516 lis r11,show_syscalls_task@ha
517 lwz r11,show_syscalls_task@l(r11)
518 cmp 0,r2,r11
519 bnelr
520#endif
521 stw r31,GPR31(r1)
522 mflr r31
523 lis r3,7f@ha
524 addi r3,r3,7f@l
525 lwz r4,GPR0(r1)
526 lwz r5,GPR3(r1)
527 lwz r6,GPR4(r1)
528 lwz r7,GPR5(r1)
529 lwz r8,GPR6(r1)
530 lwz r9,GPR7(r1)
531 bl printk
532 lis r3,77f@ha
533 addi r3,r3,77f@l
534 lwz r4,GPR8(r1)
535 mr r5,r2
536 bl printk
537 lwz r0,GPR0(r1)
538 lwz r3,GPR3(r1)
539 lwz r4,GPR4(r1)
540 lwz r5,GPR5(r1)
541 lwz r6,GPR6(r1)
542 lwz r7,GPR7(r1)
543 lwz r8,GPR8(r1)
544 mtlr r31
545 lwz r31,GPR31(r1)
546 blr
547
548do_show_syscall_exit:
549#ifdef SHOW_SYSCALLS_TASK
550 lis r11,show_syscalls_task@ha
551 lwz r11,show_syscalls_task@l(r11)
552 cmp 0,r2,r11
553 bnelr
554#endif
555 stw r31,GPR31(r1)
556 mflr r31
557 stw r3,RESULT(r1) /* Save result */
558 mr r4,r3
559 lis r3,79f@ha
560 addi r3,r3,79f@l
561 bl printk
562 lwz r3,RESULT(r1)
563 mtlr r31
564 lwz r31,GPR31(r1)
565 blr
566
5677: .string "syscall %d(%x, %x, %x, %x, %x, "
56877: .string "%x), current=%p\n"
56979: .string " -> %x\n"
570 .align 2,0
571
572#ifdef SHOW_SYSCALLS_TASK
573 .data
574 .globl show_syscalls_task
575show_syscalls_task:
576 .long -1
577 .text
578#endif
579#endif /* SHOW_SYSCALLS */
580
581/*
582 * The fork/clone functions need to copy the full register set into
583 * the child process. Therefore we need to save all the nonvolatile
584 * registers (r13 - r31) before calling the C code.
585 */
586 .globl ppc_fork
587ppc_fork:
588 SAVE_NVGPRS(r1)
589 lwz r0,_TRAP(r1)
590 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
591 stw r0,_TRAP(r1) /* register set saved */
592 b sys_fork
593
594 .globl ppc_vfork
595ppc_vfork:
596 SAVE_NVGPRS(r1)
597 lwz r0,_TRAP(r1)
598 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
599 stw r0,_TRAP(r1) /* register set saved */
600 b sys_vfork
601
602 .globl ppc_clone
603ppc_clone:
604 SAVE_NVGPRS(r1)
605 lwz r0,_TRAP(r1)
606 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
607 stw r0,_TRAP(r1) /* register set saved */
608 b sys_clone
609
610 .globl ppc_swapcontext
611ppc_swapcontext:
612 SAVE_NVGPRS(r1)
613 lwz r0,_TRAP(r1)
614 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
615 stw r0,_TRAP(r1) /* register set saved */
616 b sys_swapcontext
617
618/*
619 * Top-level page fault handling.
620 * This is in assembler because if do_page_fault tells us that
621 * it is a bad kernel page fault, we want to save the non-volatile
622 * registers before calling bad_page_fault.
623 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
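/*
 * Roughly, in C (illustrative sketch only): parking the outgoing
 * task's stack pointer in its thread_struct and loading the incoming
 * task's is what actually switches tasks; r3 carries the old current
 * back to the caller as "last":
 *
 *	prev->ksp = r1;		stw r1,KSP(r3)
 *	r1 = next->ksp;		lwz r1,KSP(r4)
 *	current = next;		r2 = r4 - THREAD
 *	return prev;		old current in r3
 */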
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0	/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0	/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b
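
/*
 * Illustrative sketch of the restart test above (not actual kernel
 * code): with r12 holding the faulting SRR0,
 *
 *	if (srr0 >= exc_exit_restart && srr0 < exc_exit_restart_end) {
 *		fee_restarts++;
 *		srr0 = exc_exit_restart;	re-run the exit sequence
 *	}
 *
 * i.e. an exception taken inside the unrecoverable exit window is
 * handled by simply restarting the exit code from its beginning.
 */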

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value. The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */
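
/*
 * Illustrative C sketch of the kernel-preemption test above (not
 * actual kernel code): reschedule only if the preempt count is zero,
 * a reschedule is pending, and the interrupted context had EE set:
 *
 *	if (!ti->preempt_count && (regs->msr & MSR_EE)) {
 *		while (ti->flags & _TIF_NEED_RESCHED)
 *			preempt_schedule_irq();
 *	}
 */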

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such. However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 *
	 * The problem here sadly is that we don't know whether the exception was
	 * one that turned interrupts off or not. So we always tell lockdep about
	 * turning them on here when we go back to wherever we came from with EE
	 * on, even if that may mean some redundant calls being tracked. Maybe later
	 * we could encode what the exception did somewhere or test the exception
	 * type in the pt_regs but that sounds like overkill.
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	/*
	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
	 * which is the stack frame here, we need to force a stack frame
	 * in case we came from user space.
	 */
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-32(r1)
	bl	trace_hardirqs_on
	lwz	r1,0(r1)
	lwz	r1,0(r1)
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1. Therefore we clear the
	 * MSR:RI bit to indicate this. If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below. -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception. For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;
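
/*
 * For example, RESTORE_xSRR(CSRR0,CSRR1) expands to reloads of the
 * _CSRR0/_CSRR1 words saved in the exception frame followed by
 * mtspr SPRN_CSRR0/SPRN_CSRR1, undoing the saves performed in
 * crit_transfer_to_handler.
 */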

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif
#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	/* 40x critical interrupts use SRR2/SRR3, not the Book-E CSRRs */
	RET_FROM_EXC_LEVEL(SPRN_SRR2, SPRN_SRR3, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced, having first
 * saved away the global DBCR0. On entry, r0 holds the DBCR0 value to
 * set for the task.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr
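
/*
 * Layout of global_dbcr0 below, as used above (8 bytes per CPU):
 * word 0 holds the saved global DBCR0 value, word 1 counts how many
 * times it has been saved. In C terms (illustrative sketch):
 *
 *	struct { u32 saved_dbcr0; u32 save_count; } global_dbcr0[NR_CPUS];
 */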

	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	/* Note: We don't need to inform lockdep that we are enabling
	 * interrupts here. As far as it knows, they are already enabled.
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: we don't tell lockdep that we are disabling them again
	 * either. These disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised to it.
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck
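
/*
 * Taken together, do_work/recheck above implement, roughly (sketch
 * only, not actual kernel code):
 *
 *	do {
 *		if (ti->flags & _TIF_NEED_RESCHED)
 *			schedule();
 *		else
 *			do_notify_resume(regs, ti->flags);
 *		local_irq_disable();
 *	} while (ti->flags & _TIF_USER_WORK_MASK);
 *
 * with interrupts hard-enabled around each piece of work.
 */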

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows. Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
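/*
 * Shape of the call below (illustrative summary): the current MSR is
 * saved in the frame, SRR0 is pointed at the RTAS entry and SRR1 at
 * an MSR with IR/DR clear, so the rfi drops into RTAS in real mode;
 * RTAS returns to the physical address of label 1, which restores
 * the saved MSR and rfis back to the virtual-mode caller.
 */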
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG_RTAS,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG_RTAS,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/*
	 * It is required that _mcount on PPC32 must preserve the
	 * link register. But we have r0 to play with. We use r0
	 * to push the return address back to the caller of mcount
	 * into the ctr register, restore the link register and
	 * then jump back using the ctr register.
	 */
	mflr	r0
	mtctr	r0
	lwz	r0, 4(r1)
	mtlr	r0
	bctr
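
/*
 * Register flow of the stub above (descriptive note; assumes the
 * usual -pg prologue that saves LR at 4(r1) before calling _mcount):
 * on entry LR holds the call site inside the traced function and
 * 4(r1) holds that function's own saved LR. We bounce the call site
 * into CTR, reload the real LR, and bctr back so both are intact.
 */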

_GLOBAL(ftrace_caller)
	MCOUNT_SAVE_FRAME
	/* r3 ends up with link register */
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)

	MCOUNT_SAVE_FRAME

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5, ftrace_trace_function)
	lwz	r5,0(r5)

	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	MCOUNT_RESTORE_FRAME
	bctr
#endif

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	lwz	r4, 44(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	addi	r3, r1, 52

	bl	prepare_ftrace_return
	nop

	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr

_GLOBAL(return_to_handler)
	/* need to save return values */
	stwu	r1, -32(r1)
	stw	r3, 20(r1)
	stw	r4, 16(r1)
	stw	r31, 12(r1)
	mr	r31, r1

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	lwz	r3, 20(r1)
	lwz	r4, 16(r1)
	lwz	r31,12(r1)
	lwz	r1, 0(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */