1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * PowerPC version
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
6 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
7 * Adapted for Power Macintosh by Paul Mackerras.
8 * Low-level exception handlers and MMU support
9 * rewritten by Paul Mackerras.
10 * Copyright (C) 1996 Paul Mackerras.
11 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
12 *
13 * This file contains the system call entry code, context switch
14 * code, and exception/interrupt return code for PowerPC.
15 */
16
17#include <linux/objtool.h>
18#include <linux/errno.h>
19#include <linux/err.h>
20#include <asm/cache.h>
21#include <asm/unistd.h>
22#include <asm/processor.h>
23#include <asm/page.h>
24#include <asm/mmu.h>
25#include <asm/thread_info.h>
26#include <asm/code-patching-asm.h>
27#include <asm/ppc_asm.h>
28#include <asm/asm-offsets.h>
29#include <asm/cputable.h>
30#include <asm/firmware.h>
31#include <asm/bug.h>
32#include <asm/ptrace.h>
33#include <asm/irqflags.h>
34#include <asm/hw_irq.h>
35#include <asm/context_tracking.h>
36#include <asm/ppc-opcode.h>
37#include <asm/barrier.h>
38#include <asm/export.h>
39#include <asm/asm-compat.h>
40#ifdef CONFIG_PPC_BOOK3S
41#include <asm/exception-64s.h>
42#else
43#include <asm/exception-64e.h>
44#endif
45#include <asm/feature-fixups.h>
46#include <asm/kup.h>
47
48/*
49 * System calls.
50 */
51 .section ".text"
52
53#ifdef CONFIG_PPC_BOOK3S_64
54
55#define FLUSH_COUNT_CACHE \
561: nop; \
57 patch_site 1b, patch__call_flush_branch_caches1; \
581: nop; \
59 patch_site 1b, patch__call_flush_branch_caches2; \
601: nop; \
61 patch_site 1b, patch__call_flush_branch_caches3
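/*
 * The nops above are patch sites: when the branch cache flush mitigation
 * is enabled, boot-time code patching is expected to turn them into
 * "bl flush_branch_caches" calls via the patch__call_flush_branch_caches*
 * sites; when it is disabled they remain plain nops and cost almost nothing.
 */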
62
63.macro nops number
64 .rept \number
65 nop
66 .endr
67.endm
68
69.balign 32
70.global flush_branch_caches
71flush_branch_caches:
72 /* Save LR into r9 */
73 mflr r9
74
75 // Flush the link stack
76 .rept 64
77 ANNOTATE_INTRA_FUNCTION_CALL
78 bl .+4
79 .endr
80 b 1f
81 nops 6
82
83 .balign 32
84 /* Restore LR */
851: mtlr r9
86
87 // If we're just flushing the link stack, return here
883: nop
89 patch_site 3b patch__flush_link_stack_return
90
91 li r9,0x7fff
92 mtctr r9
93
94 PPC_BCCTR_FLUSH
95
962: nop
97 patch_site 2b patch__flush_count_cache_return
98
99 nops 3
100
101 .rept 278
102 .balign 32
103 PPC_BCCTR_FLUSH
104 nops 7
105 .endr
106
107 blr
108#else
109#define FLUSH_COUNT_CACHE
110#endif /* CONFIG_PPC_BOOK3S_64 */
111
112/*
113 * This routine switches between two different tasks. The process
114 * state of one is saved on its kernel stack. Then the state
115 * of the other is restored from its kernel stack. The memory
116 * management hardware is updated to the second process's state.
117 * Finally, we can return to the second process, via interrupt_return.
118 * On entry, r3 points to the THREAD for the current task, r4
119 * points to the THREAD for the new task.
120 *
121 * Note: there are two ways to get to the "going out" portion
122 * of this code; either by coming in via the entry (_switch)
123 * or via "fork" which must set up an environment equivalent
124 * to the "_switch" path. If you change this you'll have to change
125 * the fork code also.
126 *
127 * The code which creates the new task context is in 'copy_thread'
128 * in arch/powerpc/kernel/process.c
129 */
130 .align 7
131_GLOBAL(_switch)
132 mflr r0
133 std r0,16(r1)
134 stdu r1,-SWITCH_FRAME_SIZE(r1)
135 /* r3-r13 are caller saved -- Cort */
136 SAVE_NVGPRS(r1)
137 std r0,_NIP(r1) /* Return to switch caller */
138 mfcr r23
139 std r23,_CCR(r1)
140 std r1,KSP(r3) /* Set old stack pointer */
141
142 kuap_check_amr r9, r10
143
144 FLUSH_COUNT_CACHE /* Clobbers r9, ctr */
145
146 /*
147 * On SMP kernels, care must be taken because a task may be
148 * scheduled off CPUx and on to CPUy. Memory ordering must be
149 * considered.
150 *
151 * Cacheable stores on CPUx will be visible when the task is
152 * scheduled on CPUy by virtue of the core scheduler barriers
153 * (see "Notes on Program-Order guarantees on SMP systems." in
154 * kernel/sched/core.c).
155 *
156 * Uncacheable stores in the case of involuntary preemption must
157 * be taken care of. The smp_mb__after_spinlock() in __schedule()
158 * is implemented as hwsync on powerpc, which orders MMIO too. So
159 * long as there is an hwsync in the context switch path, it will
160 * be executed on the source CPU after the task has performed
161 * all MMIO ops on that CPU, and on the destination CPU before the
162 * task performs any MMIO ops there.
163 */
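/*
 * A rough C sketch of the guarantee described above (illustrative only,
 * names as in kernel/sched/core.c):
 *
 *	// On CPUx, before being switched out:
 *	writel(val, mmio_reg);
 *	...
 *	raw_spin_lock(&rq->lock);
 *	smp_mb__after_spinlock();	// hwsync: orders the MMIO store
 *
 *	// CPUy, after switching the task in, is then guaranteed to see the
 *	// MMIO store performed before any MMIO the task issues there.
 */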
164
165 /*
166 * The kernel context switch path must contain a spin_lock,
167 * which contains larx/stcx, which will clear any reservation
168 * of the task being switched.
169 */
170#ifdef CONFIG_PPC_BOOK3S
171/* Cancel all explicit user streams, as they will have no use after the
172 * context switch and would stop the HW from creating streams itself.
173 */
174 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
175#endif
176
177 addi r6,r4,-THREAD /* Convert THREAD to 'current' */
178 std r6,PACACURRENT(r13) /* Set new 'current' */
179#if defined(CONFIG_STACKPROTECTOR)
180 ld r6, TASK_CANARY(r6)
181 std r6, PACA_CANARY(r13)
182#endif
183
184 ld r8,KSP(r4) /* new stack pointer */
185#ifdef CONFIG_PPC_64S_HASH_MMU
186BEGIN_MMU_FTR_SECTION
187 b 2f
188END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
189BEGIN_FTR_SECTION
190 clrrdi r6,r8,28 /* get its ESID */
191 clrrdi r9,r1,28 /* get current sp ESID */
192FTR_SECTION_ELSE
193 clrrdi r6,r8,40 /* get its 1T ESID */
194 clrrdi r9,r1,40 /* get current sp 1T ESID */
195ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
196 clrldi. r0,r6,2 /* is new ESID c00000000? */
197 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
198 cror eq,4*cr1+eq,eq
199 beq 2f /* if yes, don't slbie it */
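	/*
	 * That is (as the checks above suggest): a stack in the bolted
	 * kernel linear-mapping segment (ESID 0xc...) never needs a new
	 * SLB entry, and a stack in the same segment as the current one
	 * is already covered by the existing entry, so both cases skip
	 * the slbie/slbmte sequence below.
	 */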
200
201 /* Bolt in the new stack SLB entry */
202 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
203 oris r0,r6,(SLB_ESID_V)@h
204 ori r0,r0,(SLB_NUM_BOLTED-1)@l
205BEGIN_FTR_SECTION
206 li r9,MMU_SEGSIZE_1T /* insert B field */
207 oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
208 rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0
209END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
210
211 /* Update the last bolted SLB. No write barriers are needed
212 * here, provided we only update the current CPU's SLB shadow
213 * buffer.
214 */
215 ld r9,PACA_SLBSHADOWPTR(r13)
216 li r12,0
217 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
218 li r12,SLBSHADOW_STACKVSID
219 STDX_BE r7,r12,r9 /* Save VSID */
220 li r12,SLBSHADOW_STACKESID
221 STDX_BE r0,r12,r9 /* Save ESID */
222
223 /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
224 * we have 1TB segments, the only CPUs known to have the errata
225 * only support less than 1TB of system memory and we'll never
226 * actually hit this code path.
227 */
228
229 isync
230 slbie r6
231BEGIN_FTR_SECTION
232 slbie r6 /* Workaround POWER5 < DD2.1 issue */
233END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
234 slbmte r7,r0
235 isync
2362:
237#endif /* CONFIG_PPC_64S_HASH_MMU */
238
239 clrrdi r7, r8, THREAD_SHIFT /* base of new stack */
240 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
241 because we don't need to leave the 288-byte ABI gap at the
242 top of the kernel stack. */
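	/* (The 288 bytes are the ABI's protected save area below r1:
	   enough for 18 GPRs + 18 FPRs at 8 bytes each = 288.) */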
243 addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
244
245 /*
246 * PMU interrupts in radix may come in here. They will use r1, not
247 * PACAKSAVE, so this stack switch will not cause a problem. They
248 * will store to the process stack, which may then be migrated to
249 * another CPU. However the rq lock release on this CPU paired with
250 * the rq lock acquire on the new CPU before the stack becomes
251 * active on the new CPU, will order those stores.
252 */
253 mr r1,r8 /* start using new stack pointer */
254 std r7,PACAKSAVE(r13)
255
256 ld r6,_CCR(r1)
257 mtcrf 0xFF,r6
258
259 /* r3-r13 are destroyed -- Cort */
260 REST_NVGPRS(r1)
261
262 /* convert old thread to its task_struct for return value */
263 addi r3,r3,-THREAD
264 ld r7,_NIP(r1) /* Return to _switch caller in new task */
265 mtlr r7
266 addi r1,r1,SWITCH_FRAME_SIZE
267 blr
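/*
 * For reference, the C caller in arch/powerpc/kernel/process.c uses this
 * roughly as:
 *
 *	last = _switch(old_thread, new_thread);
 *
 * i.e. r3 on return is the task_struct of the task we switched away from,
 * which is why the THREAD offset is subtracted just before the blr above.
 */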
268
269_GLOBAL(enter_prom)
270 mflr r0
271 std r0,16(r1)
272 stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space */
273
274 /* Because PROM is running in 32b mode, it clobbers the high order half
275 * of all registers that it saves. We therefore save those registers
276 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
277 */
278 SAVE_GPR(2, r1)
279 SAVE_GPR(13, r1)
280 SAVE_NVGPRS(r1)
281 mfcr r10
282 mfmsr r11
283 std r10,_CCR(r1)
284 std r11,_MSR(r1)
285
286 /* Put PROM address in SRR0 */
287 mtsrr0 r4
288
289 /* Setup our trampoline return addr in LR */
290 bcl 20,31,$+4
2910: mflr r4
292 addi r4,r4,(1f - 0b)
293 mtlr r4
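	/*
	 * The "bcl 20,31,$+4" above is the usual PC-relative idiom: a
	 * branch-and-link to the very next instruction, a form the hardware
	 * treats specially so it should not pollute the link stack
	 * predictor, leaving the current address in LR for the addi.
	 */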
294
295 /* Prepare a 32-bit mode big endian MSR
296 */
297#ifdef CONFIG_PPC_BOOK3E_64
298 rlwinm r11,r11,0,1,31
299 mtsrr1 r11
300 rfi
301#else /* CONFIG_PPC_BOOK3E_64 */
302 LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_LE)
303 andc r11,r11,r12
304 mtsrr1 r11
305 RFI_TO_KERNEL
306#endif /* CONFIG_PPC_BOOK3E_64 */
307
3081: /* Return from OF */
309 FIXUP_ENDIAN
310
311 /* Just make sure that the top 32 bits of r1 didn't get
312 * corrupted by OF
313 */
314 rldicl r1,r1,0,32
315
316 /* Restore the MSR (back to 64 bits) */
317 ld r0,_MSR(r1)
318 MTMSRD(r0)
319 isync
320
321 /* Restore other registers */
322 REST_GPR(2, r1)
323 REST_GPR(13, r1)
324 REST_NVGPRS(r1)
325 ld r4,_CCR(r1)
326 mtcr r4
327
328 addi r1,r1,SWITCH_FRAME_SIZE
329 ld r0,16(r1)
330 mtlr r0
331 blr
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * PowerPC version
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
6 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
7 * Adapted for Power Macintosh by Paul Mackerras.
8 * Low-level exception handlers and MMU support
9 * rewritten by Paul Mackerras.
10 * Copyright (C) 1996 Paul Mackerras.
11 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
12 *
13 * This file contains the system call entry code, context switch
14 * code, and exception/interrupt return code for PowerPC.
15 */
16
17#include <linux/errno.h>
18#include <linux/err.h>
19#include <asm/unistd.h>
20#include <asm/processor.h>
21#include <asm/page.h>
22#include <asm/mmu.h>
23#include <asm/thread_info.h>
24#include <asm/code-patching-asm.h>
25#include <asm/ppc_asm.h>
26#include <asm/asm-offsets.h>
27#include <asm/cputable.h>
28#include <asm/firmware.h>
29#include <asm/bug.h>
30#include <asm/ptrace.h>
31#include <asm/irqflags.h>
32#include <asm/hw_irq.h>
33#include <asm/context_tracking.h>
34#include <asm/tm.h>
35#include <asm/ppc-opcode.h>
36#include <asm/barrier.h>
37#include <asm/export.h>
38#include <asm/asm-compat.h>
39#ifdef CONFIG_PPC_BOOK3S
40#include <asm/exception-64s.h>
41#else
42#include <asm/exception-64e.h>
43#endif
44#include <asm/feature-fixups.h>
45#include <asm/kup.h>
46
47/*
48 * System calls.
49 */
50 .section ".toc","aw"
51SYS_CALL_TABLE:
52 .tc sys_call_table[TC],sys_call_table
53
54COMPAT_SYS_CALL_TABLE:
55 .tc compat_sys_call_table[TC],compat_sys_call_table
56
57/* This value is used to mark exception frames on the stack. */
58exception_marker:
59 .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
60
61 .section ".text"
62 .align 7
63
64 .globl system_call_common
65system_call_common:
66#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
67BEGIN_FTR_SECTION
68 extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
69 bne .Ltabort_syscall
70END_FTR_SECTION_IFSET(CPU_FTR_TM)
71#endif
72 mr r10,r1
73 ld r1,PACAKSAVE(r13)
74 std r10,0(r1)
75 std r11,_NIP(r1)
76 std r12,_MSR(r1)
77 std r0,GPR0(r1)
78 std r10,GPR1(r1)
79#ifdef CONFIG_PPC_FSL_BOOK3E
80START_BTB_FLUSH_SECTION
81 BTB_FLUSH(r10)
82END_BTB_FLUSH_SECTION
83#endif
84 ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
85 std r2,GPR2(r1)
86 std r3,GPR3(r1)
87 mfcr r2
88 std r4,GPR4(r1)
89 std r5,GPR5(r1)
90 std r6,GPR6(r1)
91 std r7,GPR7(r1)
92 std r8,GPR8(r1)
93 li r11,0
94 std r11,GPR9(r1)
95 std r11,GPR10(r1)
96 std r11,GPR11(r1)
97 std r11,GPR12(r1)
98 std r11,_XER(r1)
99 std r11,_CTR(r1)
100 std r9,GPR13(r1)
101 mflr r10
102 /*
103 * This clears CR0.SO (bit 28), which is the error indication on
104 * return from this system call.
105 */
106 rldimi r2,r11,28,(63-28)
107 li r11,0xc01
108 std r10,_LINK(r1)
109 std r11,_TRAP(r1)
110 std r3,ORIG_GPR3(r1)
111 std r2,_CCR(r1)
112 ld r2,PACATOC(r13)
113 addi r9,r1,STACK_FRAME_OVERHEAD
114 ld r11,exception_marker@toc(r2)
115 std r11,-16(r9) /* "regshere" marker */
116
117 kuap_check_amr r10, r11
118
119#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
120BEGIN_FW_FTR_SECTION
121 /* see if there are any DTL entries to process */
122 ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */
123 ld r11,PACA_DTL_RIDX(r13) /* get log read index */
124 addi r10,r10,LPPACA_DTLIDX
125 LDX_BE r10,0,r10 /* get log write index */
126 cmpd r11,r10
127 beq+ 33f
128 bl accumulate_stolen_time
129 REST_GPR(0,r1)
130 REST_4GPRS(3,r1)
131 REST_2GPRS(7,r1)
132 addi r9,r1,STACK_FRAME_OVERHEAD
13333:
134END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
135#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
136
137 /*
138 * A syscall should always be called with interrupts enabled
139 * so we just unconditionally hard-enable here. When some kind
140 * of irq tracing is used, we additionally check that condition
141 * is correct
142 */
143#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
144 lbz r10,PACAIRQSOFTMASK(r13)
1451: tdnei r10,IRQS_ENABLED
146 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
147#endif
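	/*
	 * The tdnei above is roughly the asm equivalent of
	 * WARN_ON(irq_soft_mask_return() != IRQS_ENABLED), assuming the
	 * usual soft-mask helpers; it only exists under the debug option.
	 */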
148
149#ifdef CONFIG_PPC_BOOK3E
150 wrteei 1
151#else
152 li r11,MSR_RI
153 ori r11,r11,MSR_EE
154 mtmsrd r11,1
155#endif /* CONFIG_PPC_BOOK3E */
156
157system_call: /* label this so stack traces look sane */
158 /* We do need to set SOFTE in the stack frame or the return
159 * from interrupt will be painful
160 */
161 li r10,IRQS_ENABLED
162 std r10,SOFTE(r1)
163
164 ld r11, PACA_THREAD_INFO(r13)
165 ld r10,TI_FLAGS(r11)
166 andi. r11,r10,_TIF_SYSCALL_DOTRACE
167 bne .Lsyscall_dotrace /* does not return */
168 cmpldi 0,r0,NR_syscalls
169 bge- .Lsyscall_enosys
170
171.Lsyscall:
172/*
173 * Need to vector to 32 Bit or default sys_call_table here,
174 * based on caller's run-mode / personality.
175 */
176 ld r11,SYS_CALL_TABLE@toc(2)
177 andis. r10,r10,_TIF_32BIT@h
178 beq 15f
179 ld r11,COMPAT_SYS_CALL_TABLE@toc(2)
180 clrldi r3,r3,32
181 clrldi r4,r4,32
182 clrldi r5,r5,32
183 clrldi r6,r6,32
184 clrldi r7,r7,32
185 clrldi r8,r8,32
18615:
187 slwi r0,r0,3
188
189 barrier_nospec_asm
190 /*
191 * Prevent the load of the handler below (based on the user-passed
192 * system call number) being speculatively executed until the test
193 * against NR_syscalls and branch to .Lsyscall_enosys above has
194 * committed.
195 */
196
197 ldx r12,r11,r0 /* Fetch system call handler [ptr] */
198 mtctr r12
199 bctrl /* Call handler */
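	/*
	 * In rough C, the dispatch above is (illustrative only):
	 *
	 *	if (is_32bit_task()) {
	 *		table = compat_sys_call_table;
	 *		for (i = 3; i <= 8; i++)		// clrldi of each arg
	 *			regs->gpr[i] = (u32)regs->gpr[i];
	 *	} else {
	 *		table = sys_call_table;
	 *	}
	 *	barrier_nospec();			// bound speculation on r0
	 *	ret = table[r0](r3, r4, r5, r6, r7, r8);
	 */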
200
201 /* syscall_exit can exit to kernel mode, via ret_from_kernel_thread */
202.Lsyscall_exit:
203 std r3,RESULT(r1)
204
205#ifdef CONFIG_DEBUG_RSEQ
206 /* Check whether the syscall is issued inside a restartable sequence */
207 addi r3,r1,STACK_FRAME_OVERHEAD
208 bl rseq_syscall
209 ld r3,RESULT(r1)
210#endif
211
212 ld r12, PACA_THREAD_INFO(r13)
213
214 ld r8,_MSR(r1)
215
216/*
217 * This is a few instructions into the actual syscall exit path (which actually
218 * starts at .Lsyscall_exit) to cater to kprobe blacklisting and to reduce the
219 * number of visible symbols for profiling purposes.
220 *
221 * We can probe from system_call until this point as MSR_RI is set. But once it
222 * is cleared below, we won't be able to take a trap.
223 *
224 * This is blacklisted from kprobes further below with _ASM_NOKPROBE_SYMBOL().
225 */
226system_call_exit:
227 /*
228 * Disable interrupts so current_thread_info()->flags can't change,
229 * and so that we don't get interrupted after loading SRR0/1.
230 *
231 * Leave MSR_RI enabled for now, because with THREAD_INFO_IN_TASK we
232 * could fault on the load of the TI_FLAGS below.
233 */
234#ifdef CONFIG_PPC_BOOK3E
235 wrteei 0
236#else
237 li r11,MSR_RI
238 mtmsrd r11,1
239#endif /* CONFIG_PPC_BOOK3E */
240
241 ld r9,TI_FLAGS(r12)
242 li r11,-MAX_ERRNO
243 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
244 bne- .Lsyscall_exit_work
245
246 andi. r0,r8,MSR_FP
247 beq 2f
248#ifdef CONFIG_ALTIVEC
249 andis. r0,r8,MSR_VEC@h
250 bne 3f
251#endif
2522: addi r3,r1,STACK_FRAME_OVERHEAD
253 bl restore_math
254 ld r8,_MSR(r1)
255 ld r3,RESULT(r1)
256 li r11,-MAX_ERRNO
257
2583: cmpld r3,r11
259 ld r5,_CCR(r1)
260 bge- .Lsyscall_error
261.Lsyscall_error_cont:
262 ld r7,_NIP(r1)
263BEGIN_FTR_SECTION
264 stdcx. r0,0,r1 /* to clear the reservation */
265END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
266 andi. r6,r8,MSR_PR
267 ld r4,_LINK(r1)
268
269 kuap_check_amr r10, r11
270
271#ifdef CONFIG_PPC_BOOK3S
272 /*
273 * Clear MSR_RI; MSR_EE is already disabled and remains so. We could
274 * do this later, but testing shows that doing it here causes less
275 * slowdown than doing it closer to the rfid.
276 */
277 li r11,0
278 mtmsrd r11,1
279#endif
280
281 beq- 1f
282 ACCOUNT_CPU_USER_EXIT(r13, r11, r12)
283
284BEGIN_FTR_SECTION
285 HMT_MEDIUM_LOW
286END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
287
288#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
289 std r8, PACATMSCRATCH(r13)
290#endif
291
292 /*
293 * We don't need to restore AMR on the way back to userspace for KUAP.
294 * The value of AMR only matters while we're in the kernel.
295 */
296 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
297 ld r2,GPR2(r1)
298 ld r1,GPR1(r1)
299 mtlr r4
300 mtcr r5
301 mtspr SPRN_SRR0,r7
302 mtspr SPRN_SRR1,r8
303 RFI_TO_USER
304 b . /* prevent speculative execution */
305
3061: /* exit to kernel */
307 kuap_restore_amr r2
308
309 ld r2,GPR2(r1)
310 ld r1,GPR1(r1)
311 mtlr r4
312 mtcr r5
313 mtspr SPRN_SRR0,r7
314 mtspr SPRN_SRR1,r8
315 RFI_TO_KERNEL
316 b . /* prevent speculative execution */
317
318.Lsyscall_error:
319 oris r5,r5,0x1000 /* Set SO bit in CR */
320 neg r3,r3
321 std r5,_CCR(r1)
322 b .Lsyscall_error_cont
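/*
 * Error convention, roughly in C (illustrative only): an unsigned return
 * value at or above -MAX_ERRNO is treated as an error, so
 *
 *	if ((unsigned long)ret >= (unsigned long)-MAX_ERRNO) {
 *		// set CR0.SO: libc wrappers test SO to detect failure
 *		ret = -ret;		// positive errno back in r3
 *	}
 */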
323
324/* Traced system call support */
325.Lsyscall_dotrace:
326 bl save_nvgprs
327 addi r3,r1,STACK_FRAME_OVERHEAD
328 bl do_syscall_trace_enter
329
330 /*
331 * We use the return value of do_syscall_trace_enter() as the syscall
331 * number. If the syscall was rejected for any reason, do_syscall_trace_enter()
333 * returns an invalid syscall number and the test below against
334 * NR_syscalls will fail.
335 */
336 mr r0,r3
337
338 /* Restore argument registers just clobbered and/or possibly changed. */
339 ld r3,GPR3(r1)
340 ld r4,GPR4(r1)
341 ld r5,GPR5(r1)
342 ld r6,GPR6(r1)
343 ld r7,GPR7(r1)
344 ld r8,GPR8(r1)
345
346 /* Repopulate r9 and r10 for the syscall path */
347 addi r9,r1,STACK_FRAME_OVERHEAD
348 ld r10, PACA_THREAD_INFO(r13)
349 ld r10,TI_FLAGS(r10)
350
351 cmpldi r0,NR_syscalls
352 blt+ .Lsyscall
353
354 /* Return code is already in r3 thanks to do_syscall_trace_enter() */
355 b .Lsyscall_exit
356
357
358.Lsyscall_enosys:
359 li r3,-ENOSYS
360 b .Lsyscall_exit
361
362.Lsyscall_exit_work:
363 /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
364 If TIF_NOERROR is set, just save r3 as it is. */
365
366 andi. r0,r9,_TIF_RESTOREALL
367 beq+ 0f
368 REST_NVGPRS(r1)
369 b 2f
3700: cmpld r3,r11 /* r11 is -MAX_ERRNO */
371 blt+ 1f
372 andi. r0,r9,_TIF_NOERROR
373 bne- 1f
374 ld r5,_CCR(r1)
375 neg r3,r3
376 oris r5,r5,0x1000 /* Set SO bit in CR */
377 std r5,_CCR(r1)
3781: std r3,GPR3(r1)
3792: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
380 beq 4f
381
382 /* Clear per-syscall TIF flags if any are set. */
383
384 li r11,_TIF_PERSYSCALL_MASK
385 addi r12,r12,TI_FLAGS
3863: ldarx r10,0,r12
387 andc r10,r10,r11
388 stdcx. r10,0,r12
389 bne- 3b
390 subi r12,r12,TI_FLAGS
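	/*
	 * The ldarx/stdcx. loop above is roughly an atomic
	 * ti->flags &= ~_TIF_PERSYSCALL_MASK, retried until the
	 * store-conditional succeeds.
	 */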
391
3924: /* Anything else left to do? */
393BEGIN_FTR_SECTION
394 lis r3,DEFAULT_PPR@highest /* Set default PPR */
395 sldi r3,r3,32 /* bits 11-13 are used for ppr */
396 std r3,_PPR(r1)
397END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
398
399 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
400 beq ret_from_except_lite
401
402 /* Re-enable interrupts */
403#ifdef CONFIG_PPC_BOOK3E
404 wrteei 1
405#else
406 li r10,MSR_RI
407 ori r10,r10,MSR_EE
408 mtmsrd r10,1
409#endif /* CONFIG_PPC_BOOK3E */
410
411 bl save_nvgprs
412 addi r3,r1,STACK_FRAME_OVERHEAD
413 bl do_syscall_trace_leave
414 b ret_from_except
415
416#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
417.Ltabort_syscall:
418 /* Firstly we need to enable TM in the kernel */
419 mfmsr r10
420 li r9, 1
421 rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
422 mtmsrd r10, 0
423
424 /* tabort, this dooms the transaction, nothing else */
425 li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
426 TABORT(R9)
427
428 /*
429 * Return directly to userspace. We have corrupted user register state,
430 * but userspace will never see that register state. Execution will
431 * resume after the tbegin of the aborted transaction with the
432 * checkpointed register state.
433 */
434 li r9, MSR_RI
435 andc r10, r10, r9
436 mtmsrd r10, 1
437 mtspr SPRN_SRR0, r11
438 mtspr SPRN_SRR1, r12
439 RFI_TO_USER
440 b . /* prevent speculative execution */
441#endif
442_ASM_NOKPROBE_SYMBOL(system_call_common);
443_ASM_NOKPROBE_SYMBOL(system_call_exit);
444
445/* Save non-volatile GPRs, if not already saved. */
446_GLOBAL(save_nvgprs)
447 ld r11,_TRAP(r1)
448 andi. r0,r11,1
449 beqlr-
450 SAVE_NVGPRS(r1)
451 clrrdi r0,r11,1
452 std r0,_TRAP(r1)
453 blr
454_ASM_NOKPROBE_SYMBOL(save_nvgprs);
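/*
 * The low bit of _TRAP is used as a "non-volatile GPRs not saved" flag
 * (see the FULL_REGS() test in asm/ptrace.h): save_nvgprs returns early
 * if it is already clear, and clears it once the GPRs have been saved.
 */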
455
456
457/*
458 * The sigsuspend and rt_sigsuspend system calls can call do_signal
459 * and thus put the process into the stopped state where we might
460 * want to examine its user state with ptrace. Therefore we need
461 * to save all the nonvolatile registers (r14 - r31) before calling
462 * the C code. Similarly, fork, vfork and clone need the full
463 * register state on the stack so that it can be copied to the child.
464 */
465
466_GLOBAL(ppc_fork)
467 bl save_nvgprs
468 bl sys_fork
469 b .Lsyscall_exit
470
471_GLOBAL(ppc_vfork)
472 bl save_nvgprs
473 bl sys_vfork
474 b .Lsyscall_exit
475
476_GLOBAL(ppc_clone)
477 bl save_nvgprs
478 bl sys_clone
479 b .Lsyscall_exit
480
481_GLOBAL(ppc_clone3)
482 bl save_nvgprs
483 bl sys_clone3
484 b .Lsyscall_exit
485
486_GLOBAL(ppc32_swapcontext)
487 bl save_nvgprs
488 bl compat_sys_swapcontext
489 b .Lsyscall_exit
490
491_GLOBAL(ppc64_swapcontext)
492 bl save_nvgprs
493 bl sys_swapcontext
494 b .Lsyscall_exit
495
496_GLOBAL(ppc_switch_endian)
497 bl save_nvgprs
498 bl sys_switch_endian
499 b .Lsyscall_exit
500
501_GLOBAL(ret_from_fork)
502 bl schedule_tail
503 REST_NVGPRS(r1)
504 li r3,0
505 b .Lsyscall_exit
506
507_GLOBAL(ret_from_kernel_thread)
508 bl schedule_tail
509 REST_NVGPRS(r1)
510 mtlr r14
511 mr r3,r15
512#ifdef PPC64_ELF_ABI_v2
513 mr r12,r14
514#endif
515 blrl
516 li r3,0
517 b .Lsyscall_exit
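/*
 * Note on the PPC64_ELF_ABI_v2 case above: the ELFv2 ABI expects r12 to
 * hold the entry address at a function's global entry point (it is used
 * to derive the TOC pointer), so the thread function pointer is copied
 * into r12 before the blrl.
 */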
518
519#ifdef CONFIG_PPC_BOOK3S_64
520
521#define FLUSH_COUNT_CACHE \
5221: nop; \
523 patch_site 1b, patch__call_flush_count_cache
524
525
526#define BCCTR_FLUSH .long 0x4c400420
527
528.macro nops number
529 .rept \number
530 nop
531 .endr
532.endm
533
534.balign 32
535.global flush_count_cache
536flush_count_cache:
537 /* Save LR into r9 */
538 mflr r9
539
540 .rept 64
541 bl .+4
542 .endr
543 b 1f
544 nops 6
545
546 .balign 32
547 /* Restore LR */
5481: mtlr r9
549 li r9,0x7fff
550 mtctr r9
551
552 BCCTR_FLUSH
553
5542: nop
555 patch_site 2b patch__flush_count_cache_return
556
557 nops 3
558
559 .rept 278
560 .balign 32
561 BCCTR_FLUSH
562 nops 7
563 .endr
564
565 blr
566#else
567#define FLUSH_COUNT_CACHE
568#endif /* CONFIG_PPC_BOOK3S_64 */
569
570/*
571 * This routine switches between two different tasks. The process
572 * state of one is saved on its kernel stack. Then the state
573 * of the other is restored from its kernel stack. The memory
574 * management hardware is updated to the second process's state.
575 * Finally, we can return to the second process, via ret_from_except.
576 * On entry, r3 points to the THREAD for the current task, r4
577 * points to the THREAD for the new task.
578 *
579 * Note: there are two ways to get to the "going out" portion
580 * of this code; either by coming in via the entry (_switch)
581 * or via "fork" which must set up an environment equivalent
582 * to the "_switch" path. If you change this you'll have to change
583 * the fork code also.
584 *
585 * The code which creates the new task context is in 'copy_thread'
586 * in arch/powerpc/kernel/process.c
587 */
588 .align 7
589_GLOBAL(_switch)
590 mflr r0
591 std r0,16(r1)
592 stdu r1,-SWITCH_FRAME_SIZE(r1)
593 /* r3-r13 are caller saved -- Cort */
594 SAVE_8GPRS(14, r1)
595 SAVE_10GPRS(22, r1)
596 std r0,_NIP(r1) /* Return to switch caller */
597 mfcr r23
598 std r23,_CCR(r1)
599 std r1,KSP(r3) /* Set old stack pointer */
600
601 kuap_check_amr r9, r10
602
603 FLUSH_COUNT_CACHE
604
605 /*
606 * On SMP kernels, care must be taken because a task may be
607 * scheduled off CPUx and on to CPUy. Memory ordering must be
608 * considered.
609 *
610 * Cacheable stores on CPUx will be visible when the task is
611 * scheduled on CPUy by virtue of the core scheduler barriers
612 * (see "Notes on Program-Order guarantees on SMP systems." in
613 * kernel/sched/core.c).
614 *
615 * Uncacheable stores in the case of involuntary preemption must
616 * be taken care of. The smp_mb__after_spinlock() in __schedule()
617 * is implemented as hwsync on powerpc, which orders MMIO too. So
618 * long as there is an hwsync in the context switch path, it will
619 * be executed on the source CPU after the task has performed
620 * all MMIO ops on that CPU, and on the destination CPU before the
621 * task performs any MMIO ops there.
622 */
623
624 /*
625 * The kernel context switch path must contain a spin_lock,
626 * which contains larx/stcx, which will clear any reservation
627 * of the task being switched.
628 */
629#ifdef CONFIG_PPC_BOOK3S
630/* Cancel all explicit user streams, as they will have no use after the
631 * context switch and would stop the HW from creating streams itself.
632 */
633 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
634#endif
635
636 addi r6,r4,-THREAD /* Convert THREAD to 'current' */
637 std r6,PACACURRENT(r13) /* Set new 'current' */
638#if defined(CONFIG_STACKPROTECTOR)
639 ld r6, TASK_CANARY(r6)
640 std r6, PACA_CANARY(r13)
641#endif
642
643 ld r8,KSP(r4) /* new stack pointer */
644#ifdef CONFIG_PPC_BOOK3S_64
645BEGIN_MMU_FTR_SECTION
646 b 2f
647END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
648BEGIN_FTR_SECTION
649 clrrdi r6,r8,28 /* get its ESID */
650 clrrdi r9,r1,28 /* get current sp ESID */
651FTR_SECTION_ELSE
652 clrrdi r6,r8,40 /* get its 1T ESID */
653 clrrdi r9,r1,40 /* get current sp 1T ESID */
654ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
655 clrldi. r0,r6,2 /* is new ESID c00000000? */
656 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
657 cror eq,4*cr1+eq,eq
658 beq 2f /* if yes, don't slbie it */
659
660 /* Bolt in the new stack SLB entry */
661 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
662 oris r0,r6,(SLB_ESID_V)@h
663 ori r0,r0,(SLB_NUM_BOLTED-1)@l
664BEGIN_FTR_SECTION
665 li r9,MMU_SEGSIZE_1T /* insert B field */
666 oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
667 rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0
668END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
669
670 /* Update the last bolted SLB. No write barriers are needed
671 * here, provided we only update the current CPU's SLB shadow
672 * buffer.
673 */
674 ld r9,PACA_SLBSHADOWPTR(r13)
675 li r12,0
676 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
677 li r12,SLBSHADOW_STACKVSID
678 STDX_BE r7,r12,r9 /* Save VSID */
679 li r12,SLBSHADOW_STACKESID
680 STDX_BE r0,r12,r9 /* Save ESID */
681
682 /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
683 * we have 1TB segments, the only CPUs known to have the errata
684 * only support less than 1TB of system memory and we'll never
685 * actually hit this code path.
686 */
687
688 isync
689 slbie r6
690BEGIN_FTR_SECTION
691 slbie r6 /* Workaround POWER5 < DD2.1 issue */
692END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
693 slbmte r7,r0
694 isync
6952:
696#endif /* CONFIG_PPC_BOOK3S_64 */
697
698 clrrdi r7, r8, THREAD_SHIFT /* base of new stack */
699 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
700 because we don't need to leave the 288-byte ABI gap at the
701 top of the kernel stack. */
702 addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
703
704 /*
705 * PMU interrupts in radix may come in here. They will use r1, not
706 * PACAKSAVE, so this stack switch will not cause a problem. They
707 * will store to the process stack, which may then be migrated to
708 * another CPU. However the rq lock release on this CPU paired with
709 * the rq lock acquire on the new CPU before the stack becomes
710 * active on the new CPU, will order those stores.
711 */
712 mr r1,r8 /* start using new stack pointer */
713 std r7,PACAKSAVE(r13)
714
715 ld r6,_CCR(r1)
716 mtcrf 0xFF,r6
717
718 /* r3-r13 are destroyed -- Cort */
719 REST_8GPRS(14, r1)
720 REST_10GPRS(22, r1)
721
722 /* convert old thread to its task_struct for return value */
723 addi r3,r3,-THREAD
724 ld r7,_NIP(r1) /* Return to _switch caller in new task */
725 mtlr r7
726 addi r1,r1,SWITCH_FRAME_SIZE
727 blr
728
729 .align 7
730_GLOBAL(ret_from_except)
731 ld r11,_TRAP(r1)
732 andi. r0,r11,1
733 bne ret_from_except_lite
734 REST_NVGPRS(r1)
735
736_GLOBAL(ret_from_except_lite)
737 /*
738 * Disable interrupts so that current_thread_info()->flags
739 * can't change between when we test it and when we return
740 * from the interrupt.
741 */
742#ifdef CONFIG_PPC_BOOK3E
743 wrteei 0
744#else
745 li r10,MSR_RI
746 mtmsrd r10,1 /* Update machine state */
747#endif /* CONFIG_PPC_BOOK3E */
748
749 ld r9, PACA_THREAD_INFO(r13)
750 ld r3,_MSR(r1)
751#ifdef CONFIG_PPC_BOOK3E
752 ld r10,PACACURRENT(r13)
753#endif /* CONFIG_PPC_BOOK3E */
754 ld r4,TI_FLAGS(r9)
755 andi. r3,r3,MSR_PR
756 beq resume_kernel
757#ifdef CONFIG_PPC_BOOK3E
758 lwz r3,(THREAD+THREAD_DBCR0)(r10)
759#endif /* CONFIG_PPC_BOOK3E */
760
761 /* Check current_thread_info()->flags */
762 andi. r0,r4,_TIF_USER_WORK_MASK
763 bne 1f
764#ifdef CONFIG_PPC_BOOK3E
765 /*
766 * Check to see if the dbcr0 register is set up to debug.
767 * Use the internal debug mode bit to do this.
768 */
769 andis. r0,r3,DBCR0_IDM@h
770 beq restore
771 mfmsr r0
772 rlwinm r0,r0,0,~MSR_DE /* Clear MSR.DE */
773 mtmsr r0
774 mtspr SPRN_DBCR0,r3
775 li r10, -1
776 mtspr SPRN_DBSR,r10
777 b restore
778#else
779 addi r3,r1,STACK_FRAME_OVERHEAD
780 bl restore_math
781 b restore
782#endif
7831: andi. r0,r4,_TIF_NEED_RESCHED
784 beq 2f
785 bl restore_interrupts
786 SCHEDULE_USER
787 b ret_from_except_lite
7882:
789#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
790 andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
791 bne 3f /* only restore TM if nothing else to do */
792 addi r3,r1,STACK_FRAME_OVERHEAD
793 bl restore_tm_state
794 b restore
7953:
796#endif
797 bl save_nvgprs
798 /*
799 * Use a non-volatile GPR to save and restore our thread_info flags
800 * across the call to restore_interrupts.
801 */
802 mr r30,r4
803 bl restore_interrupts
804 mr r4,r30
805 addi r3,r1,STACK_FRAME_OVERHEAD
806 bl do_notify_resume
807 b ret_from_except
808
809resume_kernel:
810 /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
811 andis. r8,r4,_TIF_EMULATE_STACK_STORE@h
812 beq+ 1f
813
814 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
815
816 ld r3,GPR1(r1)
817 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
818 mr r4,r1 /* src: current exception frame */
819 mr r1,r3 /* Reroute the trampoline frame to r1 */
820
821 /* Copy from the original to the trampoline. */
822 li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
823 li r6,0 /* start offset: 0 */
824 mtctr r5
8252: ldx r0,r6,r4
826 stdx r0,r6,r3
827 addi r6,r6,8
828 bdnz 2b
829
830 /* Do real store operation to complete stdu */
831 ld r5,GPR1(r1)
832 std r8,0(r5)
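	/*
	 * Roughly: an earlier "stdu r1,-N(r1)" was emulated with its store
	 * deferred (it would have hit the exception frame). The frame has
	 * now been copied out of the way, so the old stack pointer value in
	 * r8 is stored at the new r1 location to complete that stdu.
	 */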
833
834 /* Clear _TIF_EMULATE_STACK_STORE flag */
835 lis r11,_TIF_EMULATE_STACK_STORE@h
836 addi r5,r9,TI_FLAGS
8370: ldarx r4,0,r5
838 andc r4,r4,r11
839 stdcx. r4,0,r5
840 bne- 0b
8411:
842
843#ifdef CONFIG_PREEMPT
844 /* Check if we need to preempt */
845 andi. r0,r4,_TIF_NEED_RESCHED
846 beq+ restore
847 /* Check that preempt_count() == 0 and interrupts are enabled */
848 lwz r8,TI_PREEMPT(r9)
849 cmpwi cr0,r8,0
850 bne restore
851 ld r0,SOFTE(r1)
852 andi. r0,r0,IRQS_DISABLED
853 bne restore
854
855 /*
856 * Here we are preempting the current task. We want to make
857 * sure we are soft-disabled first and reconcile irq state.
858 */
859 RECONCILE_IRQ_STATE(r3,r4)
860 bl preempt_schedule_irq
861
862 /*
863 * arch_local_irq_restore() from preempt_schedule_irq above may
864 * enable hard interrupt but we really should disable interrupts
865 * when we return from the interrupt, and so that we don't get
866 * interrupted after loading SRR0/1.
867 */
868#ifdef CONFIG_PPC_BOOK3E
869 wrteei 0
870#else
871 li r10,MSR_RI
872 mtmsrd r10,1 /* Update machine state */
873#endif /* CONFIG_PPC_BOOK3E */
874#endif /* CONFIG_PREEMPT */
875
876 .globl fast_exc_return_irq
877fast_exc_return_irq:
878restore:
879 /*
880 * This is the main kernel exit path. First we check if we
881 * are about to re-enable interrupts
882 */
883 ld r5,SOFTE(r1)
884 lbz r6,PACAIRQSOFTMASK(r13)
885 andi. r5,r5,IRQS_DISABLED
886 bne .Lrestore_irq_off
887
888 /* We are enabling; were we already enabled? If so, just return */
889 andi. r6,r6,IRQS_DISABLED
890 beq cr0,.Ldo_restore
891
892 /*
893 * We are about to soft-enable interrupts (we are hard disabled
894 * at this point). We check if there's anything that needs to
895 * be replayed first.
896 */
897 lbz r0,PACAIRQHAPPENED(r13)
898 cmpwi cr0,r0,0
899 bne- .Lrestore_check_irq_replay
900
901 /*
902 * Get here when nothing happened while soft-disabled; just
903 * soft-enable and move on. We will hard-enable as a side
904 * effect of the rfi.
905 */
906.Lrestore_no_replay:
907 TRACE_ENABLE_INTS
908 li r0,IRQS_ENABLED
909 stb r0,PACAIRQSOFTMASK(r13);
910
911 /*
912 * Final return path. BookE is handled in a different file
913 */
914.Ldo_restore:
915#ifdef CONFIG_PPC_BOOK3E
916 b exception_return_book3e
917#else
918 /*
919 * Clear the reservation. If we know the CPU tracks the address of
920 * the reservation then we can potentially save some cycles and use
921 * a larx. On POWER6 and POWER7 this is significantly faster.
922 */
923BEGIN_FTR_SECTION
924 stdcx. r0,0,r1 /* to clear the reservation */
925FTR_SECTION_ELSE
926 ldarx r4,0,r1
927ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
928
929 /*
930 * Some code paths, such as load_up_fpu or altivec, return directly
931 * here. They run entirely hard disabled and do not alter the
932 * interrupt state. They also don't use lwarx/stwcx. and thus
933 * are known not to leave dangling reservations.
934 */
935 .globl fast_exception_return
936fast_exception_return:
937 ld r3,_MSR(r1)
938 ld r4,_CTR(r1)
939 ld r0,_LINK(r1)
940 mtctr r4
941 mtlr r0
942 ld r4,_XER(r1)
943 mtspr SPRN_XER,r4
944
945 kuap_check_amr r5, r6
946
947 REST_8GPRS(5, r1)
948
949 andi. r0,r3,MSR_RI
950 beq- .Lunrecov_restore
951
952 /*
953 * Clear RI before restoring r13. If we are returning to
954 * userspace and we take an exception after restoring r13,
955 * we end up corrupting the userspace r13 value.
956 */
957 li r4,0
958 mtmsrd r4,1
959
960#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
961 /* TM debug */
962 std r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
963#endif
964 /*
965 * r13 is our per-cpu area; only restore it if we are returning to
966 * userspace, as the value stored in the stack frame may belong to
967 * another CPU.
968 */
969 andi. r0,r3,MSR_PR
970 beq 1f
971BEGIN_FTR_SECTION
972 /* Restore PPR */
973 ld r2,_PPR(r1)
974 mtspr SPRN_PPR,r2
975END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
976 ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
977 REST_GPR(13, r1)
978
979 /*
980 * We don't need to restore AMR on the way back to userspace for KUAP.
981 * The value of AMR only matters while we're in the kernel.
982 */
983 mtspr SPRN_SRR1,r3
984
985 ld r2,_CCR(r1)
986 mtcrf 0xFF,r2
987 ld r2,_NIP(r1)
988 mtspr SPRN_SRR0,r2
989
990 ld r0,GPR0(r1)
991 ld r2,GPR2(r1)
992 ld r3,GPR3(r1)
993 ld r4,GPR4(r1)
994 ld r1,GPR1(r1)
995 RFI_TO_USER
996 b . /* prevent speculative execution */
997
9981: mtspr SPRN_SRR1,r3
999
1000 ld r2,_CCR(r1)
1001 mtcrf 0xFF,r2
1002 ld r2,_NIP(r1)
1003 mtspr SPRN_SRR0,r2
1004
1005 /*
1006 * Leaving a stale exception_marker on the stack can confuse
1007 * the reliable stack unwinder later on. Clear it.
1008 */
1009 li r2,0
1010 std r2,STACK_FRAME_OVERHEAD-16(r1)
1011
1012 ld r0,GPR0(r1)
1013 ld r2,GPR2(r1)
1014 ld r3,GPR3(r1)
1015
1016 kuap_restore_amr r4
1017
1018 ld r4,GPR4(r1)
1019 ld r1,GPR1(r1)
1020 RFI_TO_KERNEL
1021 b . /* prevent speculative execution */
1022
1023#endif /* CONFIG_PPC_BOOK3E */
1024
1025 /*
1026 * We are returning to a context with interrupts soft disabled.
1027 *
1028 * However, we may also be about to hard-enable, so we need to
1029 * make sure that in this case we also clear PACA_IRQ_HARD_DIS,
1030 * or that bit can get out of sync and bad things will happen.
1031 */
1032.Lrestore_irq_off:
1033 ld r3,_MSR(r1)
1034 lbz r7,PACAIRQHAPPENED(r13)
1035 andi. r0,r3,MSR_EE
1036 beq 1f
1037 rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
1038 stb r7,PACAIRQHAPPENED(r13)
10391:
1040#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
1041 /* The interrupt handler should not have soft-enabled interrupts. */
1042 lbz r7,PACAIRQSOFTMASK(r13)
10431: tdeqi r7,IRQS_ENABLED
1044 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
1045#endif
1046 b .Ldo_restore
1047
1048 /*
1049 * Something did happen, check if a re-emit is needed
1050 * (this also clears paca->irq_happened)
1051 */
1052.Lrestore_check_irq_replay:
1053 /* XXX: We could implement a fast path here where we check
1054 * for irq_happened being just 0x01, in which case we can
1055 * clear it and return. That means that we would potentially
1056 * miss a decrementer having wrapped all the way around.
1057 *
1058 * Still, this might be useful for things like hash_page
1059 */
1060 bl __check_irq_replay
1061 cmpwi cr0,r3,0
1062 beq .Lrestore_no_replay
1063
1064 /*
1065 * We need to re-emit an interrupt. We do so by re-using our
1066 * existing exception frame. We first change the trap value,
1067 * but we need to ensure we preserve the low nibble of it
1068 */
1069 ld r4,_TRAP(r1)
1070 clrldi r4,r4,60
1071 or r4,r4,r3
1072 std r4,_TRAP(r1)
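	/*
	 * (The low nibble carries flags, e.g. the "NVGPRs not saved" bit
	 * tested by save_nvgprs, so only the vector bits are replaced.)
	 */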
1073
1074 /*
1075 * PACA_IRQ_HARD_DIS won't always be set here, so set it now
1076 * to reconcile the IRQ state. Tracing is already accounted for.
1077 */
1078 lbz r4,PACAIRQHAPPENED(r13)
1079 ori r4,r4,PACA_IRQ_HARD_DIS
1080 stb r4,PACAIRQHAPPENED(r13)
1081
1082 /*
1083 * Then find the right handler and call it. Interrupts are
1084 * still soft-disabled and we keep them that way.
1085 */
1086 cmpwi cr0,r3,0x500
1087 bne 1f
1088 addi r3,r1,STACK_FRAME_OVERHEAD;
1089 bl do_IRQ
1090 b ret_from_except
10911: cmpwi cr0,r3,0xf00
1092 bne 1f
1093 addi r3,r1,STACK_FRAME_OVERHEAD;
1094 bl performance_monitor_exception
1095 b ret_from_except
10961: cmpwi cr0,r3,0xe60
1097 bne 1f
1098 addi r3,r1,STACK_FRAME_OVERHEAD;
1099 bl handle_hmi_exception
1100 b ret_from_except
11011: cmpwi cr0,r3,0x900
1102 bne 1f
1103 addi r3,r1,STACK_FRAME_OVERHEAD;
1104 bl timer_interrupt
1105 b ret_from_except
1106#ifdef CONFIG_PPC_DOORBELL
11071:
1108#ifdef CONFIG_PPC_BOOK3E
1109 cmpwi cr0,r3,0x280
1110#else
1111 cmpwi cr0,r3,0xa00
1112#endif /* CONFIG_PPC_BOOK3E */
1113 bne 1f
1114 addi r3,r1,STACK_FRAME_OVERHEAD;
1115 bl doorbell_exception
1116#endif /* CONFIG_PPC_DOORBELL */
11171: b ret_from_except /* What else to do here ? */
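/*
 * In rough C, the replay dispatch above is (illustrative only):
 *
 *	switch (vec) {
 *	case 0x500: do_IRQ(regs); break;
 *	case 0xf00: performance_monitor_exception(regs); break;
 *	case 0xe60: handle_hmi_exception(regs); break;
 *	case 0x900: timer_interrupt(regs); break;
 *	case 0xa00: doorbell_exception(regs); break;	// 0x280 on BookE
 *	}
 */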
1118
1119.Lunrecov_restore:
1120 addi r3,r1,STACK_FRAME_OVERHEAD
1121 bl unrecoverable_exception
1122 b .Lunrecov_restore
1123
1124_ASM_NOKPROBE_SYMBOL(ret_from_except);
1125_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
1126_ASM_NOKPROBE_SYMBOL(resume_kernel);
1127_ASM_NOKPROBE_SYMBOL(fast_exc_return_irq);
1128_ASM_NOKPROBE_SYMBOL(restore);
1129_ASM_NOKPROBE_SYMBOL(fast_exception_return);
1130
1131
1132#ifdef CONFIG_PPC_RTAS
1133/*
1134 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1135 * called with the MMU off.
1136 *
1137 * In addition, we need to be in 32b mode, at least for now.
1138 *
1139 * Note: r3 is an input parameter to rtas, so don't trash it...
1140 */
1141_GLOBAL(enter_rtas)
1142 mflr r0
1143 std r0,16(r1)
1144 stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */
1145
1146 /* Because RTAS is running in 32b mode, it clobbers the high order half
1147 * of all registers that it saves. We therefore save those registers
1148 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
1149 */
1150 SAVE_GPR(2, r1) /* Save the TOC */
1151 SAVE_GPR(13, r1) /* Save paca */
1152 SAVE_8GPRS(14, r1) /* Save the non-volatiles */
1153 SAVE_10GPRS(22, r1) /* ditto */
1154
1155 mfcr r4
1156 std r4,_CCR(r1)
1157 mfctr r5
1158 std r5,_CTR(r1)
1159 mfspr r6,SPRN_XER
1160 std r6,_XER(r1)
1161 mfdar r7
1162 std r7,_DAR(r1)
1163 mfdsisr r8
1164 std r8,_DSISR(r1)
1165
1166 /* Temporary workaround to clear CR until RTAS can be modified to
1167 * ignore all bits.
1168 */
1169 li r0,0
1170 mtcr r0
1171
1172#ifdef CONFIG_BUG
1173 /* It is never acceptable to get here with interrupts enabled;
1174 * check that with the asm equivalent of WARN_ON.
1175 */
1176 lbz r0,PACAIRQSOFTMASK(r13)
11771: tdeqi r0,IRQS_ENABLED
1178 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
1179#endif
1180
1181 /* Hard-disable interrupts */
1182 mfmsr r6
1183 rldicl r7,r6,48,1
1184 rotldi r7,r7,16
1185 mtmsrd r7,1
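	/*
	 * The rldicl/rotldi pair above rotates MSR_EE up to bit 0, clears it
	 * with the rldicl mask, then rotates the value back, i.e. it builds
	 * MSR & ~MSR_EE without needing a scratch constant.
	 */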
1186
1187 /* Unfortunately, the stack pointer and the MSR are also clobbered,
1188 * so they are saved in the PACA which allows us to restore
1189 * our original state after RTAS returns.
1190 */
1191 std r1,PACAR1(r13)
1192 std r6,PACASAVEDMSR(r13)
1193
1194 /* Setup our real return addr */
1195 LOAD_REG_ADDR(r4,rtas_return_loc)
1196 clrldi r4,r4,2 /* convert to realmode address */
1197 mtlr r4
1198
1199 li r0,0
1200 ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
1201 andc r0,r6,r0
1202
1203 li r9,1
1204 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
1205 ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
1206 andc r6,r0,r9
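	/*
	 * At this point (as set up above): r0 is the caller's MSR with
	 * EE/SE/BE/RI cleared, applied just before the rfid below; r6
	 * additionally clears SF/IR/DR/FP/FE0/FE1/RI/LE, giving the
	 * 32-bit, real-mode, big-endian MSR that RTAS requires, which is
	 * moved to SRR1 for the rfid.
	 */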
1207
1208__enter_rtas:
1209 sync /* disable interrupts so SRR0/1 */
1210 mtmsrd r0 /* don't get trashed */
1211
1212 LOAD_REG_ADDR(r4, rtas)
1213 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
1214 ld r4,RTASBASE(r4) /* get the rtas->base value */
1215
1216 mtspr SPRN_SRR0,r5
1217 mtspr SPRN_SRR1,r6
1218 RFI_TO_KERNEL
1219 b . /* prevent speculative execution */
1220
1221rtas_return_loc:
1222 FIXUP_ENDIAN
1223
1224 /*
1225 * Clear RI and set SF before anything.
1226 */
1227 mfmsr r6
1228 li r0,MSR_RI
1229 andc r6,r6,r0
1230 sldi r0,r0,(MSR_SF_LG - MSR_RI_LG)
1231 or r6,r6,r0
1232 sync
1233 mtmsrd r6
1234
1235 /* relocation is off at this point */
1236 GET_PACA(r4)
1237 clrldi r4,r4,2 /* convert to realmode address */
1238
1239 bcl 20,31,$+4
12400: mflr r3
1241 ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */
1242
1243 ld r1,PACAR1(r4) /* Restore our SP */
1244 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
1245
1246 mtspr SPRN_SRR0,r3
1247 mtspr SPRN_SRR1,r4
1248 RFI_TO_KERNEL
1249 b . /* prevent speculative execution */
1250_ASM_NOKPROBE_SYMBOL(__enter_rtas)
1251_ASM_NOKPROBE_SYMBOL(rtas_return_loc)
1252
1253 .align 3
12541: .8byte rtas_restore_regs
1255
1256rtas_restore_regs:
1257 /* relocation is on at this point */
1258 REST_GPR(2, r1) /* Restore the TOC */
1259 REST_GPR(13, r1) /* Restore paca */
1260 REST_8GPRS(14, r1) /* Restore the non-volatiles */
1261 REST_10GPRS(22, r1) /* ditto */
1262
1263 GET_PACA(r13)
1264
1265 ld r4,_CCR(r1)
1266 mtcr r4
1267 ld r5,_CTR(r1)
1268 mtctr r5
1269 ld r6,_XER(r1)
1270 mtspr SPRN_XER,r6
1271 ld r7,_DAR(r1)
1272 mtdar r7
1273 ld r8,_DSISR(r1)
1274 mtdsisr r8
1275
1276 addi r1,r1,SWITCH_FRAME_SIZE /* Unstack our frame */
1277 ld r0,16(r1) /* get return address */
1278
1279 mtlr r0
1280 blr /* return to caller */
1281
1282#endif /* CONFIG_PPC_RTAS */
1283
1284_GLOBAL(enter_prom)
1285 mflr r0
1286 std r0,16(r1)
1287 stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space */
1288
1289 /* Because PROM is running in 32b mode, it clobbers the high order half
1290 * of all registers that it saves. We therefore save those registers
1291 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
1292 */
1293 SAVE_GPR(2, r1)
1294 SAVE_GPR(13, r1)
1295 SAVE_8GPRS(14, r1)
1296 SAVE_10GPRS(22, r1)
1297 mfcr r10
1298 mfmsr r11
1299 std r10,_CCR(r1)
1300 std r11,_MSR(r1)
1301
1302 /* Put PROM address in SRR0 */
1303 mtsrr0 r4
1304
1305 /* Setup our trampoline return addr in LR */
1306 bcl 20,31,$+4
13070: mflr r4
1308 addi r4,r4,(1f - 0b)
1309 mtlr r4
1310
1311 /* Prepare a 32-bit mode big endian MSR
1312 */
1313#ifdef CONFIG_PPC_BOOK3E
1314 rlwinm r11,r11,0,1,31
1315 mtsrr1 r11
1316 rfi
1317#else /* CONFIG_PPC_BOOK3E */
1318 LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
1319 andc r11,r11,r12
1320 mtsrr1 r11
1321 RFI_TO_KERNEL
1322#endif /* CONFIG_PPC_BOOK3E */
1323
13241: /* Return from OF */
1325 FIXUP_ENDIAN
1326
1327 /* Just make sure that the top 32 bits of r1 didn't get
1328 * corrupted by OF
1329 */
1330 rldicl r1,r1,0,32
1331
1332 /* Restore the MSR (back to 64 bits) */
1333 ld r0,_MSR(r1)
1334 MTMSRD(r0)
1335 isync
1336
1337 /* Restore other registers */
1338 REST_GPR(2, r1)
1339 REST_GPR(13, r1)
1340 REST_8GPRS(14, r1)
1341 REST_10GPRS(22, r1)
1342 ld r4,_CCR(r1)
1343 mtcr r4
1344
1345 addi r1,r1,SWITCH_FRAME_SIZE
1346 ld r0,16(r1)
1347 mtlr r0
1348 blr