arch/powerpc/kernel/entry_32.S (v6.8)
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 *  PowerPC version
  4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  5 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
  6 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
  7 *  Adapted for Power Macintosh by Paul Mackerras.
  8 *  Low-level exception handlers and MMU support
  9 *  rewritten by Paul Mackerras.
 10 *    Copyright (C) 1996 Paul Mackerras.
 11 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 12 *
 13 *  This file contains the system call entry code, context switch
 14 *  code, and exception/interrupt return code for PowerPC.
 15 */
 16
 17#include <linux/errno.h>
 18#include <linux/err.h>
 19#include <linux/sys.h>
 20#include <linux/threads.h>
 21#include <linux/linkage.h>
 22
 23#include <asm/reg.h>
 24#include <asm/page.h>
 25#include <asm/mmu.h>
 26#include <asm/cputable.h>
 27#include <asm/thread_info.h>
 28#include <asm/ppc_asm.h>
 29#include <asm/asm-offsets.h>
 30#include <asm/unistd.h>
 31#include <asm/ptrace.h>
 32#include <asm/feature-fixups.h>
 33#include <asm/barrier.h>
 34#include <asm/kup.h>
 35#include <asm/bug.h>
 36#include <asm/interrupt.h>
 37
 38#include "head_32.h"
 39
 40/*
 41 * powerpc relies on return from interrupt/syscall being context synchronising
 42 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 43 * synchronisation instructions.
 44 */
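The guarantee described above is what lets powerpc advertise ARCH_HAS_MEMBARRIER_SYNC_CORE without adding instructions to the return path. As a purely illustrative userspace sketch (constants from linux/membarrier.h; this is not part of the kernel file), a process that depends on the guarantee would do something like:

	#include <linux/membarrier.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int membarrier(int cmd, unsigned int flags, int cpu_id)
	{
		return syscall(__NR_membarrier, cmd, flags, cpu_id);
	}

	int main(void)
	{
		/* register once, then issue SYNC_CORE barriers (e.g. after patching code) */
		membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0);
		membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0);
		return 0;
	}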
 45
 46/*
  47 * Align to 4k in order to ensure that all functions modifying srr0/srr1
 48 * fit into one page in order to not encounter a TLB miss between the
 49 * modification of srr0/srr1 and the associated rfi.
 50 */
 51	.align	12
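For reference, .align 12 is a power-of-two alignment request on this target: 2^12 = 4096 bytes, i.e. the single 4k page the comment above relies on.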
 52
 53#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500)
 54	.globl	prepare_transfer_to_handler
 55prepare_transfer_to_handler:
 56	/* if from kernel, check interrupted DOZE/NAP mode */
 57	lwz	r12,TI_LOCAL_FLAGS(r2)
 58	mtcrf	0x01,r12
 59	bt-	31-TLF_NAPPING,4f
 60	bt-	31-TLF_SLEEPING,7f
 61	blr
 62
 634:	rlwinm	r12,r12,0,~_TLF_NAPPING
 64	stw	r12,TI_LOCAL_FLAGS(r2)
 65	b	power_save_ppc32_restore
 66
 677:	rlwinm	r12,r12,0,~_TLF_SLEEPING
 68	stw	r12,TI_LOCAL_FLAGS(r2)
 69	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
 70	rlwinm	r9,r9,0,~MSR_EE
 71	lwz	r12,_LINK(r11)		/* and return to address in LR */
 72	REST_GPR(2, r11)
 73	b	fast_exception_return
 74_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
 75#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */
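In C terms, the TLF_NAPPING/TLF_SLEEPING handling above amounts to the sketch below. The structures and flag values are stand-ins for the real thread_info/pt_regs definitions; only the control flow mirrors the assembly.

	#define _TLF_NAPPING	(1u << 0)	/* stand-in values, see thread_info.h */
	#define _TLF_SLEEPING	(1u << 1)
	#define MSR_EE		(1u << 15)

	struct ti_sketch   { unsigned long local_flags; };
	struct regs_sketch { unsigned long msr, nip, link; };

	static void prepare_transfer_sketch(struct ti_sketch *ti, struct regs_sketch *regs)
	{
		if (ti->local_flags & _TLF_NAPPING) {
			ti->local_flags &= ~_TLF_NAPPING;
			/* the asm branches to power_save_ppc32_restore here */
		} else if (ti->local_flags & _TLF_SLEEPING) {
			ti->local_flags &= ~_TLF_SLEEPING;
			regs->msr &= ~MSR_EE;		/* if sleeping, clear MSR[EE] */
			regs->nip  = regs->link;	/* and return to the address in LR */
		}
	}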
 76
 77#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
 78SYM_FUNC_START(__kuep_lock)
 79	lwz	r9, THREAD+THSR0(r2)
 80	update_user_segments_by_4 r9, r10, r11, r12
 81	blr
 82SYM_FUNC_END(__kuep_lock)
 83
 84SYM_FUNC_START_LOCAL(__kuep_unlock)
 85	lwz	r9, THREAD+THSR0(r2)
 86	rlwinm  r9,r9,0,~SR_NX
 87	update_user_segments_by_4 r9, r10, r11, r12
 88	blr
 89SYM_FUNC_END(__kuep_unlock)
 90
 91.macro	kuep_lock
 92	bl	__kuep_lock
 93.endm
 94.macro	kuep_unlock
 95	bl	__kuep_unlock
 96.endm
 97#else
 98.macro	kuep_lock
 99.endm
100.macro	kuep_unlock
101.endm
102#endif
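On Book3S-32 the helpers above implement KUEP by rewriting the cached user segment-register value four segments at a time: __kuep_unlock clears the no-execute bit, __kuep_lock writes back the stored value (which has it set). A minimal sketch; treat the SR_NX constant as an assumption taken from the 32-bit hash MMU headers.

	#define SR_NX 0x10000000u	/* assumed No-Execute bit of a segment register */

	/* Conceptual view of __kuep_lock/__kuep_unlock: the thread keeps a template
	 * SR value with SR_NX set; unlock strips SR_NX before it is written to the
	 * 16 user segments, lock writes the template back unchanged. */
	static unsigned int kuep_unlocked_sr(unsigned int thread_sr0) { return thread_sr0 & ~SR_NX; }
	static unsigned int kuep_locked_sr(unsigned int thread_sr0)   { return thread_sr0; }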
103
104	.globl	transfer_to_syscall
105transfer_to_syscall:
106	stw	r3, ORIG_GPR3(r1)
107	stw	r11, GPR1(r1)
108	stw	r11, 0(r1)
109	mflr	r12
110	stw	r12, _LINK(r1)
111#ifdef CONFIG_BOOKE_OR_40x
112	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
113#endif
114	lis	r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
115	SAVE_GPR(2, r1)
116	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
117	stw	r9,_MSR(r1)
118	li	r2, INTERRUPT_SYSCALL
119	stw	r12,STACK_INT_FRAME_MARKER(r1)
120	stw	r2,_TRAP(r1)
121	SAVE_GPR(0, r1)
122	SAVE_GPRS(3, 8, r1)
123	addi	r2,r10,-THREAD
124	SAVE_NVGPRS(r1)
125	kuep_lock
126
127	/* Calling convention has r3 = regs, r4 = orig r0 */
128	addi	r3,r1,STACK_INT_FRAME_REGS
129	mr	r4,r0
130	bl	system_call_exception
131
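The C entry points called from this path take the register-save area plus one scalar, matching the register setup above. A sketch of their shape (the authoritative prototypes live in the kernel's C syscall/interrupt code):

	/* transfer_to_syscall passes r3 = regs, r4 = orig r0;
	 * ret_from_syscall passes r3 = return value, r4 = regs, r5 = 0 (not scv). */
	struct pt_regs;

	long system_call_exception(struct pt_regs *regs, unsigned long r0);
	unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);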
132ret_from_syscall:
133	addi    r4,r1,STACK_INT_FRAME_REGS
134	li	r5,0
135	bl	syscall_exit_prepare
136#ifdef CONFIG_PPC_47x
137	lis	r4,icache_44x_need_flush@ha
138	lwz	r5,icache_44x_need_flush@l(r4)
139	cmplwi	cr0,r5,0
140	bne-	.L44x_icache_flush
141#endif /* CONFIG_PPC_47x */
142.L44x_icache_flush_return:
143	kuep_unlock
144	lwz	r4,_LINK(r1)
145	lwz	r5,_CCR(r1)
146	mtlr	r4
147	lwz	r7,_NIP(r1)
148	lwz	r8,_MSR(r1)
149	cmpwi	r3,0
150	REST_GPR(3, r1)
151syscall_exit_finish:
152	mtspr	SPRN_SRR0,r7
153	mtspr	SPRN_SRR1,r8
154
155	bne	3f
156	mtcr	r5
157
1581:	REST_GPR(2, r1)
159	REST_GPR(1, r1)
160	rfi
161#ifdef CONFIG_40x
162	b .	/* Prevent prefetch past rfi */
163#endif
164
1653:	mtcr	r5
166	lwz	r4,_CTR(r1)
167	lwz	r5,_XER(r1)
168	REST_NVGPRS(r1)
169	mtctr	r4
170	mtxer	r5
171	REST_GPR(0, r1)
172	REST_GPRS(3, 12, r1)
173	b	1b
174
175#ifdef CONFIG_44x
176.L44x_icache_flush:
177	li	r7,0
178	iccci	r0,r0
179	stw	r7,icache_44x_need_flush@l(r4)
180	b	.L44x_icache_flush_return
181#endif  /* CONFIG_44x */
182
183	.globl	ret_from_fork
184ret_from_fork:
185	REST_NVGPRS(r1)
186	bl	schedule_tail
187	li	r3,0	/* fork() return value */
188	b	ret_from_syscall
189
190	.globl	ret_from_kernel_user_thread
191ret_from_kernel_user_thread:
192	bl	schedule_tail
193	mtctr	r14
194	mr	r3,r15
195	PPC440EP_ERR42
196	bctrl
197	li	r3,0
198	b	ret_from_syscall
199
200	.globl	start_kernel_thread
201start_kernel_thread:
202	bl	schedule_tail
203	mtctr	r14
204	mr	r3,r15
205	PPC440EP_ERR42
206	bctrl
207	/*
208	 * This must not return. We actually want to BUG here, not WARN,
209	 * because BUG will exit the process which is what the kernel thread
210	 * should have done, which may give some hope of continuing.
211	 */
212100:	trap
213	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
214
215	.globl	fast_exception_return
216fast_exception_return:
217#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
218	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
219	beq	3f			/* if not, we've got problems */
220#endif
221
2222:	lwz	r10,_CCR(r11)
223	REST_GPRS(1, 6, r11)
224	mtcr	r10
225	lwz	r10,_LINK(r11)
226	mtlr	r10
227	/* Clear the exception marker on the stack to avoid confusing stacktrace */
228	li	r10, 0
229	stw	r10, 8(r11)
230	REST_GPR(10, r11)
231#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
232	mtspr	SPRN_NRI, r0
233#endif
234	mtspr	SPRN_SRR1,r9
235	mtspr	SPRN_SRR0,r12
236	REST_GPR(9, r11)
237	REST_GPR(12, r11)
238	REST_GPR(11, r11)
239	rfi
240#ifdef CONFIG_40x
241	b .	/* Prevent prefetch past rfi */
242#endif
243_ASM_NOKPROBE_SYMBOL(fast_exception_return)
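The marker cleared above with "stw r10, 8(r11)" is the STACK_FRAME_REGS_MARKER word stored by the entry code, which lets a stack walker recognise interrupt frames. A minimal sketch of that check; the helper name is hypothetical, the constant is the ASCII string "regs" used by the kernel.

	#define STACK_FRAME_REGS_MARKER 0x72656773UL	/* ASCII "regs" */

	/* Hypothetical helper: treat a frame as an interrupt frame only while the
	 * marker word (the one the exit paths clear at offset 8) is still set. */
	static int frame_has_saved_regs(const unsigned long *sp)
	{
		return sp[2] == STACK_FRAME_REGS_MARKER;	/* offset 8 = third 32-bit word */
	}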
244
245/* aargh, a nonrecoverable interrupt, panic */
246/* aargh, we don't know which trap this is */
2473:
248	li	r10,-1
249	stw	r10,_TRAP(r11)
250	prepare_transfer_to_handler
251	bl	unrecoverable_exception
252	trap	/* should not get here */
253
254	.globl interrupt_return
255interrupt_return:
256	lwz	r4,_MSR(r1)
257	addi	r3,r1,STACK_INT_FRAME_REGS
258	andi.	r0,r4,MSR_PR
259	beq	.Lkernel_interrupt_return
260	bl	interrupt_exit_user_prepare
261	cmpwi	r3,0
262	kuep_unlock
263	bne-	.Lrestore_nvgprs
264
265.Lfast_user_interrupt_return:
266	lwz	r11,_NIP(r1)
267	lwz	r12,_MSR(r1)
268	mtspr	SPRN_SRR0,r11
269	mtspr	SPRN_SRR1,r12
270
271BEGIN_FTR_SECTION
272	stwcx.	r0,0,r1		/* to clear the reservation */
273FTR_SECTION_ELSE
274	lwarx	r0,0,r1
275ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
276
277	lwz	r3,_CCR(r1)
278	lwz	r4,_LINK(r1)
279	lwz	r5,_CTR(r1)
280	lwz	r6,_XER(r1)
281	li	r0,0
282
283	/*
284	 * Leaving a stale exception marker on the stack can confuse
285	 * the reliable stack unwinder later on. Clear it.
286	 */
287	stw	r0,8(r1)
288	REST_GPRS(7, 12, r1)
289
290	mtcr	r3
291	mtlr	r4
292	mtctr	r5
293	mtspr	SPRN_XER,r6
294
295	REST_GPRS(2, 6, r1)
296	REST_GPR(0, r1)
297	REST_GPR(1, r1)
298	rfi
299#ifdef CONFIG_40x
300	b .	/* Prevent prefetch past rfi */
301#endif
302
303.Lrestore_nvgprs:
304	REST_NVGPRS(r1)
305	b	.Lfast_user_interrupt_return
306
307.Lkernel_interrupt_return:
308	bl	interrupt_exit_kernel_prepare
309
310.Lfast_kernel_interrupt_return:
311	cmpwi	cr1,r3,0
312	lwz	r11,_NIP(r1)
313	lwz	r12,_MSR(r1)
314	mtspr	SPRN_SRR0,r11
315	mtspr	SPRN_SRR1,r12
316
317BEGIN_FTR_SECTION
318	stwcx.	r0,0,r1		/* to clear the reservation */
319FTR_SECTION_ELSE
320	lwarx	r0,0,r1
321ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
322
323	lwz	r3,_LINK(r1)
324	lwz	r4,_CTR(r1)
325	lwz	r5,_XER(r1)
326	lwz	r6,_CCR(r1)
327	li	r0,0
328
329	REST_GPRS(7, 12, r1)
330
331	mtlr	r3
332	mtctr	r4
333	mtspr	SPRN_XER,r5
334
335	/*
336	 * Leaving a stale exception marker on the stack can confuse
337	 * the reliable stack unwinder later on. Clear it.
338	 */
339	stw	r0,8(r1)
340
341	REST_GPRS(2, 5, r1)
342
343	bne-	cr1,1f /* emulate stack store */
344	mtcr	r6
345	REST_GPR(6, r1)
346	REST_GPR(0, r1)
347	REST_GPR(1, r1)
348	rfi
349#ifdef CONFIG_40x
350	b .	/* Prevent prefetch past rfi */
351#endif
352
3531:	/*
354	 * Emulate stack store with update. New r1 value was already calculated
355	 * and updated in our interrupt regs by emulate_loadstore, but we can't
356	 * store the previous value of r1 to the stack before re-loading our
357	 * registers from it, otherwise they could be clobbered.  Use
358	 * SPRG Scratch0 as temporary storage to hold the store
359	 * data, as interrupts are disabled here so it won't be clobbered.
360	 */
361	mtcr	r6
362#ifdef CONFIG_BOOKE
363	mtspr	SPRN_SPRG_WSCRATCH0, r9
364#else
365	mtspr	SPRN_SPRG_SCRATCH0, r9
366#endif
367	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
368	REST_GPR(6, r1)
369	REST_GPR(0, r1)
370	REST_GPR(1, r1)
371	stw	r9,0(r1) /* perform store component of stwu */
372#ifdef CONFIG_BOOKE
373	mfspr	r9, SPRN_SPRG_RSCRATCH0
374#else
375	mfspr	r9, SPRN_SPRG_SCRATCH0
376#endif
377	rfi
378#ifdef CONFIG_40x
379	b .	/* Prevent prefetch past rfi */
380#endif
381_ASM_NOKPROBE_SYMBOL(interrupt_return)
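The '1:' path above emulates the store half of a stwu r1,-N(r1) whose register update was already applied to the saved regs by emulate_loadstore. In C terms the full instruction behaves like this sketch (illustrative only):

	/* stwu r1,-n(r1) on ppc32: store the old stack pointer at the new stack top
	 * (the back chain), then make r1 point at it, in a single instruction. The
	 * code above has to split the two halves around the register restore. */
	static inline unsigned long stwu_r1_sketch(unsigned long old_r1, unsigned long n)
	{
		unsigned long new_r1 = old_r1 - n;

		*(unsigned long *)new_r1 = old_r1;	/* store component */
		return new_r1;				/* update component (new r1) */
	}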
382
383#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
384
385/*
386 * Returning from a critical interrupt in user mode doesn't need
387 * to be any different from a normal exception.  For a critical
388 * interrupt in the kernel, we just return (without checking for
389 * preemption) since the interrupt may have happened at some crucial
390 * place (e.g. inside the TLB miss handler), and because we will be
391 * running with r1 pointing into critical_stack, not the current
392 * process's kernel stack (and therefore current_thread_info() will
393 * give the wrong answer).
394 * We have to restore various SPRs that may have been in use at the
395 * time of the critical interrupt.
396 *
397 */
398#ifdef CONFIG_40x
399#define PPC_40x_TURN_OFF_MSR_DR						    \
400	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
401	 * assume the instructions here are mapped by a pinned TLB entry */ \
402	li	r10,MSR_IR;						    \
403	mtmsr	r10;							    \
404	isync;								    \
405	tophys(r1, r1);
406#else
407#define PPC_40x_TURN_OFF_MSR_DR
408#endif
409
410#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
411	REST_NVGPRS(r1);						\
412	lwz	r3,_MSR(r1);						\
413	andi.	r3,r3,MSR_PR;						\
414	bne	interrupt_return;					\
415	REST_GPR(0, r1);						\
416	REST_GPRS(2, 8, r1);						\
417	lwz	r10,_XER(r1);						\
418	lwz	r11,_CTR(r1);						\
419	mtspr	SPRN_XER,r10;						\
420	mtctr	r11;							\
421	stwcx.	r0,0,r1;		/* to clear the reservation */	\
422	lwz	r11,_LINK(r1);						\
423	mtlr	r11;							\
424	lwz	r10,_CCR(r1);						\
425	mtcrf	0xff,r10;						\
426	PPC_40x_TURN_OFF_MSR_DR;					\
427	lwz	r9,_DEAR(r1);						\
428	lwz	r10,_ESR(r1);						\
429	mtspr	SPRN_DEAR,r9;						\
430	mtspr	SPRN_ESR,r10;						\
431	lwz	r11,_NIP(r1);						\
432	lwz	r12,_MSR(r1);						\
433	mtspr	exc_lvl_srr0,r11;					\
434	mtspr	exc_lvl_srr1,r12;					\
435	REST_GPRS(9, 12, r1);						\
436	REST_GPR(1, r1);						\
437	exc_lvl_rfi;							\
438	b	.;		/* prevent prefetch past exc_lvl_rfi */
439
440#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
441	lwz	r9,_##exc_lvl_srr0(r1);					\
442	lwz	r10,_##exc_lvl_srr1(r1);				\
443	mtspr	SPRN_##exc_lvl_srr0,r9;					\
444	mtspr	SPRN_##exc_lvl_srr1,r10;
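As a concrete example of the token pasting above, RESTORE_xSRR(CSRR0, CSRR1) reloads the saved _CSRR0 and _CSRR1 slots from the exception frame and writes them back to SPRN_CSRR0 and SPRN_CSRR1; ret_from_debug_exc below uses exactly that invocation to restore the critical-level save/restore registers before its own return.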
445
446#if defined(CONFIG_PPC_E500)
447#ifdef CONFIG_PHYS_64BIT
448#define	RESTORE_MAS7							\
449	lwz	r11,MAS7(r1);						\
450	mtspr	SPRN_MAS7,r11;
451#else
452#define	RESTORE_MAS7
453#endif /* CONFIG_PHYS_64BIT */
454#define RESTORE_MMU_REGS						\
455	lwz	r9,MAS0(r1);						\
456	lwz	r10,MAS1(r1);						\
457	lwz	r11,MAS2(r1);						\
458	mtspr	SPRN_MAS0,r9;						\
459	lwz	r9,MAS3(r1);						\
460	mtspr	SPRN_MAS1,r10;						\
461	lwz	r10,MAS6(r1);						\
462	mtspr	SPRN_MAS2,r11;						\
463	mtspr	SPRN_MAS3,r9;						\
464	mtspr	SPRN_MAS6,r10;						\
465	RESTORE_MAS7;
466#elif defined(CONFIG_44x)
467#define RESTORE_MMU_REGS						\
468	lwz	r9,MMUCR(r1);						\
469	mtspr	SPRN_MMUCR,r9;
470#else
471#define RESTORE_MMU_REGS
472#endif
473
474#ifdef CONFIG_40x
475	.globl	ret_from_crit_exc
476ret_from_crit_exc:
477	lis	r9,crit_srr0@ha;
478	lwz	r9,crit_srr0@l(r9);
479	lis	r10,crit_srr1@ha;
480	lwz	r10,crit_srr1@l(r10);
481	mtspr	SPRN_SRR0,r9;
482	mtspr	SPRN_SRR1,r10;
483	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
484_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
485#endif /* CONFIG_40x */
486
487#ifdef CONFIG_BOOKE
488	.globl	ret_from_crit_exc
489ret_from_crit_exc:
490	RESTORE_xSRR(SRR0,SRR1);
491	RESTORE_MMU_REGS;
492	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
493_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
494
495	.globl	ret_from_debug_exc
496ret_from_debug_exc:
497	RESTORE_xSRR(SRR0,SRR1);
498	RESTORE_xSRR(CSRR0,CSRR1);
499	RESTORE_MMU_REGS;
500	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
501_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)
502
503	.globl	ret_from_mcheck_exc
504ret_from_mcheck_exc:
505	RESTORE_xSRR(SRR0,SRR1);
506	RESTORE_xSRR(CSRR0,CSRR1);
507	RESTORE_xSRR(DSRR0,DSRR1);
508	RESTORE_MMU_REGS;
509	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
510_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
511#endif /* CONFIG_BOOKE */
512#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
arch/powerpc/kernel/entry_32.S (v6.13.7)
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 *  PowerPC version
  4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  5 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
  6 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
  7 *  Adapted for Power Macintosh by Paul Mackerras.
  8 *  Low-level exception handlers and MMU support
  9 *  rewritten by Paul Mackerras.
 10 *    Copyright (C) 1996 Paul Mackerras.
 11 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 12 *
 13 *  This file contains the system call entry code, context switch
 14 *  code, and exception/interrupt return code for PowerPC.
 15 */
 16
 17#include <linux/errno.h>
 18#include <linux/err.h>
 19#include <linux/sys.h>
 20#include <linux/threads.h>
 21#include <linux/linkage.h>
 22
 23#include <asm/reg.h>
 24#include <asm/page.h>
 25#include <asm/mmu.h>
 26#include <asm/cputable.h>
 27#include <asm/thread_info.h>
 28#include <asm/ppc_asm.h>
 29#include <asm/asm-offsets.h>
 30#include <asm/unistd.h>
 31#include <asm/ptrace.h>
 32#include <asm/feature-fixups.h>
 33#include <asm/barrier.h>
 34#include <asm/kup.h>
 35#include <asm/bug.h>
 36#include <asm/interrupt.h>
 37
 38#include "head_32.h"
 39
 40/*
 41 * powerpc relies on return from interrupt/syscall being context synchronising
 42 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 43 * synchronisation instructions.
 44 */
 45
 46/*
 47 * Align to 4k in order to ensure that all functions modifying srr0/srr1
 48 * fit into one page in order to not encounter a TLB miss between the
 49 * modification of srr0/srr1 and the associated rfi.
 50 */
 51	.align	12
 52
 53#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500)
 54	.globl	prepare_transfer_to_handler
 55prepare_transfer_to_handler:
 56	/* if from kernel, check interrupted DOZE/NAP mode */
 57	lwz	r12,TI_LOCAL_FLAGS(r2)
 58	mtcrf	0x01,r12
 59	bt-	31-TLF_NAPPING,4f
 60	bt-	31-TLF_SLEEPING,7f
 61	blr
 62
 634:	rlwinm	r12,r12,0,~_TLF_NAPPING
 64	stw	r12,TI_LOCAL_FLAGS(r2)
 65	b	power_save_ppc32_restore
 66
 677:	rlwinm	r12,r12,0,~_TLF_SLEEPING
 68	stw	r12,TI_LOCAL_FLAGS(r2)
 69	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
 70	rlwinm	r9,r9,0,~MSR_EE
 71	lwz	r12,_LINK(r11)		/* and return to address in LR */
 72	REST_GPR(2, r11)
 73	b	fast_exception_return
 74_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
 75#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */
 76
 77#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
 78SYM_FUNC_START(__kuep_lock)
 79	lwz	r9, THREAD+THSR0(r2)
 80	update_user_segments_by_4 r9, r10, r11, r12
 81	blr
 82SYM_FUNC_END(__kuep_lock)
 83
 84SYM_FUNC_START_LOCAL(__kuep_unlock)
 85	lwz	r9, THREAD+THSR0(r2)
 86	rlwinm  r9,r9,0,~SR_NX
 87	update_user_segments_by_4 r9, r10, r11, r12
 88	blr
 89SYM_FUNC_END(__kuep_unlock)
 90
 91.macro	kuep_lock
 92	bl	__kuep_lock
 93.endm
 94.macro	kuep_unlock
 95	bl	__kuep_unlock
 96.endm
 97#else
 98.macro	kuep_lock
 99.endm
100.macro	kuep_unlock
101.endm
102#endif
103
104	.globl	transfer_to_syscall
105transfer_to_syscall:
106	stw	r3, ORIG_GPR3(r1)
107	stw	r11, GPR1(r1)
108	stw	r11, 0(r1)
109	mflr	r12
110	stw	r12, _LINK(r1)
111#ifdef CONFIG_BOOKE
112	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
113#endif
114	lis	r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
115	SAVE_GPR(2, r1)
116	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
117	stw	r9,_MSR(r1)
118	li	r2, INTERRUPT_SYSCALL
119	stw	r12,STACK_INT_FRAME_MARKER(r1)
120	stw	r2,_TRAP(r1)
121	SAVE_GPR(0, r1)
122	SAVE_GPRS(3, 8, r1)
123	addi	r2,r10,-THREAD
124	SAVE_NVGPRS(r1)
125	kuep_lock
126
127	/* Calling convention has r3 = regs, r4 = orig r0 */
128	addi	r3,r1,STACK_INT_FRAME_REGS
129	mr	r4,r0
130	bl	system_call_exception
131
132ret_from_syscall:
133	addi    r4,r1,STACK_INT_FRAME_REGS
134	li	r5,0
135	bl	syscall_exit_prepare
136#ifdef CONFIG_PPC_47x
137	lis	r4,icache_44x_need_flush@ha
138	lwz	r5,icache_44x_need_flush@l(r4)
139	cmplwi	cr0,r5,0
140	bne-	.L44x_icache_flush
141#endif /* CONFIG_PPC_47x */
142.L44x_icache_flush_return:
143	kuep_unlock
144	lwz	r4,_LINK(r1)
145	lwz	r5,_CCR(r1)
146	mtlr	r4
147	lwz	r7,_NIP(r1)
148	lwz	r8,_MSR(r1)
149	cmpwi	r3,0
150	REST_GPR(3, r1)
151syscall_exit_finish:
152	mtspr	SPRN_SRR0,r7
153	mtspr	SPRN_SRR1,r8
154
155	bne	3f
156	mtcr	r5
157
1581:	REST_GPR(2, r1)
159	REST_GPR(1, r1)
160	rfi
161
1623:	mtcr	r5
163	lwz	r4,_CTR(r1)
164	lwz	r5,_XER(r1)
165	REST_NVGPRS(r1)
166	mtctr	r4
167	mtxer	r5
168	REST_GPR(0, r1)
169	REST_GPRS(3, 12, r1)
170	b	1b
171
172#ifdef CONFIG_44x
173.L44x_icache_flush:
174	li	r7,0
175	iccci	r0,r0
176	stw	r7,icache_44x_need_flush@l(r4)
177	b	.L44x_icache_flush_return
178#endif  /* CONFIG_44x */
179
180	.globl	ret_from_fork
181ret_from_fork:
182	REST_NVGPRS(r1)
183	bl	schedule_tail
184	li	r3,0	/* fork() return value */
185	b	ret_from_syscall
186
187	.globl	ret_from_kernel_user_thread
188ret_from_kernel_user_thread:
189	bl	schedule_tail
190	mtctr	r14
191	mr	r3,r15
192	PPC440EP_ERR42
193	bctrl
194	li	r3,0
195	b	ret_from_syscall
196
197	.globl	start_kernel_thread
198start_kernel_thread:
199	bl	schedule_tail
200	mtctr	r14
201	mr	r3,r15
202	PPC440EP_ERR42
203	bctrl
204	/*
205	 * This must not return. We actually want to BUG here, not WARN,
206	 * because BUG will exit the process which is what the kernel thread
207	 * should have done, which may give some hope of continuing.
208	 */
209100:	trap
210	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
211
212	.globl	fast_exception_return
213fast_exception_return:
214#ifndef CONFIG_BOOKE
215	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
216	beq	3f			/* if not, we've got problems */
217#endif
218
2192:	lwz	r10,_CCR(r11)
220	REST_GPRS(1, 6, r11)
221	mtcr	r10
222	lwz	r10,_LINK(r11)
223	mtlr	r10
224	/* Clear the exception marker on the stack to avoid confusing stacktrace */
225	li	r10, 0
226	stw	r10, 8(r11)
227	REST_GPR(10, r11)
228#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
229	mtspr	SPRN_NRI, r0
230#endif
231	mtspr	SPRN_SRR1,r9
232	mtspr	SPRN_SRR0,r12
233	REST_GPR(9, r11)
234	REST_GPR(12, r11)
235	REST_GPR(11, r11)
236	rfi
237_ASM_NOKPROBE_SYMBOL(fast_exception_return)
238
239/* aargh, a nonrecoverable interrupt, panic */
240/* aargh, we don't know which trap this is */
2413:
242	li	r10,-1
243	stw	r10,_TRAP(r11)
244	prepare_transfer_to_handler
245	bl	unrecoverable_exception
246	trap	/* should not get here */
247
248	.globl interrupt_return
249interrupt_return:
250	lwz	r4,_MSR(r1)
251	addi	r3,r1,STACK_INT_FRAME_REGS
252	andi.	r0,r4,MSR_PR
253	beq	.Lkernel_interrupt_return
254	bl	interrupt_exit_user_prepare
255	cmpwi	r3,0
256	kuep_unlock
257	bne-	.Lrestore_nvgprs
258
259.Lfast_user_interrupt_return:
260	lwz	r11,_NIP(r1)
261	lwz	r12,_MSR(r1)
262	mtspr	SPRN_SRR0,r11
263	mtspr	SPRN_SRR1,r12
264
265BEGIN_FTR_SECTION
266	stwcx.	r0,0,r1		/* to clear the reservation */
267FTR_SECTION_ELSE
268	lwarx	r0,0,r1
269ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
270
271	lwz	r3,_CCR(r1)
272	lwz	r4,_LINK(r1)
273	lwz	r5,_CTR(r1)
274	lwz	r6,_XER(r1)
275	li	r0,0
276
277	/*
278	 * Leaving a stale exception marker on the stack can confuse
279	 * the reliable stack unwinder later on. Clear it.
280	 */
281	stw	r0,8(r1)
282	REST_GPRS(7, 12, r1)
283
284	mtcr	r3
285	mtlr	r4
286	mtctr	r5
287	mtspr	SPRN_XER,r6
288
289	REST_GPRS(2, 6, r1)
290	REST_GPR(0, r1)
291	REST_GPR(1, r1)
292	rfi
293
294.Lrestore_nvgprs:
295	REST_NVGPRS(r1)
296	b	.Lfast_user_interrupt_return
297
298.Lkernel_interrupt_return:
299	bl	interrupt_exit_kernel_prepare
300
301.Lfast_kernel_interrupt_return:
302	cmpwi	cr1,r3,0
303	lwz	r11,_NIP(r1)
304	lwz	r12,_MSR(r1)
305	mtspr	SPRN_SRR0,r11
306	mtspr	SPRN_SRR1,r12
307
308BEGIN_FTR_SECTION
309	stwcx.	r0,0,r1		/* to clear the reservation */
310FTR_SECTION_ELSE
311	lwarx	r0,0,r1
312ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
313
314	lwz	r3,_LINK(r1)
315	lwz	r4,_CTR(r1)
316	lwz	r5,_XER(r1)
317	lwz	r6,_CCR(r1)
318	li	r0,0
319
320	REST_GPRS(7, 12, r1)
321
322	mtlr	r3
323	mtctr	r4
324	mtspr	SPRN_XER,r5
325
326	/*
327	 * Leaving a stale exception marker on the stack can confuse
328	 * the reliable stack unwinder later on. Clear it.
329	 */
330	stw	r0,8(r1)
331
332	REST_GPRS(2, 5, r1)
333
334	bne-	cr1,1f /* emulate stack store */
335	mtcr	r6
336	REST_GPR(6, r1)
337	REST_GPR(0, r1)
338	REST_GPR(1, r1)
339	rfi
340
3411:	/*
342	 * Emulate stack store with update. New r1 value was already calculated
343	 * and updated in our interrupt regs by emulate_loadstore, but we can't
344	 * store the previous value of r1 to the stack before re-loading our
345	 * registers from it, otherwise they could be clobbered.  Use
346	 * SPRG Scratch0 as temporary storage to hold the store
347	 * data, as interrupts are disabled here so it won't be clobbered.
348	 */
349	mtcr	r6
350#ifdef CONFIG_BOOKE
351	mtspr	SPRN_SPRG_WSCRATCH0, r9
352#else
353	mtspr	SPRN_SPRG_SCRATCH0, r9
354#endif
355	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
356	REST_GPR(6, r1)
357	REST_GPR(0, r1)
358	REST_GPR(1, r1)
359	stw	r9,0(r1) /* perform store component of stwu */
360#ifdef CONFIG_BOOKE
361	mfspr	r9, SPRN_SPRG_RSCRATCH0
362#else
363	mfspr	r9, SPRN_SPRG_SCRATCH0
364#endif
365	rfi
366_ASM_NOKPROBE_SYMBOL(interrupt_return)
367
368#ifdef CONFIG_BOOKE
369
370/*
371 * Returning from a critical interrupt in user mode doesn't need
372 * to be any different from a normal exception.  For a critical
373 * interrupt in the kernel, we just return (without checking for
374 * preemption) since the interrupt may have happened at some crucial
375 * place (e.g. inside the TLB miss handler), and because we will be
376 * running with r1 pointing into critical_stack, not the current
377 * process's kernel stack (and therefore current_thread_info() will
378 * give the wrong answer).
379 * We have to restore various SPRs that may have been in use at the
380 * time of the critical interrupt.
381 *
382 */
383
384#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
385	REST_NVGPRS(r1);						\
386	lwz	r3,_MSR(r1);						\
387	andi.	r3,r3,MSR_PR;						\
388	bne	interrupt_return;					\
389	REST_GPR(0, r1);						\
390	REST_GPRS(2, 8, r1);						\
391	lwz	r10,_XER(r1);						\
392	lwz	r11,_CTR(r1);						\
393	mtspr	SPRN_XER,r10;						\
394	mtctr	r11;							\
395	stwcx.	r0,0,r1;		/* to clear the reservation */	\
396	lwz	r11,_LINK(r1);						\
397	mtlr	r11;							\
398	lwz	r10,_CCR(r1);						\
399	mtcrf	0xff,r10;						\
400	lwz	r9,_DEAR(r1);						\
401	lwz	r10,_ESR(r1);						\
402	mtspr	SPRN_DEAR,r9;						\
403	mtspr	SPRN_ESR,r10;						\
404	lwz	r11,_NIP(r1);						\
405	lwz	r12,_MSR(r1);						\
406	mtspr	exc_lvl_srr0,r11;					\
407	mtspr	exc_lvl_srr1,r12;					\
408	REST_GPRS(9, 12, r1);						\
409	REST_GPR(1, r1);						\
410	exc_lvl_rfi;							\
411	b	.;		/* prevent prefetch past exc_lvl_rfi */
412
413#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
414	lwz	r9,_##exc_lvl_srr0(r1);					\
415	lwz	r10,_##exc_lvl_srr1(r1);				\
416	mtspr	SPRN_##exc_lvl_srr0,r9;					\
417	mtspr	SPRN_##exc_lvl_srr1,r10;
418
419#if defined(CONFIG_PPC_E500)
420#ifdef CONFIG_PHYS_64BIT
421#define	RESTORE_MAS7							\
422	lwz	r11,MAS7(r1);						\
423	mtspr	SPRN_MAS7,r11;
424#else
425#define	RESTORE_MAS7
426#endif /* CONFIG_PHYS_64BIT */
427#define RESTORE_MMU_REGS						\
428	lwz	r9,MAS0(r1);						\
429	lwz	r10,MAS1(r1);						\
430	lwz	r11,MAS2(r1);						\
431	mtspr	SPRN_MAS0,r9;						\
432	lwz	r9,MAS3(r1);						\
433	mtspr	SPRN_MAS1,r10;						\
434	lwz	r10,MAS6(r1);						\
435	mtspr	SPRN_MAS2,r11;						\
436	mtspr	SPRN_MAS3,r9;						\
437	mtspr	SPRN_MAS6,r10;						\
438	RESTORE_MAS7;
439#elif defined(CONFIG_44x)
440#define RESTORE_MMU_REGS						\
441	lwz	r9,MMUCR(r1);						\
442	mtspr	SPRN_MMUCR,r9;
443#else
444#define RESTORE_MMU_REGS
445#endif
446
447	.globl	ret_from_crit_exc
448ret_from_crit_exc:
449	RESTORE_xSRR(SRR0,SRR1);
450	RESTORE_MMU_REGS;
451	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
452_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
453
454	.globl	ret_from_debug_exc
455ret_from_debug_exc:
456	RESTORE_xSRR(SRR0,SRR1);
457	RESTORE_xSRR(CSRR0,CSRR1);
458	RESTORE_MMU_REGS;
459	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
460_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)
461
462	.globl	ret_from_mcheck_exc
463ret_from_mcheck_exc:
464	RESTORE_xSRR(SRR0,SRR1);
465	RESTORE_xSRR(CSRR0,CSRR1);
466	RESTORE_xSRR(DSRR0,DSRR1);
467	RESTORE_MMU_REGS;
468	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
469_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
470#endif /* CONFIG_BOOKE */