v5.14.15
  1#include <asm/asm-offsets.h>
  2#include <asm/bug.h>
  3#ifdef CONFIG_PPC_BOOK3S
  4#include <asm/exception-64s.h>
  5#else
  6#include <asm/exception-64e.h>
  7#endif
  8#include <asm/feature-fixups.h>
  9#include <asm/head-64.h>
 10#include <asm/hw_irq.h>
 11#include <asm/kup.h>
 12#include <asm/mmu.h>
 13#include <asm/ppc_asm.h>
 14#include <asm/ptrace.h>
 15
 16	.section	".toc","aw"
 17SYS_CALL_TABLE:
 18	.tc sys_call_table[TC],sys_call_table
 19
 20#ifdef CONFIG_COMPAT
 21COMPAT_SYS_CALL_TABLE:
 22	.tc compat_sys_call_table[TC],compat_sys_call_table
 23#endif
 24	.previous
 25
 26	.align 7
 27
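/*
 * Debug check: trap and emit a one-shot warning if the (H)SRR0/(H)SRR1
 * registers no longer match the _NIP/_MSR values saved in the interrupt
 * frame.
 */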
 28.macro DEBUG_SRR_VALID srr
 29#ifdef CONFIG_PPC_RFI_SRR_DEBUG
 30	.ifc \srr,srr
 31	mfspr	r11,SPRN_SRR0
 32	ld	r12,_NIP(r1)
 33100:	tdne	r11,r12
 34	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
 35	mfspr	r11,SPRN_SRR1
 36	ld	r12,_MSR(r1)
 37100:	tdne	r11,r12
 38	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
 39	.else
 40	mfspr	r11,SPRN_HSRR0
 41	ld	r12,_NIP(r1)
 42100:	tdne	r11,r12
 43	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
 44	mfspr	r11,SPRN_HSRR1
 45	ld	r12,_MSR(r1)
 46100:	tdne	r11,r12
 47	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
 48	.endif
 49#endif
 50.endm
 51
 52#ifdef CONFIG_PPC_BOOK3S
 53.macro system_call_vectored name trapnr
 54	.globl system_call_vectored_\name
 55system_call_vectored_\name:
 56_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
 57	SCV_INTERRUPT_TO_KERNEL
 58	mr	r10,r1
 59	ld	r1,PACAKSAVE(r13)
 60	std	r10,0(r1)
 61	std	r11,_NIP(r1)
 62	std	r12,_MSR(r1)
 63	std	r0,GPR0(r1)
 64	std	r10,GPR1(r1)
 65	std	r2,GPR2(r1)
 66	ld	r2,PACATOC(r13)
 67	mfcr	r12
 68	li	r11,0
 69	/* Can we avoid saving r3-r8 in common case? */
 70	std	r3,GPR3(r1)
 71	std	r4,GPR4(r1)
 72	std	r5,GPR5(r1)
 73	std	r6,GPR6(r1)
 74	std	r7,GPR7(r1)
 75	std	r8,GPR8(r1)
 76	/* Zero r9-r12, this should only be required when restoring all GPRs */
 77	std	r11,GPR9(r1)
 78	std	r11,GPR10(r1)
 79	std	r11,GPR11(r1)
 80	std	r11,GPR12(r1)
 81	std	r9,GPR13(r1)
 82	SAVE_NVGPRS(r1)
 83	std	r11,_XER(r1)
 84	std	r11,_LINK(r1)
 85	std	r11,_CTR(r1)
 86
 87	li	r11,\trapnr
 88	std	r11,_TRAP(r1)
 89	std	r12,_CCR(r1)
 90	addi	r10,r1,STACK_FRAME_OVERHEAD
 91	ld	r11,exception_marker@toc(r2)
 92	std	r11,-16(r10)		/* "regshere" marker */
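	/*
	 * exception_marker holds STACK_FRAME_REGS_MARKER ("regshere");
	 * the stack unwinder uses it to recognise an interrupt frame.
	 */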
 93
 94BEGIN_FTR_SECTION
 95	HMT_MEDIUM
 96END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 97
 98	/*
 99	 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
100	 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
101	 * and interrupts may be masked and pending already.
102	 * system_call_exception() will call trace_hardirqs_off() which means
103	 * interrupts could already have been blocked before trace_hardirqs_off,
104	 * but this is the best we can do.
105	 */
106
107	/* Calling convention has r9 = orig r0, r10 = regs */
108	mr	r9,r0
109	bl	system_call_exception
110
111.Lsyscall_vectored_\name\()_exit:
112	addi	r4,r1,STACK_FRAME_OVERHEAD
113	li	r5,1 /* scv */
114	bl	syscall_exit_prepare
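	/*
	 * syscall_exit_prepare returns non-zero in r3 when the full
	 * register set must be restored; zero takes the fast path below.
	 */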
115	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
116.Lsyscall_vectored_\name\()_rst_start:
117	lbz	r11,PACAIRQHAPPENED(r13)
118	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
119	bne-	syscall_vectored_\name\()_restart
120	li	r11,IRQS_ENABLED
121	stb	r11,PACAIRQSOFTMASK(r13)
122	li	r11,0
123	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
124
125	ld	r2,_CCR(r1)
126	ld	r4,_NIP(r1)
127	ld	r5,_MSR(r1)
128
129BEGIN_FTR_SECTION
130	stdcx.	r0,0,r1			/* to clear the reservation */
131END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
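	/*
	 * The dummy stdcx. ensures no stale larx reservation survives the
	 * return to userspace; it is only needed on CPUs whose stcx. does
	 * not check the reservation address.
	 */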
132
133BEGIN_FTR_SECTION
134	HMT_MEDIUM_LOW
135END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
136
137	cmpdi	r3,0
138	bne	.Lsyscall_vectored_\name\()_restore_regs
139
140	/* rfscv returns with LR->NIA and CTR->MSR */
141	mtlr	r4
142	mtctr	r5
143
144	/* Could zero these as per ABI, but we may consider a stricter ABI
145	 * which preserves these if libc implementations can benefit, so
146	 * restore them for now until further measurement is done. */
147	ld	r0,GPR0(r1)
148	ld	r4,GPR4(r1)
149	ld	r5,GPR5(r1)
150	ld	r6,GPR6(r1)
151	ld	r7,GPR7(r1)
152	ld	r8,GPR8(r1)
153	/* Zero volatile regs that may contain sensitive kernel data */
154	li	r9,0
155	li	r10,0
156	li	r11,0
157	li	r12,0
158	mtspr	SPRN_XER,r0
159
160	/*
161	 * We don't need to restore AMR on the way back to userspace for KUAP.
162	 * The value of AMR only matters while we're in the kernel.
163	 */
164	mtcr	r2
165	ld	r2,GPR2(r1)
166	ld	r3,GPR3(r1)
167	ld	r13,GPR13(r1)
168	ld	r1,GPR1(r1)
169	RFSCV_TO_USER
170	b	.	/* prevent speculative execution */
171
172.Lsyscall_vectored_\name\()_restore_regs:
173	mtspr	SPRN_SRR0,r4
174	mtspr	SPRN_SRR1,r5
175
176	ld	r3,_CTR(r1)
177	ld	r4,_LINK(r1)
178	ld	r5,_XER(r1)
179
180	REST_NVGPRS(r1)
181	ld	r0,GPR0(r1)
182	mtcr	r2
183	mtctr	r3
184	mtlr	r4
185	mtspr	SPRN_XER,r5
186	REST_10GPRS(2, r1)
187	REST_2GPRS(12, r1)
188	ld	r1,GPR1(r1)
189	RFI_TO_USER
190.Lsyscall_vectored_\name\()_rst_end:
191
192syscall_vectored_\name\()_restart:
193_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
194	GET_PACA(r13)
195	ld	r1,PACA_EXIT_SAVE_R1(r13)
196	ld	r2,PACATOC(r13)
197	ld	r3,RESULT(r1)
198	addi	r4,r1,STACK_FRAME_OVERHEAD
199	li	r11,IRQS_ALL_DISABLED
200	stb	r11,PACAIRQSOFTMASK(r13)
201	bl	syscall_exit_restart
202	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
203	b	.Lsyscall_vectored_\name\()_rst_start
2041:
205
206SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
207RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
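/*
 * An interrupt that hits between _rst_start and _rst_end is directed via
 * the restart table to the _restart code above, which re-runs the exit
 * sequence with the soft-mask reset to IRQS_ALL_DISABLED, rather than
 * resuming mid-exit.
 */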
208
209.endm
210
211system_call_vectored common 0x3000
212
213/*
214 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
215 * which is tested by system_call_exception when r0 is -1 (as set by vector
216 * entry code).
217 */
218system_call_vectored sigill 0x7ff0
219
220
221/*
222 * Entered via kernel return set up by kernel/sstep.c, must match entry regs
223 */
224	.globl system_call_vectored_emulate
225system_call_vectored_emulate:
226_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
227	li	r10,IRQS_ALL_DISABLED
228	stb	r10,PACAIRQSOFTMASK(r13)
229	b	system_call_vectored_common
230#endif /* CONFIG_PPC_BOOK3S */
231
232	.balign IFETCH_ALIGN_BYTES
233	.globl system_call_common_real
234system_call_common_real:
235_ASM_NOKPROBE_SYMBOL(system_call_common_real)
236	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
237	mtmsrd	r10
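	/*
	 * Now running with the kernel MSR (relocation on); fall through
	 * to the virtual-mode entry point below.
	 */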
238
239	.balign IFETCH_ALIGN_BYTES
240	.globl system_call_common
241system_call_common:
242_ASM_NOKPROBE_SYMBOL(system_call_common)
243	mr	r10,r1
244	ld	r1,PACAKSAVE(r13)
245	std	r10,0(r1)
246	std	r11,_NIP(r1)
247	std	r12,_MSR(r1)
248	std	r0,GPR0(r1)
249	std	r10,GPR1(r1)
250	std	r2,GPR2(r1)
251#ifdef CONFIG_PPC_FSL_BOOK3E
252START_BTB_FLUSH_SECTION
253	BTB_FLUSH(r10)
254END_BTB_FLUSH_SECTION
255#endif
256	ld	r2,PACATOC(r13)
257	mfcr	r12
258	li	r11,0
259	/* Can we avoid saving r3-r8 in common case? */
260	std	r3,GPR3(r1)
261	std	r4,GPR4(r1)
262	std	r5,GPR5(r1)
263	std	r6,GPR6(r1)
264	std	r7,GPR7(r1)
265	std	r8,GPR8(r1)
266	/* Zero r9-r12, this should only be required when restoring all GPRs */
267	std	r11,GPR9(r1)
268	std	r11,GPR10(r1)
269	std	r11,GPR11(r1)
270	std	r11,GPR12(r1)
271	std	r9,GPR13(r1)
272	SAVE_NVGPRS(r1)
273	std	r11,_XER(r1)
274	std	r11,_CTR(r1)
275	mflr	r10
276
277	/*
278	 * This clears CR0.SO (bit 28), which is the error indication on
279	 * return from this system call.
280	 */
281	rldimi	r12,r11,28,(63-28)
282	li	r11,0xc00
283	std	r10,_LINK(r1)
284	std	r11,_TRAP(r1)
285	std	r12,_CCR(r1)
286	addi	r10,r1,STACK_FRAME_OVERHEAD
287	ld	r11,exception_marker@toc(r2)
288	std	r11,-16(r10)		/* "regshere" marker */
289
290#ifdef CONFIG_PPC_BOOK3S
291	li	r11,1
292	stb	r11,PACASRR_VALID(r13)
293#endif
294
295	/*
296	 * We always enter kernel from userspace with irq soft-mask enabled and
297	 * nothing pending. system_call_exception() will call
298	 * trace_hardirqs_off().
299	 */
300	li	r11,IRQS_ALL_DISABLED
301	stb	r11,PACAIRQSOFTMASK(r13)
302#ifdef CONFIG_PPC_BOOK3S
303	li	r12,-1 /* Set MSR_EE and MSR_RI */
304	mtmsrd	r12,1
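	/* The L=1 form of mtmsrd updates only MSR[EE] and MSR[RI] */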
305#else
306	wrteei	1
307#endif
308
309	/* Calling convention has r9 = orig r0, r10 = regs */
310	mr	r9,r0
311	bl	system_call_exception
312
313.Lsyscall_exit:
314	addi	r4,r1,STACK_FRAME_OVERHEAD
315	li	r5,0 /* !scv */
316	bl	syscall_exit_prepare
317	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
318#ifdef CONFIG_PPC_BOOK3S
319.Lsyscall_rst_start:
320	lbz	r11,PACAIRQHAPPENED(r13)
321	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
322	bne-	syscall_restart
323#endif
324	li	r11,IRQS_ENABLED
325	stb	r11,PACAIRQSOFTMASK(r13)
326	li	r11,0
327	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
328
329	ld	r2,_CCR(r1)
330	ld	r6,_LINK(r1)
331	mtlr	r6
332
333#ifdef CONFIG_PPC_BOOK3S
334	lbz	r4,PACASRR_VALID(r13)
335	cmpdi	r4,0
336	bne	1f
337	li	r4,0
338	stb	r4,PACASRR_VALID(r13)
339#endif
340	ld	r4,_NIP(r1)
341	ld	r5,_MSR(r1)
342	mtspr	SPRN_SRR0,r4
343	mtspr	SPRN_SRR1,r5
3441:
345	DEBUG_SRR_VALID srr
346
347BEGIN_FTR_SECTION
348	stdcx.	r0,0,r1			/* to clear the reservation */
349END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
350
351	cmpdi	r3,0
352	bne	.Lsyscall_restore_regs
353	/* Zero volatile regs that may contain sensitive kernel data */
354	li	r0,0
355	li	r4,0
356	li	r5,0
357	li	r6,0
358	li	r7,0
359	li	r8,0
360	li	r9,0
361	li	r10,0
362	li	r11,0
363	li	r12,0
364	mtctr	r0
365	mtspr	SPRN_XER,r0
366.Lsyscall_restore_regs_cont:
367
368BEGIN_FTR_SECTION
369	HMT_MEDIUM_LOW
370END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
371
372	/*
373	 * We don't need to restore AMR on the way back to userspace for KUAP.
374	 * The value of AMR only matters while we're in the kernel.
375	 */
376	mtcr	r2
377	ld	r2,GPR2(r1)
378	ld	r3,GPR3(r1)
379	ld	r13,GPR13(r1)
380	ld	r1,GPR1(r1)
381	RFI_TO_USER
382	b	.	/* prevent speculative execution */
383
384.Lsyscall_restore_regs:
385	ld	r3,_CTR(r1)
386	ld	r4,_XER(r1)
387	REST_NVGPRS(r1)
388	mtctr	r3
389	mtspr	SPRN_XER,r4
390	ld	r0,GPR0(r1)
391	REST_8GPRS(4, r1)
392	ld	r12,GPR12(r1)
393	b	.Lsyscall_restore_regs_cont
394.Lsyscall_rst_end:
395
396#ifdef CONFIG_PPC_BOOK3S
397syscall_restart:
398_ASM_NOKPROBE_SYMBOL(syscall_restart)
399	GET_PACA(r13)
400	ld	r1,PACA_EXIT_SAVE_R1(r13)
401	ld	r2,PACATOC(r13)
402	ld	r3,RESULT(r1)
403	addi	r4,r1,STACK_FRAME_OVERHEAD
404	li	r11,IRQS_ALL_DISABLED
405	stb	r11,PACAIRQSOFTMASK(r13)
406	bl	syscall_exit_restart
407	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
408	b	.Lsyscall_rst_start
4091:
410
411SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
412RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
413#endif
414
415	/*
416	 * If MSR EE/RI was never enabled, IRQs were not reconciled, NVGPRs were
417	 * not touched, and no exit work was created, then this path can be used.
418	 */
419	.balign IFETCH_ALIGN_BYTES
420	.globl fast_interrupt_return_srr
421fast_interrupt_return_srr:
422_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
423	kuap_check_amr r3, r4
424	ld	r5,_MSR(r1)
425	andi.	r0,r5,MSR_PR
426#ifdef CONFIG_PPC_BOOK3S
427	beq	1f
428	kuap_user_restore r3, r4
429	b	.Lfast_user_interrupt_return_srr
4301:	kuap_kernel_restore r3, r4
431	andi.	r0,r5,MSR_RI
432	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
433	bne+	.Lfast_kernel_interrupt_return_srr
434	addi	r3,r1,STACK_FRAME_OVERHEAD
435	bl	unrecoverable_exception
436	b	. /* should not get here */
437#else
438	bne	.Lfast_user_interrupt_return_srr
439	b	.Lfast_kernel_interrupt_return_srr
440#endif
441
442.macro interrupt_return_macro srr
443	.balign IFETCH_ALIGN_BYTES
444	.globl interrupt_return_\srr
445interrupt_return_\srr\():
446_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
447	ld	r4,_MSR(r1)
448	andi.	r0,r4,MSR_PR
449	beq	interrupt_return_\srr\()_kernel
450interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
451_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
452	addi	r3,r1,STACK_FRAME_OVERHEAD
453	bl	interrupt_exit_user_prepare
454	cmpdi	r3,0
455	bne-	.Lrestore_nvgprs_\srr
456.Lrestore_nvgprs_\srr\()_cont:
457	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
458#ifdef CONFIG_PPC_BOOK3S
459.Linterrupt_return_\srr\()_user_rst_start:
460	lbz	r11,PACAIRQHAPPENED(r13)
461	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
462	bne-	interrupt_return_\srr\()_user_restart
463#endif
464	li	r11,IRQS_ENABLED
465	stb	r11,PACAIRQSOFTMASK(r13)
466	li	r11,0
467	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
468
469.Lfast_user_interrupt_return_\srr\():
470#ifdef CONFIG_PPC_BOOK3S
471	.ifc \srr,srr
472	lbz	r4,PACASRR_VALID(r13)
473	.else
474	lbz	r4,PACAHSRR_VALID(r13)
475	.endif
476	cmpdi	r4,0
477	li	r4,0
478	bne	1f
479#endif
480	ld	r11,_NIP(r1)
481	ld	r12,_MSR(r1)
482	.ifc \srr,srr
483	mtspr	SPRN_SRR0,r11
484	mtspr	SPRN_SRR1,r12
4851:
486#ifdef CONFIG_PPC_BOOK3S
487	stb	r4,PACASRR_VALID(r13)
488#endif
489	.else
490	mtspr	SPRN_HSRR0,r11
491	mtspr	SPRN_HSRR1,r12
4921:
493#ifdef CONFIG_PPC_BOOK3S
494	stb	r4,PACAHSRR_VALID(r13)
495#endif
496	.endif
497	DEBUG_SRR_VALID \srr
498
499#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
500	lbz	r4,PACAIRQSOFTMASK(r13)
501	tdnei	r4,IRQS_ENABLED
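	/* Trap if returning to userspace with IRQs still soft-masked */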
502#endif
503
504BEGIN_FTR_SECTION
505	ld	r10,_PPR(r1)
506	mtspr	SPRN_PPR,r10
507END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
508
509BEGIN_FTR_SECTION
510	stdcx.	r0,0,r1		/* to clear the reservation */
511FTR_SECTION_ELSE
512	ldarx	r0,0,r1
513ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
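	/*
	 * On CPUs whose stcx. checks the reservation address, the ldarx
	 * instead replaces any stale reservation with one on the kernel
	 * stack, which a userspace stcx. cannot match.
	 */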
514
515	ld	r3,_CCR(r1)
516	ld	r4,_LINK(r1)
517	ld	r5,_CTR(r1)
518	ld	r6,_XER(r1)
519	li	r0,0
520
521	REST_4GPRS(7, r1)
522	REST_2GPRS(11, r1)
523	REST_GPR(13, r1)
524
525	mtcr	r3
526	mtlr	r4
527	mtctr	r5
528	mtspr	SPRN_XER,r6
529
530	REST_4GPRS(2, r1)
531	REST_GPR(6, r1)
532	REST_GPR(0, r1)
533	REST_GPR(1, r1)
534	.ifc \srr,srr
535	RFI_TO_USER
536	.else
537	HRFI_TO_USER
538	.endif
539	b	.	/* prevent speculative execution */
540.Linterrupt_return_\srr\()_user_rst_end:
541
542.Lrestore_nvgprs_\srr\():
543	REST_NVGPRS(r1)
544	b	.Lrestore_nvgprs_\srr\()_cont
545
546#ifdef CONFIG_PPC_BOOK3S
547interrupt_return_\srr\()_user_restart:
548_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
549	GET_PACA(r13)
550	ld	r1,PACA_EXIT_SAVE_R1(r13)
551	ld	r2,PACATOC(r13)
552	addi	r3,r1,STACK_FRAME_OVERHEAD
553	li	r11,IRQS_ALL_DISABLED
554	stb	r11,PACAIRQSOFTMASK(r13)
555	bl	interrupt_exit_user_restart
556	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
557	b	.Linterrupt_return_\srr\()_user_rst_start
5581:
559
560SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
561RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
562#endif
563
564	.balign IFETCH_ALIGN_BYTES
565interrupt_return_\srr\()_kernel:
566_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
567	addi	r3,r1,STACK_FRAME_OVERHEAD
568	bl	interrupt_exit_kernel_prepare
569
570	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
571.Linterrupt_return_\srr\()_kernel_rst_start:
572	ld	r11,SOFTE(r1)
573	cmpwi	r11,IRQS_ENABLED
574	stb	r11,PACAIRQSOFTMASK(r13)
575	bne	1f
576#ifdef CONFIG_PPC_BOOK3S
577	lbz	r11,PACAIRQHAPPENED(r13)
578	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
579	bne-	interrupt_return_\srr\()_kernel_restart
580#endif
581	li	r11,0
582	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
5831:
584
585.Lfast_kernel_interrupt_return_\srr\():
586	cmpdi	cr1,r3,0
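	/*
	 * r3 is non-zero when a stack store must be emulated (see 1:
	 * below); keep the test in cr1 across the register restore.
	 */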
587#ifdef CONFIG_PPC_BOOK3S
588	.ifc \srr,srr
589	lbz	r4,PACASRR_VALID(r13)
590	.else
591	lbz	r4,PACAHSRR_VALID(r13)
592	.endif
593	cmpdi	r4,0
594	li	r4,0
595	bne	1f
596#endif
597	ld	r11,_NIP(r1)
598	ld	r12,_MSR(r1)
599	.ifc \srr,srr
600	mtspr	SPRN_SRR0,r11
601	mtspr	SPRN_SRR1,r12
6021:
603#ifdef CONFIG_PPC_BOOK3S
604	stb	r4,PACASRR_VALID(r13)
605#endif
606	.else
607	mtspr	SPRN_HSRR0,r11
608	mtspr	SPRN_HSRR1,r12
6091:
610#ifdef CONFIG_PPC_BOOK3S
611	stb	r4,PACAHSRR_VALID(r13)
612#endif
613	.endif
614	DEBUG_SRR_VALID \srr
615
616BEGIN_FTR_SECTION
617	stdcx.	r0,0,r1		/* to clear the reservation */
618FTR_SECTION_ELSE
619	ldarx	r0,0,r1
620ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
621
622	ld	r3,_LINK(r1)
623	ld	r4,_CTR(r1)
624	ld	r5,_XER(r1)
625	ld	r6,_CCR(r1)
626	li	r0,0
627
628	REST_4GPRS(7, r1)
629	REST_2GPRS(11, r1)
630
631	mtlr	r3
632	mtctr	r4
633	mtspr	SPRN_XER,r5
634
635	/*
636	 * Leaving a stale exception_marker on the stack can confuse
637	 * the reliable stack unwinder later on. Clear it.
638	 */
639	std	r0,STACK_FRAME_OVERHEAD-16(r1)
640
641	REST_4GPRS(2, r1)
642
643	bne-	cr1,1f /* emulate stack store */
644	mtcr	r6
645	REST_GPR(6, r1)
646	REST_GPR(0, r1)
647	REST_GPR(1, r1)
648	.ifc \srr,srr
649	RFI_TO_KERNEL
650	.else
651	HRFI_TO_KERNEL
652	.endif
653	b	.	/* prevent speculative execution */
654
6551:	/*
656	 * Emulate stack store with update. New r1 value was already calculated
657	 * and updated in our interrupt regs by emulate_loadstore, but we can't
658	 * store the previous value of r1 to the stack before re-loading our
659	 * registers from it, otherwise they could be clobbered.  Use
660	 * PACA_EXGEN as temporary storage to hold the store data, as
661	 * interrupts are disabled here so it won't be clobbered.
662	 */
663	mtcr	r6
664	std	r9,PACA_EXGEN+0(r13)
665	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
666	REST_GPR(6, r1)
667	REST_GPR(0, r1)
668	REST_GPR(1, r1)
669	std	r9,0(r1) /* perform store component of stdu */
670	ld	r9,PACA_EXGEN+0(r13)
671
672	.ifc \srr,srr
673	RFI_TO_KERNEL
674	.else
675	HRFI_TO_KERNEL
676	.endif
677	b	.	/* prevent speculative execution */
678.Linterrupt_return_\srr\()_kernel_rst_end:
679
680#ifdef CONFIG_PPC_BOOK3S
681interrupt_return_\srr\()_kernel_restart:
682_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
683	GET_PACA(r13)
684	ld	r1,PACA_EXIT_SAVE_R1(r13)
685	ld	r2,PACATOC(r13)
686	addi	r3,r1,STACK_FRAME_OVERHEAD
687	li	r11,IRQS_ALL_DISABLED
688	stb	r11,PACAIRQSOFTMASK(r13)
689	bl	interrupt_exit_kernel_restart
690	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
691	b	.Linterrupt_return_\srr\()_kernel_rst_start
6921:
693
694SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
695RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
696#endif
697
698.endm
699
700interrupt_return_macro srr
701#ifdef CONFIG_PPC_BOOK3S
702interrupt_return_macro hsrr
703
704	.globl __end_soft_masked
705__end_soft_masked:
706DEFINE_FIXED_SYMBOL(__end_soft_masked)
707#endif /* CONFIG_PPC_BOOK3S */
708
709#ifdef CONFIG_PPC_BOOK3S
710_GLOBAL(ret_from_fork_scv)
711	bl	schedule_tail
712	REST_NVGPRS(r1)
713	li	r3,0	/* fork() return value */
714	b	.Lsyscall_vectored_common_exit
715#endif
716
717_GLOBAL(ret_from_fork)
718	bl	schedule_tail
719	REST_NVGPRS(r1)
720	li	r3,0	/* fork() return value */
721	b	.Lsyscall_exit
722
723_GLOBAL(ret_from_kernel_thread)
724	bl	schedule_tail
725	REST_NVGPRS(r1)
726	mtctr	r14
727	mr	r3,r15
728#ifdef PPC64_ELF_ABI_v2
729	mr	r12,r14
730#endif
731	bctrl
732	li	r3,0
733	b	.Lsyscall_exit
v6.8
  1#include <asm/asm-offsets.h>
  2#include <asm/bug.h>
  3#ifdef CONFIG_PPC_BOOK3S
  4#include <asm/exception-64s.h>
  5#else
  6#include <asm/exception-64e.h>
  7#endif
  8#include <asm/feature-fixups.h>
  9#include <asm/head-64.h>
 10#include <asm/hw_irq.h>
 11#include <asm/kup.h>
 12#include <asm/mmu.h>
 13#include <asm/ppc_asm.h>
 14#include <asm/ptrace.h>
 15
 16	.align 7
 17
 18.macro DEBUG_SRR_VALID srr
 19#ifdef CONFIG_PPC_RFI_SRR_DEBUG
 20	.ifc \srr,srr
 21	mfspr	r11,SPRN_SRR0
 22	ld	r12,_NIP(r1)
 23	clrrdi  r11,r11,2
 24	clrrdi  r12,r12,2
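	/*
	 * The low two bits of SRR0 are ignored by the CPU, so mask them
	 * out of both values before comparing.
	 */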
 25100:	tdne	r11,r12
 26	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
 27	mfspr	r11,SPRN_SRR1
 28	ld	r12,_MSR(r1)
 29100:	tdne	r11,r12
 30	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
 31	.else
 32	mfspr	r11,SPRN_HSRR0
 33	ld	r12,_NIP(r1)
 34	clrrdi  r11,r11,2
 35	clrrdi  r12,r12,2
 36100:	tdne	r11,r12
 37	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
 38	mfspr	r11,SPRN_HSRR1
 39	ld	r12,_MSR(r1)
 40100:	tdne	r11,r12
 41	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
 42	.endif
 43#endif
 44.endm
 45
 46#ifdef CONFIG_PPC_BOOK3S
 47.macro system_call_vectored name trapnr
 48	.globl system_call_vectored_\name
 49system_call_vectored_\name:
 50_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
 51	SCV_INTERRUPT_TO_KERNEL
 52	mr	r10,r1
 53	ld	r1,PACAKSAVE(r13)
 54	std	r10,0(r1)
 55	std	r11,_LINK(r1)
 56	std	r11,_NIP(r1)	/* Saved LR is also the next instruction */
 57	std	r12,_MSR(r1)
 58	std	r0,GPR0(r1)
 59	std	r10,GPR1(r1)
 60	std	r2,GPR2(r1)
 61	LOAD_PACA_TOC()
 62	mfcr	r12
 63	li	r11,0
 64	/* Save syscall parameters in r3-r8 */
 65	SAVE_GPRS(3, 8, r1)
 66	/* Zero r9-r12, this should only be required when restoring all GPRs */
 67	std	r11,GPR9(r1)
 68	std	r11,GPR10(r1)
 69	std	r11,GPR11(r1)
 70	std	r11,GPR12(r1)
 71	std	r9,GPR13(r1)
 72	SAVE_NVGPRS(r1)
 73	std	r11,_XER(r1)
 74	std	r11,_CTR(r1)
 75
 76	li	r11,\trapnr
 77	std	r11,_TRAP(r1)
 78	std	r12,_CCR(r1)
 79	std	r3,ORIG_GPR3(r1)
 80	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
 81	std	r11,STACK_INT_FRAME_MARKER(r1)		/* "regs" marker */
 82	/* Calling convention has r3 = regs, r4 = orig r0 */
 83	addi	r3,r1,STACK_INT_FRAME_REGS
 84	mr	r4,r0
 85
 86BEGIN_FTR_SECTION
 87	HMT_MEDIUM
 88END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 89
 90	/*
 91	 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
 92	 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
 93	 * and interrupts may be masked and pending already.
 94	 * system_call_exception() will call trace_hardirqs_off() which means
 95	 * interrupts could already have been blocked before trace_hardirqs_off,
 96	 * but this is the best we can do.
 97	 */
 98
 99	/*
100	 * Zero user registers to prevent influencing speculative execution
101	 * state of kernel code.
102	 */
103	SANITIZE_SYSCALL_GPRS()
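	/*
	 * CFUNC() expands to sym@notoc when the kernel is built with
	 * pcrel addressing, and to the plain symbol otherwise.
	 */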
104	bl	CFUNC(system_call_exception)
105
106.Lsyscall_vectored_\name\()_exit:
107	addi	r4,r1,STACK_INT_FRAME_REGS
108	li	r5,1 /* scv */
109	bl	CFUNC(syscall_exit_prepare)
110	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
111.Lsyscall_vectored_\name\()_rst_start:
112	lbz	r11,PACAIRQHAPPENED(r13)
113	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
114	bne-	syscall_vectored_\name\()_restart
115	li	r11,IRQS_ENABLED
116	stb	r11,PACAIRQSOFTMASK(r13)
117	li	r11,0
118	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
119
120	ld	r2,_CCR(r1)
121	ld	r4,_NIP(r1)
122	ld	r5,_MSR(r1)
123
124BEGIN_FTR_SECTION
125	stdcx.	r0,0,r1			/* to clear the reservation */
126END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
127
128BEGIN_FTR_SECTION
129	HMT_MEDIUM_LOW
130END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
131
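	/*
	 * SANITIZE_RESTORE_NVGPRS() restores the non-volatile GPRs only
	 * when CONFIG_INTERRUPT_SANITIZE_REGISTERS is enabled; otherwise
	 * it is a no-op and NVGPRs are restored on the slow path only.
	 */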
132	SANITIZE_RESTORE_NVGPRS()
133	cmpdi	r3,0
134	bne	.Lsyscall_vectored_\name\()_restore_regs
135
136	/* rfscv returns with LR->NIA and CTR->MSR */
137	mtlr	r4
138	mtctr	r5
139
140	/* Could zero these as per ABI, but we may consider a stricter ABI
141	 * which preserves these if libc implementations can benefit, so
142	 * restore them for now until further measurement is done. */
143	REST_GPR(0, r1)
144	REST_GPRS(4, 8, r1)
145	/* Zero volatile regs that may contain sensitive kernel data */
146	ZEROIZE_GPRS(9, 12)
147	mtspr	SPRN_XER,r0
148
149	/*
150	 * We don't need to restore AMR on the way back to userspace for KUAP.
151	 * The value of AMR only matters while we're in the kernel.
152	 */
153	mtcr	r2
154	REST_GPRS(2, 3, r1)
155	REST_GPR(13, r1)
156	REST_GPR(1, r1)
157	RFSCV_TO_USER
158	b	.	/* prevent speculative execution */
159
160.Lsyscall_vectored_\name\()_restore_regs:
161	mtspr	SPRN_SRR0,r4
162	mtspr	SPRN_SRR1,r5
163
164	ld	r3,_CTR(r1)
165	ld	r4,_LINK(r1)
166	ld	r5,_XER(r1)
167
168	HANDLER_RESTORE_NVGPRS()
169	REST_GPR(0, r1)
170	mtcr	r2
171	mtctr	r3
172	mtlr	r4
173	mtspr	SPRN_XER,r5
174	REST_GPRS(2, 13, r1)
175	REST_GPR(1, r1)
176	RFI_TO_USER
177.Lsyscall_vectored_\name\()_rst_end:
178
179syscall_vectored_\name\()_restart:
180_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
181	GET_PACA(r13)
182	ld	r1,PACA_EXIT_SAVE_R1(r13)
183	LOAD_PACA_TOC()
184	ld	r3,RESULT(r1)
185	addi	r4,r1,STACK_INT_FRAME_REGS
186	li	r11,IRQS_ALL_DISABLED
187	stb	r11,PACAIRQSOFTMASK(r13)
188	bl	CFUNC(syscall_exit_restart)
189	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
190	b	.Lsyscall_vectored_\name\()_rst_start
1911:
192
193SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
194RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
195
196.endm
197
198system_call_vectored common 0x3000
199
200/*
201 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
202 * which is tested by system_call_exception when r0 is -1 (as set by vector
203 * entry code).
204 */
205system_call_vectored sigill 0x7ff0
206
207#endif /* CONFIG_PPC_BOOK3S */
208
209	.balign IFETCH_ALIGN_BYTES
210	.globl system_call_common_real
211system_call_common_real:
212_ASM_NOKPROBE_SYMBOL(system_call_common_real)
213	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
214	mtmsrd	r10
215
216	.balign IFETCH_ALIGN_BYTES
217	.globl system_call_common
218system_call_common:
219_ASM_NOKPROBE_SYMBOL(system_call_common)
220	mr	r10,r1
221	ld	r1,PACAKSAVE(r13)
222	std	r10,0(r1)
223	std	r11,_NIP(r1)
224	std	r12,_MSR(r1)
225	std	r0,GPR0(r1)
226	std	r10,GPR1(r1)
227	std	r2,GPR2(r1)
228#ifdef CONFIG_PPC_E500
229START_BTB_FLUSH_SECTION
230	BTB_FLUSH(r10)
231END_BTB_FLUSH_SECTION
232#endif
233	LOAD_PACA_TOC()
234	mfcr	r12
235	li	r11,0
236	/* Save syscall parameters in r3-r8 */
237	SAVE_GPRS(3, 8, r1)
238	/* Zero r9-r12, this should only be required when restoring all GPRs */
239	std	r11,GPR9(r1)
240	std	r11,GPR10(r1)
241	std	r11,GPR11(r1)
242	std	r11,GPR12(r1)
243	std	r9,GPR13(r1)
244	SAVE_NVGPRS(r1)
245	std	r11,_XER(r1)
246	std	r11,_CTR(r1)
247	mflr	r10
248
249	/*
250	 * This clears CR0.SO (bit 28), which is the error indication on
251	 * return from this system call.
252	 */
253	rldimi	r12,r11,28,(63-28)
254	li	r11,0xc00
255	std	r10,_LINK(r1)
256	std	r11,_TRAP(r1)
257	std	r12,_CCR(r1)
258	std	r3,ORIG_GPR3(r1)
259	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
260	std	r11,STACK_INT_FRAME_MARKER(r1)		/* "regs" marker */
261	/* Calling convention has r3 = regs, r4 = orig r0 */
262	addi	r3,r1,STACK_INT_FRAME_REGS
263	mr	r4,r0
264
265#ifdef CONFIG_PPC_BOOK3S
266	li	r11,1
267	stb	r11,PACASRR_VALID(r13)
268#endif
269
270	/*
271	 * We always enter kernel from userspace with irq soft-mask enabled and
272	 * nothing pending. system_call_exception() will call
273	 * trace_hardirqs_off().
274	 */
275	li	r11,IRQS_ALL_DISABLED
276	stb	r11,PACAIRQSOFTMASK(r13)
277#ifdef CONFIG_PPC_BOOK3S
278	li	r12,-1 /* Set MSR_EE and MSR_RI */
279	mtmsrd	r12,1
280#else
281	wrteei	1
282#endif
283
284	/*
285	 * Zero user registers to prevent influencing speculative execution
286	 * state of kernel code.
287	 */
288	SANITIZE_SYSCALL_GPRS()
289	bl	CFUNC(system_call_exception)
290
291.Lsyscall_exit:
292	addi	r4,r1,STACK_INT_FRAME_REGS
293	li	r5,0 /* !scv */
294	bl	CFUNC(syscall_exit_prepare)
295	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
296#ifdef CONFIG_PPC_BOOK3S
297.Lsyscall_rst_start:
298	lbz	r11,PACAIRQHAPPENED(r13)
299	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
300	bne-	syscall_restart
301#endif
302	li	r11,IRQS_ENABLED
303	stb	r11,PACAIRQSOFTMASK(r13)
304	li	r11,0
305	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
306
307	ld	r2,_CCR(r1)
308	ld	r6,_LINK(r1)
309	mtlr	r6
310
311#ifdef CONFIG_PPC_BOOK3S
312	lbz	r4,PACASRR_VALID(r13)
313	cmpdi	r4,0
314	bne	1f
315	li	r4,0
316	stb	r4,PACASRR_VALID(r13)
317#endif
318	ld	r4,_NIP(r1)
319	ld	r5,_MSR(r1)
320	mtspr	SPRN_SRR0,r4
321	mtspr	SPRN_SRR1,r5
3221:
323	DEBUG_SRR_VALID srr
324
325BEGIN_FTR_SECTION
326	stdcx.	r0,0,r1			/* to clear the reservation */
327END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
328
329	SANITIZE_RESTORE_NVGPRS()
330	cmpdi	r3,0
331	bne	.Lsyscall_restore_regs
332	/* Zero volatile regs that may contain sensitive kernel data */
333	ZEROIZE_GPR(0)
334	ZEROIZE_GPRS(4, 12)
335	mtctr	r0
336	mtspr	SPRN_XER,r0
337.Lsyscall_restore_regs_cont:
338
339BEGIN_FTR_SECTION
340	HMT_MEDIUM_LOW
341END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
342
343	/*
344	 * We don't need to restore AMR on the way back to userspace for KUAP.
345	 * The value of AMR only matters while we're in the kernel.
346	 */
347	mtcr	r2
348	REST_GPRS(2, 3, r1)
349	REST_GPR(13, r1)
350	REST_GPR(1, r1)
351	RFI_TO_USER
352	b	.	/* prevent speculative execution */
353
354.Lsyscall_restore_regs:
355	ld	r3,_CTR(r1)
356	ld	r4,_XER(r1)
357	HANDLER_RESTORE_NVGPRS()
358	mtctr	r3
359	mtspr	SPRN_XER,r4
360	REST_GPR(0, r1)
361	REST_GPRS(4, 12, r1)
362	b	.Lsyscall_restore_regs_cont
363.Lsyscall_rst_end:
364
365#ifdef CONFIG_PPC_BOOK3S
366syscall_restart:
367_ASM_NOKPROBE_SYMBOL(syscall_restart)
368	GET_PACA(r13)
369	ld	r1,PACA_EXIT_SAVE_R1(r13)
370	LOAD_PACA_TOC()
371	ld	r3,RESULT(r1)
372	addi	r4,r1,STACK_INT_FRAME_REGS
373	li	r11,IRQS_ALL_DISABLED
374	stb	r11,PACAIRQSOFTMASK(r13)
375	bl	CFUNC(syscall_exit_restart)
376	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
377	b	.Lsyscall_rst_start
3781:
379
380SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
381RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
382#endif
383
384	/*
385	 * If MSR EE/RI was never enabled, IRQs were not reconciled, NVGPRs were
386	 * not touched, and no exit work was created, then this path can be used.
387	 */
388	.balign IFETCH_ALIGN_BYTES
389	.globl fast_interrupt_return_srr
390fast_interrupt_return_srr:
391_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
392	kuap_check_amr r3, r4
393	ld	r5,_MSR(r1)
394	andi.	r0,r5,MSR_PR
395#ifdef CONFIG_PPC_BOOK3S
396	beq	1f
397	kuap_user_restore r3, r4
398	b	.Lfast_user_interrupt_return_srr
3991:	kuap_kernel_restore r3, r4
400	andi.	r0,r5,MSR_RI
401	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
402	bne+	.Lfast_kernel_interrupt_return_srr
403	addi	r3,r1,STACK_INT_FRAME_REGS
404	bl	CFUNC(unrecoverable_exception)
405	b	. /* should not get here */
406#else
407	bne	.Lfast_user_interrupt_return_srr
408	b	.Lfast_kernel_interrupt_return_srr
409#endif
410
411.macro interrupt_return_macro srr
412	.balign IFETCH_ALIGN_BYTES
413	.globl interrupt_return_\srr
414interrupt_return_\srr\():
415_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
416	ld	r4,_MSR(r1)
417	andi.	r0,r4,MSR_PR
418	beq	interrupt_return_\srr\()_kernel
419interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
420_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
421	addi	r3,r1,STACK_INT_FRAME_REGS
422	bl	CFUNC(interrupt_exit_user_prepare)
423#ifndef CONFIG_INTERRUPT_SANITIZE_REGISTERS
424	cmpdi	r3,0
425	bne-	.Lrestore_nvgprs_\srr
426.Lrestore_nvgprs_\srr\()_cont:
427#endif
428	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
429#ifdef CONFIG_PPC_BOOK3S
430.Linterrupt_return_\srr\()_user_rst_start:
431	lbz	r11,PACAIRQHAPPENED(r13)
432	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
433	bne-	interrupt_return_\srr\()_user_restart
434#endif
435	li	r11,IRQS_ENABLED
436	stb	r11,PACAIRQSOFTMASK(r13)
437	li	r11,0
438	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
439
440.Lfast_user_interrupt_return_\srr\():
441	SANITIZE_RESTORE_NVGPRS()
442#ifdef CONFIG_PPC_BOOK3S
443	.ifc \srr,srr
444	lbz	r4,PACASRR_VALID(r13)
445	.else
446	lbz	r4,PACAHSRR_VALID(r13)
447	.endif
448	cmpdi	r4,0
449	li	r4,0
450	bne	1f
451#endif
452	ld	r11,_NIP(r1)
453	ld	r12,_MSR(r1)
454	.ifc \srr,srr
455	mtspr	SPRN_SRR0,r11
456	mtspr	SPRN_SRR1,r12
4571:
458#ifdef CONFIG_PPC_BOOK3S
459	stb	r4,PACASRR_VALID(r13)
460#endif
461	.else
462	mtspr	SPRN_HSRR0,r11
463	mtspr	SPRN_HSRR1,r12
4641:
465#ifdef CONFIG_PPC_BOOK3S
466	stb	r4,PACAHSRR_VALID(r13)
467#endif
468	.endif
469	DEBUG_SRR_VALID \srr
470
471#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
472	lbz	r4,PACAIRQSOFTMASK(r13)
473	tdnei	r4,IRQS_ENABLED
474#endif
475
476BEGIN_FTR_SECTION
477	ld	r10,_PPR(r1)
478	mtspr	SPRN_PPR,r10
479END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
480
481BEGIN_FTR_SECTION
482	stdcx.	r0,0,r1		/* to clear the reservation */
483FTR_SECTION_ELSE
484	ldarx	r0,0,r1
485ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
486
487	ld	r3,_CCR(r1)
488	ld	r4,_LINK(r1)
489	ld	r5,_CTR(r1)
490	ld	r6,_XER(r1)
491	li	r0,0
492
493	REST_GPRS(7, 13, r1)
494
495	mtcr	r3
496	mtlr	r4
497	mtctr	r5
498	mtspr	SPRN_XER,r6
499
500	REST_GPRS(2, 6, r1)
501	REST_GPR(0, r1)
502	REST_GPR(1, r1)
503	.ifc \srr,srr
504	RFI_TO_USER
505	.else
506	HRFI_TO_USER
507	.endif
508	b	.	/* prevent speculative execution */
509.Linterrupt_return_\srr\()_user_rst_end:
510
511#ifndef CONFIG_INTERRUPT_SANITIZE_REGISTERS
512.Lrestore_nvgprs_\srr\():
513	REST_NVGPRS(r1)
514	b	.Lrestore_nvgprs_\srr\()_cont
515#endif
516
517#ifdef CONFIG_PPC_BOOK3S
518interrupt_return_\srr\()_user_restart:
519_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
520	GET_PACA(r13)
521	ld	r1,PACA_EXIT_SAVE_R1(r13)
522	LOAD_PACA_TOC()
523	addi	r3,r1,STACK_INT_FRAME_REGS
524	li	r11,IRQS_ALL_DISABLED
525	stb	r11,PACAIRQSOFTMASK(r13)
526	bl	CFUNC(interrupt_exit_user_restart)
527	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
528	b	.Linterrupt_return_\srr\()_user_rst_start
5291:
530
531SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
532RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
533#endif
534
535	.balign IFETCH_ALIGN_BYTES
536interrupt_return_\srr\()_kernel:
537_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
538	addi	r3,r1,STACK_INT_FRAME_REGS
539	bl	CFUNC(interrupt_exit_kernel_prepare)
540
541	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
542.Linterrupt_return_\srr\()_kernel_rst_start:
543	ld	r11,SOFTE(r1)
544	cmpwi	r11,IRQS_ENABLED
545	stb	r11,PACAIRQSOFTMASK(r13)
546	beq	.Linterrupt_return_\srr\()_soft_enabled
547
548	/*
549	 * Returning to soft-disabled context.
550	 * Check if a MUST_HARD_MASK interrupt has become pending, in which
551	 * case we need to disable MSR[EE] in the return context.
552	 *
553	 * The MSR[EE] check catches among other things the short incoherency
554	 * in hard_irq_disable() between clearing MSR[EE] and setting
555	 * PACA_IRQ_HARD_DIS.
556	 */
557	ld	r12,_MSR(r1)
558	andi.	r10,r12,MSR_EE
559	beq	.Lfast_kernel_interrupt_return_\srr\() // EE already disabled
560	lbz	r11,PACAIRQHAPPENED(r13)
561	andi.	r10,r11,PACA_IRQ_MUST_HARD_MASK
562	bne	1f // HARD_MASK is pending
563	// No HARD_MASK pending, clear possible HARD_DIS set by interrupt
564	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
565	stb	r11,PACAIRQHAPPENED(r13)
566	b	.Lfast_kernel_interrupt_return_\srr\()
567
568
5691:	/* Must clear MSR_EE from _MSR */
570#ifdef CONFIG_PPC_BOOK3S
571	li	r10,0
572	/* Clear valid before changing _MSR */
573	.ifc \srr,srr
574	stb	r10,PACASRR_VALID(r13)
575	.else
576	stb	r10,PACAHSRR_VALID(r13)
577	.endif
578#endif
579	xori	r12,r12,MSR_EE
580	std	r12,_MSR(r1)
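	/* MSR[EE] was tested non-zero above, so the xori clears it */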
581	b	.Lfast_kernel_interrupt_return_\srr\()
582
583.Linterrupt_return_\srr\()_soft_enabled:
584	/*
585	 * In the soft-enabled case, need to double-check that we have no
586	 * pending interrupts that might have come in before we reached the
587	 * restart section of code, and restart the exit so those can be
588	 * handled.
589	 *
590	 * If there are none, it is possible that the interrupt still
591	 * has PACA_IRQ_HARD_DIS set, which needs to be cleared for the
592	 * interrupted context. This clear will not clobber a new pending
593	 * interrupt coming in, because we're in the restart section, so
594	 * such would return to the restart location.
595	 */
596#ifdef CONFIG_PPC_BOOK3S
597	lbz	r11,PACAIRQHAPPENED(r13)
598	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
599	bne-	interrupt_return_\srr\()_kernel_restart
600#endif
601	li	r11,0
602	stb	r11,PACAIRQHAPPENED(r13) // clear the possible HARD_DIS
603
604.Lfast_kernel_interrupt_return_\srr\():
605	SANITIZE_RESTORE_NVGPRS()
606	cmpdi	cr1,r3,0
607#ifdef CONFIG_PPC_BOOK3S
608	.ifc \srr,srr
609	lbz	r4,PACASRR_VALID(r13)
610	.else
611	lbz	r4,PACAHSRR_VALID(r13)
612	.endif
613	cmpdi	r4,0
614	li	r4,0
615	bne	1f
616#endif
617	ld	r11,_NIP(r1)
618	ld	r12,_MSR(r1)
619	.ifc \srr,srr
620	mtspr	SPRN_SRR0,r11
621	mtspr	SPRN_SRR1,r12
6221:
623#ifdef CONFIG_PPC_BOOK3S
624	stb	r4,PACASRR_VALID(r13)
625#endif
626	.else
627	mtspr	SPRN_HSRR0,r11
628	mtspr	SPRN_HSRR1,r12
6291:
630#ifdef CONFIG_PPC_BOOK3S
631	stb	r4,PACAHSRR_VALID(r13)
632#endif
633	.endif
634	DEBUG_SRR_VALID \srr
635
636BEGIN_FTR_SECTION
637	stdcx.	r0,0,r1		/* to clear the reservation */
638FTR_SECTION_ELSE
639	ldarx	r0,0,r1
640ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
641
642	ld	r3,_LINK(r1)
643	ld	r4,_CTR(r1)
644	ld	r5,_XER(r1)
645	ld	r6,_CCR(r1)
646	li	r0,0
647
648	REST_GPRS(7, 12, r1)
649
650	mtlr	r3
651	mtctr	r4
652	mtspr	SPRN_XER,r5
653
654	/*
655	 * Leaving a stale STACK_FRAME_REGS_MARKER on the stack can confuse
656	 * the reliable stack unwinder later on. Clear it.
657	 */
658	std	r0,STACK_INT_FRAME_MARKER(r1)
659
660	REST_GPRS(2, 5, r1)
661
662	bne-	cr1,1f /* emulate stack store */
663	mtcr	r6
664	REST_GPR(6, r1)
665	REST_GPR(0, r1)
666	REST_GPR(1, r1)
667	.ifc \srr,srr
668	RFI_TO_KERNEL
669	.else
670	HRFI_TO_KERNEL
671	.endif
672	b	.	/* prevent speculative execution */
673
6741:	/*
675	 * Emulate stack store with update. New r1 value was already calculated
676	 * and updated in our interrupt regs by emulate_loadstore, but we can't
677	 * store the previous value of r1 to the stack before re-loading our
678	 * registers from it, otherwise they could be clobbered.  Use
679	 * PACA_EXGEN as temporary storage to hold the store data, as
680	 * interrupts are disabled here so it won't be clobbered.
681	 */
682	mtcr	r6
683	std	r9,PACA_EXGEN+0(r13)
684	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
685	REST_GPR(6, r1)
686	REST_GPR(0, r1)
687	REST_GPR(1, r1)
688	std	r9,0(r1) /* perform store component of stdu */
689	ld	r9,PACA_EXGEN+0(r13)
690
691	.ifc \srr,srr
692	RFI_TO_KERNEL
693	.else
694	HRFI_TO_KERNEL
695	.endif
696	b	.	/* prevent speculative execution */
697.Linterrupt_return_\srr\()_kernel_rst_end:
698
699#ifdef CONFIG_PPC_BOOK3S
700interrupt_return_\srr\()_kernel_restart:
701_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
702	GET_PACA(r13)
703	ld	r1,PACA_EXIT_SAVE_R1(r13)
704	LOAD_PACA_TOC()
705	addi	r3,r1,STACK_INT_FRAME_REGS
706	li	r11,IRQS_ALL_DISABLED
707	stb	r11,PACAIRQSOFTMASK(r13)
708	bl	CFUNC(interrupt_exit_kernel_restart)
709	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
710	b	.Linterrupt_return_\srr\()_kernel_rst_start
7111:
712
713SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
714RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
715#endif
716
717.endm
718
719interrupt_return_macro srr
720#ifdef CONFIG_PPC_BOOK3S
721interrupt_return_macro hsrr
722
723	.globl __end_soft_masked
724__end_soft_masked:
725DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
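/*
 * __end_soft_masked marks the end of the text that may be implicitly
 * soft-masked; interrupt entry only consults the soft-mask table for
 * addresses below this symbol.
 */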
726#endif /* CONFIG_PPC_BOOK3S */
727
728#ifdef CONFIG_PPC_BOOK3S
729_GLOBAL(ret_from_fork_scv)
730	bl	CFUNC(schedule_tail)
731	HANDLER_RESTORE_NVGPRS()
732	li	r3,0	/* fork() return value */
733	b	.Lsyscall_vectored_common_exit
734#endif
735
736_GLOBAL(ret_from_fork)
737	bl	CFUNC(schedule_tail)
738	HANDLER_RESTORE_NVGPRS()
739	li	r3,0	/* fork() return value */
740	b	.Lsyscall_exit
741
742_GLOBAL(ret_from_kernel_user_thread)
743	bl	CFUNC(schedule_tail)
744	mtctr	r14
745	mr	r3,r15
746#ifdef CONFIG_PPC64_ELF_ABI_V2
747	mr	r12,r14
748#endif
749	bctrl
750	li	r3,0
751	/*
752	 * It does not matter whether this returns via the scv or sc path
753	 * because it returns as execve() and therefore has no calling ABI
754	 * (i.e., it sets registers according to the exec()ed entry point).
755	 */
756	b	.Lsyscall_exit
757
758_GLOBAL(start_kernel_thread)
759	bl	CFUNC(schedule_tail)
760	mtctr	r14
761	mr	r3,r15
762#ifdef CONFIG_PPC64_ELF_ABI_V2
763	mr	r12,r14
764#endif
765	bctrl
766	/*
767	 * This must not return. We actually want to BUG here, not WARN,
768	 * because BUG will exit the process, which is what the kernel thread
769	 * should have done, and that may give some hope of continuing.
770	 */
771100:	trap
772	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0