v6.13.7
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 *    S390 low-level entry points.
  4 *
  5 *    Copyright IBM Corp. 1999, 2012
  6 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  7 *		 Hartmut Penner (hp@de.ibm.com),
  8 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  9 */
 10
 11#include <linux/export.h>
 12#include <linux/init.h>
 13#include <linux/linkage.h>
 14#include <asm/asm-extable.h>
 15#include <asm/alternative.h>
 16#include <asm/processor.h>
 17#include <asm/cache.h>
 18#include <asm/dwarf.h>
 19#include <asm/errno.h>
 20#include <asm/ptrace.h>
 21#include <asm/thread_info.h>
 22#include <asm/asm-offsets.h>
 23#include <asm/unistd.h>
 24#include <asm/page.h>
 25#include <asm/sigp.h>
 26#include <asm/irq.h>
 27#include <asm/fpu-insn.h>
 28#include <asm/setup.h>
 29#include <asm/nmi.h>
 30#include <asm/nospec-insn.h>
 31#include <asm/lowcore.h>
 32
 33_LPP_OFFSET	= __LC_LPP
 34
 35	.macro STBEAR address
 36	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", ALT_FACILITY(193)
 37	.endm
 38
 39	.macro LBEAR address
 40	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", ALT_FACILITY(193)
 41	.endm
 42
 43	.macro LPSWEY address, lpswe
 44	ALTERNATIVE_2 "b \lpswe;nopr", \
 45		".insn siy,0xeb0000000071,\address,0", ALT_FACILITY(193),		\
 46		__stringify(.insn siy,0xeb0000000071,LOWCORE_ALT_ADDRESS+\address,0),	\
 47		ALT_LOWCORE
 48	.endm
 49
 50	.macro MBEAR reg, lowcore
 51	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK(\lowcore)),\
 52		ALT_FACILITY(193)
 53	.endm
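#
# Annotation (not part of the original source): the four macros above are
# alternatives keyed to facility 193, the BEAR-enhancement facility.
# STBEAR and LBEAR store and load the breaking-event-address register,
# LPSWEY is the long-displacement form of lpswe (with a second alternative
# for a relocated lowcore), and MBEAR copies the saved last-break value
# into pt_regs. Without the facility they degrade to nops or the lpswe
# fallback.
#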
 54
 55	.macro	CHECK_STACK savearea, lowcore
 56#ifdef CONFIG_CHECK_STACK
 57	tml	%r15,THREAD_SIZE - CONFIG_STACK_GUARD
 58	la	%r14,\savearea(\lowcore)
 59	jz	stack_overflow
 60#endif
 61	.endm
 62
 63	.macro	CHECK_VMAP_STACK savearea, lowcore, oklabel
 64#ifdef CONFIG_VMAP_STACK
 65	lgr	%r14,%r15
 66	nill	%r14,0x10000 - THREAD_SIZE
 67	oill	%r14,STACK_INIT_OFFSET
 68	clg	%r14,__LC_KERNEL_STACK(\lowcore)
 69	je	\oklabel
 70	clg	%r14,__LC_ASYNC_STACK(\lowcore)
 71	je	\oklabel
 72	clg	%r14,__LC_MCCK_STACK(\lowcore)
 73	je	\oklabel
 74	clg	%r14,__LC_NODAT_STACK(\lowcore)
 75	je	\oklabel
 76	clg	%r14,__LC_RESTART_STACK(\lowcore)
 77	je	\oklabel
 78	la	%r14,\savearea(\lowcore)
 79	j	stack_overflow
 80#else
 81	j	\oklabel
 82#endif
 83	.endm
 84
 85	/*
 86	 * The TSTMSK macro generates a test-under-mask instruction by
 87	 * calculating the memory offset for the specified mask value.
 88	 * Mask value can be any constant.  The macro shifts the mask
 89	 * value to calculate the memory offset for the test-under-mask
 90	 * instruction.
 91	 */
 92	.macro TSTMSK addr, mask, size=8, bytepos=0
 93		.if (\bytepos < \size) && (\mask >> 8)
 94			.if (\mask & 0xff)
 95				.error "Mask exceeds byte boundary"
 96			.endif
 97			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
 98			.exitm
 99		.endif
100		.ifeq \mask
101			.error "Mask must not be zero"
102		.endif
103		off = \size - \bytepos - 1
104		tm	off+\addr, \mask
105	.endm
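#
# Expansion sketch (annotation): a mask in the second-lowest byte recurses
# once, e.g.
#	TSTMSK	__LC_MCCK_CODE(%r13),0x0100
# effectively assembles to
#	tm	6+__LC_MCCK_CODE(%r13),0x01
# while a plain low-byte mask is tested in byte 7 of the 8-byte field.
#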
106
107	.macro BPOFF
108	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", ALT_SPEC(82)
109	.endm
110
111	.macro BPON
112	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
113	.endm
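#
# Annotation: 0xb2e8 is the PPA (perform-processor-assist) opcode; assist
# codes 12 (BPOFF) and 13 (BPON) switch into and out of the reduced
# branch-prediction mode used as spectre mitigation. ALT_SPEC(82) patches
# the instruction in only when facility 82 is available and the mitigation
# is enabled.
#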
114
115	.macro BPENTER tif_ptr,tif_mask
116	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
117		    "j .+12; nop; nop", ALT_SPEC(82)
118	.endm
119
120	.macro BPEXIT tif_ptr,tif_mask
121	TSTMSK	\tif_ptr,\tif_mask
122	ALTERNATIVE "jz .+8;  .insn rrf,0xb2e80000,0,0,12,0", \
123		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
124	.endm
125
126#if IS_ENABLED(CONFIG_KVM)
127	.macro SIEEXIT sie_control,lowcore
128	lg	%r9,\sie_control			# get control block pointer
129	ni	__SIE_PROG0C+3(%r9),0xfe		# no longer in SIE
130	lctlg	%c1,%c1,__LC_KERNEL_ASCE(\lowcore)	# load primary asce
131	lg	%r9,__LC_CURRENT(\lowcore)
132	mvi	__TI_sie(%r9),0
133	larl	%r9,sie_exit			# skip forward to sie_exit
134	.endm
135#endif
136
137	.macro STACKLEAK_ERASE
138#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
139	brasl	%r14,stackleak_erase_on_task_stack
140#endif
141	.endm
142
143	GEN_BR_THUNK %r14
144
145	.section .kprobes.text, "ax"
146.Ldummy:
147	/*
148	 * The following nop exists only in order to avoid that the next
149	 * symbol starts at the beginning of the kprobes text section.
150	 * In that case there would be several symbols at the same address.
151	 * E.g. objdump would take an arbitrary symbol when disassembling
152	 * the code.
153	 * With the added nop in between this cannot happen.
154	 */
155	nop	0
156
157/*
158 * Scheduler resume function, called by __switch_to
159 *  gpr2 = (task_struct *)prev
160 *  gpr3 = (task_struct *)next
161 * Returns:
162 *  gpr2 = prev
163 */
164SYM_FUNC_START(__switch_to_asm)
165	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
166	lghi	%r4,__TASK_stack
167	lghi	%r1,__TASK_thread
168	llill	%r5,STACK_INIT_OFFSET
169	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
170	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
171	agr	%r15,%r5			# end of kernel stack of next
172	GET_LC	%r13
173	stg	%r3,__LC_CURRENT(%r13)		# store task struct of next
174	stg	%r15,__LC_KERNEL_STACK(%r13)	# store end of kernel stack
175	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
176	aghi	%r3,__TASK_pid
177	mvc	__LC_CURRENT_PID(4,%r13),0(%r3)	# store pid of next
178	ALTERNATIVE "nop", "lpp _LPP_OFFSET(%r13)", ALT_FACILITY(40)
179	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
180	BR_EX	%r14
181SYM_FUNC_END(__switch_to_asm)
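#
# Annotation: __THREAD_ksp(%r1,%r2) above uses base+index addressing with
# %r1 = __TASK_thread, so task pointer + __TASK_thread + __THREAD_ksp is
# reached without a separate add; the same trick reads next's ksp via %r3.
#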
182
183#if IS_ENABLED(CONFIG_KVM)
184/*
185 * __sie64a calling convention:
186 * %r2 pointer to sie control block phys
187 * %r3 pointer to sie control block virt
188 * %r4 guest register save area
189 * %r5 guest asce
190 */
191SYM_FUNC_START(__sie64a)
192	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
193	GET_LC	%r13
194	lg	%r14,__LC_CURRENT(%r13)
195	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
196	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
197	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
198	stg	%r5,__SF_SIE_GUEST_ASCE(%r15)	# save guest asce
199	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
200	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r14) # copy thread flags
201	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
202	mvi	__TI_sie(%r14),1
203	lctlg	%c1,%c1,__SF_SIE_GUEST_ASCE(%r15) # load primary asce
204	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
205	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
206	tm	__SIE_PROG20+3(%r14),3		# last exit...
207	jnz	.Lsie_skip
208	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
209	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
210.Lsie_entry:
211	sie	0(%r14)
 212# Let the next instruction be NOP to avoid triggering a machine check
 213# and handling it in a guest as a result of the instruction execution.
214	nopr	7
215.Lsie_leave:
216	BPOFF
217	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
218.Lsie_skip:
219	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
220	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
221	GET_LC	%r14
222	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r14)	# load primary asce
223	lg	%r14,__LC_CURRENT(%r14)
224	mvi	__TI_sie(%r14),0
225SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
226	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
227	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
228	xgr	%r0,%r0				# clear guest registers to
229	xgr	%r1,%r1				# prevent speculative use
230	xgr	%r3,%r3
231	xgr	%r4,%r4
232	xgr	%r5,%r5
233	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
234	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
235	BR_EX	%r14
236SYM_FUNC_END(__sie64a)
237EXPORT_SYMBOL(__sie64a)
238EXPORT_SYMBOL(sie_exit)
239#endif
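#
# A C-side declaration matching this convention might look as follows
# (illustrative sketch only; the real prototype lives in a header):
#
#	int __sie64a(u64 sie_block_phys,
#		     struct kvm_s390_sie_block *sie_block,
#		     u64 *guest_gprs, unsigned long guest_asce);
#
# The return value is the reason code stored in __SF_SIE_REASON.
#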
240
241/*
242 * SVC interrupt handler routine. System calls are synchronous events and
243 * are entered with interrupts disabled.
244 */
245
246SYM_CODE_START(system_call)
247	STMG_LC	%r8,%r15,__LC_SAVE_AREA
248	GET_LC	%r13
249	stpt	__LC_SYS_ENTER_TIMER(%r13)
250	BPOFF
251	lghi	%r14,0
252.Lsysc_per:
253	STBEAR	__LC_LAST_BREAK(%r13)
254	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
255	lg	%r15,__LC_KERNEL_STACK(%r13)
256	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
257	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
258	# clear user controlled register to prevent speculative use
259	xgr	%r0,%r0
260	xgr	%r1,%r1
261	xgr	%r4,%r4
262	xgr	%r5,%r5
263	xgr	%r6,%r6
264	xgr	%r7,%r7
265	xgr	%r8,%r8
266	xgr	%r9,%r9
267	xgr	%r10,%r10
268	xgr	%r11,%r11
269	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
270	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA(%r13)
271	MBEAR	%r2,%r13
272	lgr	%r3,%r14
273	brasl	%r14,__do_syscall
274	STACKLEAK_ERASE
275	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
276	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
277	BPON
278	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
279	stpt	__LC_EXIT_TIMER(%r13)
280	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
281	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
282SYM_CODE_END(system_call)
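#
# Flow summary (annotation): save the user gprs and last-break address,
# switch to the kernel asce and kernel stack, clear user-controlled
# registers, call __do_syscall with the pt_regs pointer in %r2 and the
# single-step flag (%r14) in %r3, then restore the user asce, store the
# exit timer and return to user space via LPSWEY.
#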
283
284#
285# a new process exits the kernel with ret_from_fork
286#
287SYM_CODE_START(ret_from_fork)
288	lgr	%r3,%r11
289	brasl	%r14,__ret_from_fork
290	STACKLEAK_ERASE
291	GET_LC	%r13
292	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
293	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
294	BPON
295	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
296	stpt	__LC_EXIT_TIMER(%r13)
297	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
298	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
299SYM_CODE_END(ret_from_fork)
300
301/*
302 * Program check handler routine
303 */
304
305SYM_CODE_START(pgm_check_handler)
306	STMG_LC	%r8,%r15,__LC_SAVE_AREA
307	GET_LC	%r13
308	stpt	__LC_SYS_ENTER_TIMER(%r13)
309	BPOFF
310	lmg	%r8,%r9,__LC_PGM_OLD_PSW(%r13)
311	xgr	%r10,%r10
312	tmhh	%r8,0x0001		# coming from user space?
313	jno	.Lpgm_skip_asce
314	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
315	j	3f			# -> fault in user space
316.Lpgm_skip_asce:
317#if IS_ENABLED(CONFIG_KVM)
318	lg	%r11,__LC_CURRENT(%r13)
319	tm	__TI_sie(%r11),0xff
320	jz	1f
321	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
322	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
323	lghi	%r10,_PIF_GUEST_FAULT
324#endif
3251:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
326	jnz	2f			# -> enabled, can't be a double fault
327	tm	__LC_PGM_ILC+3(%r13),0x80	# check for per exception
328	jnz	.Lpgm_svcper		# -> single stepped svc
3292:	CHECK_STACK __LC_SAVE_AREA,%r13
330	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
331	# CHECK_VMAP_STACK branches to stack_overflow or 4f
332	CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4f
3333:	lg	%r15,__LC_KERNEL_STACK(%r13)
3344:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
335	stg	%r10,__PT_FLAGS(%r11)
336	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
337	stmg	%r0,%r7,__PT_R0(%r11)
338	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
339	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK(%r13)
340	stmg	%r8,%r9,__PT_PSW(%r11)
341	# clear user controlled registers to prevent speculative use
342	xgr	%r0,%r0
343	xgr	%r1,%r1
344	xgr	%r3,%r3
345	xgr	%r4,%r4
346	xgr	%r5,%r5
347	xgr	%r6,%r6
348	xgr	%r7,%r7
349	xgr	%r12,%r12
350	lgr	%r2,%r11
351	brasl	%r14,__do_pgm_check
352	tmhh	%r8,0x0001		# returning to user space?
353	jno	.Lpgm_exit_kernel
354	STACKLEAK_ERASE
355	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
356	BPON
357	stpt	__LC_EXIT_TIMER(%r13)
358.Lpgm_exit_kernel:
359	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
360	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
361	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
362	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
363
364#
365# single stepped system call
366#
367.Lpgm_svcper:
368	mvc	__LC_RETURN_PSW(8,%r13),__LC_SVC_NEW_PSW(%r13)
369	larl	%r14,.Lsysc_per
370	stg	%r14,__LC_RETURN_PSW+8(%r13)
371	lghi	%r14,1
372	LBEAR	__LC_PGM_LAST_BREAK(%r13)
373	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
374SYM_CODE_END(pgm_check_handler)
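#
# Annotation: .Lpgm_svcper handles a PER single-step trap on the svc
# instruction itself. It builds a return PSW from the SVC new-PSW mask with
# the address replaced by .Lsysc_per, and %r14 = 1 tells the system call
# path that it is running single-stepped.
#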
375
376/*
377 * Interrupt handler macro used for external and IO interrupts.
378 */
379.macro INT_HANDLER name,lc_old_psw,handler
380SYM_CODE_START(\name)
381	STMG_LC	%r8,%r15,__LC_SAVE_AREA
382	GET_LC	%r13
383	stckf	__LC_INT_CLOCK(%r13)
384	stpt	__LC_SYS_ENTER_TIMER(%r13)
385	STBEAR	__LC_LAST_BREAK(%r13)
386	BPOFF
387	lmg	%r8,%r9,\lc_old_psw(%r13)
388	tmhh	%r8,0x0001			# interrupting from user ?
389	jnz	1f
390#if IS_ENABLED(CONFIG_KVM)
391	lg	%r10,__LC_CURRENT(%r13)
392	tm	__TI_sie(%r10),0xff
393	jz	0f
394	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
395	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
396#endif
3970:	CHECK_STACK __LC_SAVE_AREA,%r13
398	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
399	j	2f
4001:	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
401	lg	%r15,__LC_KERNEL_STACK(%r13)
4022:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
403	la	%r11,STACK_FRAME_OVERHEAD(%r15)
404	stmg	%r0,%r7,__PT_R0(%r11)
405	# clear user controlled registers to prevent speculative use
406	xgr	%r0,%r0
407	xgr	%r1,%r1
408	xgr	%r3,%r3
409	xgr	%r4,%r4
410	xgr	%r5,%r5
411	xgr	%r6,%r6
412	xgr	%r7,%r7
413	xgr	%r10,%r10
414	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
415	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
416	MBEAR	%r11,%r13
417	stmg	%r8,%r9,__PT_PSW(%r11)
418	lgr	%r2,%r11		# pass pointer to pt_regs
419	brasl	%r14,\handler
420	mvc	__LC_RETURN_PSW(16,%r13),__PT_PSW(%r11)
421	tmhh	%r8,0x0001		# returning to user ?
422	jno	2f
423	STACKLEAK_ERASE
424	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
425	BPON
426	stpt	__LC_EXIT_TIMER(%r13)
4272:	LBEAR	__PT_LAST_BREAK(%r11)
428	lmg	%r0,%r15,__PT_R0(%r11)
429	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
430SYM_CODE_END(\name)
431.endm
432
433	.section .irqentry.text, "ax"
434
435INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
436INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
437
438	.section .kprobes.text, "ax"
439
440/*
441 * Machine check handler routines
442 */
443SYM_CODE_START(mcck_int_handler)
444	BPOFF
445	GET_LC	%r13
446	lmg	%r8,%r9,__LC_MCK_OLD_PSW(%r13)
447	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_SYSTEM_DAMAGE
448	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
449	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CR_VALID
450	jno	.Lmcck_panic		# control registers invalid -> panic
451	ptlb
452	lay	%r14,__LC_CPU_TIMER_SAVE_AREA(%r13)
453	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
454	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CPU_TIMER_VALID
455	jo	3f
456	la	%r14,__LC_SYS_ENTER_TIMER(%r13)
457	clc	0(8,%r14),__LC_EXIT_TIMER(%r13)
458	jl	1f
459	la	%r14,__LC_EXIT_TIMER(%r13)
4601:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER(%r13)
461	jl	2f
462	la	%r14,__LC_LAST_UPDATE_TIMER(%r13)
4632:	spt	0(%r14)
464	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
4653:	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_MWP_VALID
466	jno	.Lmcck_panic
467	tmhh	%r8,0x0001		# interrupting from user ?
468	jnz	.Lmcck_user
469	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_IA_VALID
470	jno	.Lmcck_panic
471#if IS_ENABLED(CONFIG_KVM)
472	lg	%r10,__LC_CURRENT(%r13)
473	tm	__TI_sie(%r10),0xff
474	jz	.Lmcck_user
 475	# Need to compare the address instead of the __TI_sie flag.
476	# Otherwise there would be a race between setting the flag
477	# and entering SIE (or leaving and clearing the flag). This
478	# would cause machine checks targeted at the guest to be
479	# handled by the host.
480	larl	%r14,.Lsie_entry
481	clgrjl	%r9,%r14, 4f
482	larl	%r14,.Lsie_leave
483	clgrjhe	%r9,%r14, 4f
484	lg	%r10,__LC_PCPU
485	oi	__PCPU_FLAGS+7(%r10), _CIF_MCCK_GUEST
4864:	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
487	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
488#endif
489.Lmcck_user:
490	lg	%r15,__LC_MCCK_STACK(%r13)
491	la	%r11,STACK_FRAME_OVERHEAD(%r15)
492	stctg	%c1,%c1,__PT_CR1(%r11)
493	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
494	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
495	lay	%r14,__LC_GPREGS_SAVE_AREA(%r13)
496	mvc	__PT_R0(128,%r11),0(%r14)
497	# clear user controlled registers to prevent speculative use
498	xgr	%r0,%r0
499	xgr	%r1,%r1
500	xgr	%r3,%r3
501	xgr	%r4,%r4
502	xgr	%r5,%r5
503	xgr	%r6,%r6
504	xgr	%r7,%r7
505	xgr	%r10,%r10
506	stmg	%r8,%r9,__PT_PSW(%r11)
507	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
508	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
509	lgr	%r2,%r11		# pass pointer to pt_regs
510	brasl	%r14,s390_do_machine_check
511	lctlg	%c1,%c1,__PT_CR1(%r11)
512	lmg	%r0,%r10,__PT_R0(%r11)
513	mvc	__LC_RETURN_MCCK_PSW(16,%r13),__PT_PSW(%r11) # move return PSW
514	tm	__LC_RETURN_MCCK_PSW+1(%r13),0x01 # returning to user ?
515	jno	0f
516	BPON
517	stpt	__LC_EXIT_TIMER(%r13)
5180:	ALTERNATIVE "brcl 0,0", __stringify(lay %r12,__LC_LAST_BREAK_SAVE_AREA(%r13)),\
519		ALT_FACILITY(193)
520	LBEAR	0(%r12)
521	lmg	%r11,%r15,__PT_R11(%r11)
522	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE
523
524.Lmcck_panic:
525	/*
526	 * Iterate over all possible CPU addresses in the range 0..0xffff
527	 * and stop each CPU using signal processor. Use compare and swap
528	 * to allow just one CPU-stopper and prevent concurrent CPUs from
529	 * stopping each other while leaving the others running.
530	 */
531	lhi	%r5,0
532	lhi	%r6,1
533	larl	%r7,stop_lock
534	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
535	jnz	4f
536	larl	%r7,this_cpu
537	stap	0(%r7)			# this CPU address
538	lh	%r4,0(%r7)
539	nilh	%r4,0
540	lhi	%r0,1
541	sll	%r0,16			# CPU counter
542	lhi	%r3,0			# next CPU address
5430:	cr	%r3,%r4
544	je	2f
5451:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
546	brc	SIGP_CC_BUSY,1b
5472:	ahi	%r3,1
548	brct	%r0,0b
5493:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
550	brc	SIGP_CC_BUSY,3b
5514:	j	4b
552SYM_CODE_END(mcck_int_handler)
553
554SYM_CODE_START(restart_int_handler)
555	ALTERNATIVE "nop", "lpp _LPP_OFFSET", ALT_FACILITY(40)
556	stg	%r15,__LC_SAVE_AREA_RESTART
557	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
558	jz	0f
559	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA
5600:	larl	%r15,daton_psw
561	lpswe	0(%r15)				# turn dat on, keep irqs off
562.Ldaton:
563	GET_LC	%r15
564	lg	%r15,__LC_RESTART_STACK(%r15)
565	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
566	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
567	GET_LC	%r13
568	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART(%r13)
569	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW(%r13)
570	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
571	lg	%r1,__LC_RESTART_FN(%r13)	# load fn, parm & source cpu
572	lg	%r2,__LC_RESTART_DATA(%r13)
573	lgf	%r3,__LC_RESTART_SOURCE(%r13)
574	ltgr	%r3,%r3				# test source cpu address
575	jm	1f				# negative -> skip source stop
5760:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
577	brc	10,0b				# wait for status stored
5781:	basr	%r14,%r1			# call function
579	stap	__SF_EMPTY(%r15)		# store cpu address
580	llgh	%r3,__SF_EMPTY(%r15)
5812:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
582	brc	2,2b
5833:	j	3b
584SYM_CODE_END(restart_int_handler)
585
586	__INIT
587SYM_CODE_START(early_pgm_check_handler)
588	STMG_LC %r8,%r15,__LC_SAVE_AREA
589	GET_LC	%r13
590	aghi	%r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
591	la	%r11,STACK_FRAME_OVERHEAD(%r15)
592	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
593	stmg	%r0,%r7,__PT_R0(%r11)
594	mvc	__PT_PSW(16,%r11),__LC_PGM_OLD_PSW(%r13)
595	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
596	lgr	%r2,%r11
597	brasl	%r14,__do_early_pgm_check
598	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
599	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
600	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
601SYM_CODE_END(early_pgm_check_handler)
602	__FINIT
603
604	.section .kprobes.text, "ax"
605
606#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
607/*
608 * The synchronous or the asynchronous stack overflowed. We are dead.
609 * No need to properly save the registers, we are going to panic anyway.
610 * Setup a pt_regs so that show_trace can provide a good call trace.
611 */
612SYM_CODE_START(stack_overflow)
613	GET_LC	%r15
614	lg	%r15,__LC_NODAT_STACK(%r15) # change to panic stack
615	la	%r11,STACK_FRAME_OVERHEAD(%r15)
616	stmg	%r0,%r7,__PT_R0(%r11)
617	stmg	%r8,%r9,__PT_PSW(%r11)
618	mvc	__PT_R8(64,%r11),0(%r14)
619	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
620	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
621	lgr	%r2,%r11		# pass pointer to pt_regs
622	jg	kernel_stack_overflow
623SYM_CODE_END(stack_overflow)
624#endif
625
626	.section .data, "aw"
627	.balign	4
628SYM_DATA_LOCAL(stop_lock,	.long 0)
629SYM_DATA_LOCAL(this_cpu,	.short 0)
630	.balign	8
631SYM_DATA_START_LOCAL(daton_psw)
632	.quad	PSW_KERNEL_BITS
633	.quad	.Ldaton
634SYM_DATA_END(daton_psw)
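#
# Annotation: a 16-byte s390 PSW is an 8-byte mask followed by an 8-byte
# address, so daton_psw pairs PSW_KERNEL_BITS with the entry point .Ldaton;
# restart_int_handler loads it with lpswe to enable DAT while keeping
# interrupts disabled.
#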
635
636	.section .rodata, "a"
637	.balign	8
638#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
639SYM_DATA_START(sys_call_table)
640#include "asm/syscall_table.h"
641SYM_DATA_END(sys_call_table)
642#undef SYSCALL
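#
# Expansion sketch (hypothetical entry; the real ones come from the
# generated asm/syscall_table.h): a line such as
#	SYSCALL(sys_read,compat_sys_read)
# becomes
#	.quad	__s390x_sys_read
# here, and .quad __s390_compat_sys_read in the compat table below.
#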
643
644#ifdef CONFIG_COMPAT
645
646#define SYSCALL(esame,emu)	.quad __s390_ ## emu
647SYM_DATA_START(sys_call_table_emu)
648#include "asm/syscall_table.h"
649SYM_DATA_END(sys_call_table_emu)
650#undef SYSCALL
651#endif
v4.10.11
 
   1/*
   2 *    S390 low-level entry points.
   3 *
   4 *    Copyright IBM Corp. 1999, 2012
   5 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
   6 *		 Hartmut Penner (hp@de.ibm.com),
   7 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
   8 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
   9 */
  10
  11#include <linux/init.h>
  12#include <linux/linkage.h>
  13#include <asm/processor.h>
  14#include <asm/cache.h>
  15#include <asm/errno.h>
  16#include <asm/ptrace.h>
  17#include <asm/thread_info.h>
  18#include <asm/asm-offsets.h>
  19#include <asm/unistd.h>
  20#include <asm/page.h>
  21#include <asm/sigp.h>
  22#include <asm/irq.h>
  23#include <asm/vx-insn.h>
  24#include <asm/setup.h>
  25#include <asm/nmi.h>
  26#include <asm/export.h>
  27
  28__PT_R0      =	__PT_GPRS
  29__PT_R1      =	__PT_GPRS + 8
  30__PT_R2      =	__PT_GPRS + 16
  31__PT_R3      =	__PT_GPRS + 24
  32__PT_R4      =	__PT_GPRS + 32
  33__PT_R5      =	__PT_GPRS + 40
  34__PT_R6      =	__PT_GPRS + 48
  35__PT_R7      =	__PT_GPRS + 56
  36__PT_R8      =	__PT_GPRS + 64
  37__PT_R9      =	__PT_GPRS + 72
  38__PT_R10     =	__PT_GPRS + 80
  39__PT_R11     =	__PT_GPRS + 88
  40__PT_R12     =	__PT_GPRS + 96
  41__PT_R13     =	__PT_GPRS + 104
  42__PT_R14     =	__PT_GPRS + 112
  43__PT_R15     =	__PT_GPRS + 120
  44
  45STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
  46STACK_SIZE  = 1 << STACK_SHIFT
  47STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
  48
  49_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
  50		   _TIF_UPROBE)
  51_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
  52		   _TIF_SYSCALL_TRACEPOINT)
  53_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE | _CIF_FPU)
  54_PIF_WORK	= (_PIF_PER_TRAP)
  55
  56#define BASED(name) name-cleanup_critical(%r13)
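#
# Annotation: the interrupt entry points load %r13 with
# "larl %r13,cleanup_critical", so BASED(name) resolves a nearby symbol as
# a displacement off that base register.
#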
  57
  58	.macro	TRACE_IRQS_ON
  59#ifdef CONFIG_TRACE_IRQFLAGS
  60	basr	%r2,%r0
  61	brasl	%r14,trace_hardirqs_on_caller
  62#endif
  63	.endm
  64
  65	.macro	TRACE_IRQS_OFF
  66#ifdef CONFIG_TRACE_IRQFLAGS
  67	basr	%r2,%r0
  68	brasl	%r14,trace_hardirqs_off_caller
  69#endif
  70	.endm
  71
  72	.macro	LOCKDEP_SYS_EXIT
  73#ifdef CONFIG_LOCKDEP
  74	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
  75	jz	.+10
  76	brasl	%r14,lockdep_sys_exit
  77#endif
  78	.endm
  79
  80	.macro	CHECK_STACK stacksize,savearea
  81#ifdef CONFIG_CHECK_STACK
  82	tml	%r15,\stacksize - CONFIG_STACK_GUARD
  83	lghi	%r14,\savearea
  84	jz	stack_overflow
  85#endif
  86	.endm
  87
  88	.macro	SWITCH_ASYNC savearea,timer
  89	tmhh	%r8,0x0001		# interrupting from user ?
  90	jnz	1f
  91	lgr	%r14,%r9
  92	slg	%r14,BASED(.Lcritical_start)
  93	clg	%r14,BASED(.Lcritical_length)
  94	jhe	0f
  95	lghi	%r11,\savearea		# inside critical section, do cleanup
  96	brasl	%r14,cleanup_critical
  97	tmhh	%r8,0x0001		# retest problem state after cleanup
  98	jnz	1f
  990:	lg	%r14,__LC_ASYNC_STACK	# are we already on the async stack?
 100	slgr	%r14,%r15
 101	srag	%r14,%r14,STACK_SHIFT
 102	jnz	2f
 103	CHECK_STACK 1<<STACK_SHIFT,\savearea
 104	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 105	j	3f
 1061:	LAST_BREAK %r14
 107	UPDATE_VTIME %r14,%r15,\timer
 1082:	lg	%r15,__LC_ASYNC_STACK	# load async stack
 1093:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 110	.endm
 111
 112	.macro UPDATE_VTIME w1,w2,enter_timer
 113	lg	\w1,__LC_EXIT_TIMER
 114	lg	\w2,__LC_LAST_UPDATE_TIMER
 115	slg	\w1,\enter_timer
 116	slg	\w2,__LC_EXIT_TIMER
 117	alg	\w1,__LC_USER_TIMER
 118	alg	\w2,__LC_SYSTEM_TIMER
 119	stg	\w1,__LC_USER_TIMER
 120	stg	\w2,__LC_SYSTEM_TIMER
 121	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
 122	.endm
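#
# Annotation (the CPU timer counts down, which makes these differences come
# out positive): user time since the last exit is EXIT_TIMER - enter_timer,
# system time is LAST_UPDATE_TIMER - EXIT_TIMER; both are accumulated into
# __LC_USER_TIMER and __LC_SYSTEM_TIMER before the update stamp is
# refreshed.
#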
 123
 124	.macro	LAST_BREAK scratch
 125	srag	\scratch,%r10,23
 126#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
 127	jz	.+10
 128	stg	%r10,__TASK_thread+__THREAD_last_break(%r12)
 129#else
 130	jz	.+14
 131	lghi	\scratch,__TASK_thread
 132	stg	%r10,__THREAD_last_break(\scratch,%r12)
 133#endif
 134	.endm
 135
 136	.macro REENABLE_IRQS
 137	stg	%r8,__LC_RETURN_PSW
 138	ni	__LC_RETURN_PSW,0xbf
 139	ssm	__LC_RETURN_PSW
 140	.endm
 141
 142	.macro STCK savearea
 143#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
 144	.insn	s,0xb27c0000,\savearea		# store clock fast
 145#else
 146	.insn	s,0xb2050000,\savearea		# store clock
 147#endif
 148	.endm
 149
 150	/*
 151	 * The TSTMSK macro generates a test-under-mask instruction by
 152	 * calculating the memory offset for the specified mask value.
 153	 * Mask value can be any constant.  The macro shifts the mask
 154	 * value to calculate the memory offset for the test-under-mask
 155	 * instruction.
 156	 */
 157	.macro TSTMSK addr, mask, size=8, bytepos=0
 158		.if (\bytepos < \size) && (\mask >> 8)
 159			.if (\mask & 0xff)
 160				.error "Mask exceeds byte boundary"
 161			.endif
 162			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
 163			.exitm
 164		.endif
 165		.ifeq \mask
 166			.error "Mask must not be zero"
 167		.endif
 168		off = \size - \bytepos - 1
 169		tm	off+\addr, \mask
 170	.endm
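#
# Expansion sketch (annotation): assuming _CIF_FPU fits into the low byte,
#	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
# assembles to
#	tm	7+__LC_CPU_FLAGS,_CIF_FPU
#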
 171
 172	.section .kprobes.text, "ax"
 173.Ldummy:
 174	/*
 175	 * This nop exists only in order to avoid that __switch_to starts at
 176	 * the beginning of the kprobes text section. In that case we would
 177	 * have several symbols at the same address. E.g. objdump would take
 178	 * an arbitrary symbol name when disassembling this code.
 179	 * With the added nop in between the __switch_to symbol is unique
 180	 * again.
 181	 */
 182	nop	0
 183
 184/*
 185 * Scheduler resume function, called by switch_to
 186 *  gpr2 = (task_struct *) prev
 187 *  gpr3 = (task_struct *) next
 188 * Returns:
 189 *  gpr2 = prev
 190 */
 191ENTRY(__switch_to)
 192	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
 193	lgr	%r1,%r2
 194	aghi	%r1,__TASK_thread		# thread_struct of prev task
 195	lg	%r5,__TASK_stack(%r3)		# start of kernel stack of next
 196	stg	%r15,__THREAD_ksp(%r1)		# store kernel stack of prev
 197	lgr	%r1,%r3
 198	aghi	%r1,__TASK_thread		# thread_struct of next task
 199	lgr	%r15,%r5
 200	aghi	%r15,STACK_INIT			# end of kernel stack of next
 201	stg	%r3,__LC_CURRENT		# store task struct of next
 202	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
 203	lg	%r15,__THREAD_ksp(%r1)		# load kernel stack of next
 204	/* c4 is used in guest detection: arch/s390/kernel/perf_cpum_sf.c */
 205	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
 206	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
 207	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 208	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
 209	bzr	%r14
 210	.insn	s,0xb2800000,__LC_LPP		# set program parameter
 211	br	%r14
 212
 213.L__critical_start:
 214
 215#if IS_ENABLED(CONFIG_KVM)
 216/*
 217 * sie64a calling convention:
 218 * %r2 pointer to sie control block
 219 * %r3 guest register save area
 220 */
 221ENTRY(sie64a)
 222	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
 223	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
 224	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
 225	xc	__SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
 226	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
 227	jno	.Lsie_load_guest_gprs
 228	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
 229.Lsie_load_guest_gprs:
 230	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
 231	lg	%r14,__LC_GMAP			# get gmap pointer
 232	ltgr	%r14,%r14
 233	jz	.Lsie_gmap
 234	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
 235.Lsie_gmap:
 236	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
 237	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
 238	tm	__SIE_PROG20+3(%r14),3		# last exit...
 239	jnz	.Lsie_skip
 240	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 241	jo	.Lsie_skip			# exit if fp/vx regs changed
 242	sie	0(%r14)
 243.Lsie_skip:
 244	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
 245	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 246.Lsie_done:
 247# some program checks are suppressing. C code (e.g. do_protection_exception)
 248# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
 249# instructions between sie64a and .Lsie_done should not cause program
 250# interrupts. So let's use a nop (47 00 00 00) as a landing pad.
 251# See also .Lcleanup_sie
 252.Lrewind_pad:
 253	nop	0
 254	.globl sie_exit
 255sie_exit:
 256	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
 257	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
 258	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
 259	lg	%r2,__SF_EMPTY+16(%r15)		# return exit reason code
 260	br	%r14
 261.Lsie_fault:
 262	lghi	%r14,-EFAULT
 263	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
 264	j	sie_exit
 265
 266	EX_TABLE(.Lrewind_pad,.Lsie_fault)
 267	EX_TABLE(sie_exit,.Lsie_fault)
 268EXPORT_SYMBOL(sie64a)
 269EXPORT_SYMBOL(sie_exit)
 270#endif
 271
 272/*
 273 * SVC interrupt handler routine. System calls are synchronous events and
 274 * are executed with interrupts enabled.
 275 */
 276
 277ENTRY(system_call)
 278	stpt	__LC_SYNC_ENTER_TIMER
 279.Lsysc_stmg:
 280	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 281	lg	%r10,__LC_LAST_BREAK
 282	lg	%r12,__LC_CURRENT
 283	lghi	%r14,_PIF_SYSCALL
 284.Lsysc_per:
 285	lg	%r15,__LC_KERNEL_STACK
 286	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
 287	LAST_BREAK %r13
 288.Lsysc_vtime:
 289	UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
 290	stmg	%r0,%r7,__PT_R0(%r11)
 291	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 292	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
 293	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 294	stg	%r14,__PT_FLAGS(%r11)
 295.Lsysc_do_svc:
 296	# load address of system call table
 297#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
 298	lg	%r10,__TASK_thread+__THREAD_sysc_table(%r12)
 299#else
 300	lghi	%r13,__TASK_thread
 301	lg	%r10,__THREAD_sysc_table(%r13,%r12)
 302#endif
 303	llgh	%r8,__PT_INT_CODE+2(%r11)
 304	slag	%r8,%r8,2			# shift and test for svc 0
 305	jnz	.Lsysc_nr_ok
 306	# svc 0: system call number in %r1
 307	llgfr	%r1,%r1				# clear high word in r1
 308	cghi	%r1,NR_syscalls
 309	jnl	.Lsysc_nr_ok
 310	sth	%r1,__PT_INT_CODE+2(%r11)
 311	slag	%r8,%r1,2
 312.Lsysc_nr_ok:
 313	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 314	stg	%r2,__PT_ORIG_GPR2(%r11)
 315	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
 316	lgf	%r9,0(%r8,%r10)			# get system call add.
 317	TSTMSK	__TI_flags(%r12),_TIF_TRACE
 318	jnz	.Lsysc_tracesys
 319	basr	%r14,%r9			# call sys_xxxx
 320	stg	%r2,__PT_R2(%r11)		# store return value
 321
 322.Lsysc_return:
 323	LOCKDEP_SYS_EXIT
 324.Lsysc_tif:
 325	TSTMSK	__PT_FLAGS(%r11),_PIF_WORK
 326	jnz	.Lsysc_work
 327	TSTMSK	__TI_flags(%r12),_TIF_WORK
 328	jnz	.Lsysc_work			# check for work
 329	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
 330	jnz	.Lsysc_work
 331.Lsysc_restore:
 332	lg	%r14,__LC_VDSO_PER_CPU
 333	lmg	%r0,%r10,__PT_R0(%r11)
 334	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
 335	stpt	__LC_EXIT_TIMER
 336	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 337	lmg	%r11,%r15,__PT_R11(%r11)
 338	lpswe	__LC_RETURN_PSW
 339.Lsysc_done:
 340
 341#
 342# One of the work bits is on. Find out which one.
 343#
 344.Lsysc_work:
 345	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
 346	jo	.Lsysc_mcck_pending
 347	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
 348	jo	.Lsysc_reschedule
 349#ifdef CONFIG_UPROBES
 350	TSTMSK	__TI_flags(%r12),_TIF_UPROBE
 351	jo	.Lsysc_uprobe_notify
 352#endif
 353	TSTMSK	__PT_FLAGS(%r11),_PIF_PER_TRAP
 354	jo	.Lsysc_singlestep
 355	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
 356	jo	.Lsysc_sigpending
 357	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
 358	jo	.Lsysc_notify_resume
 359	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 360	jo	.Lsysc_vxrs
 361	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE
 362	jo	.Lsysc_uaccess
 363	j	.Lsysc_return		# beware of critical section cleanup
 364
 365#
 366# _TIF_NEED_RESCHED is set, call schedule
 367#
 368.Lsysc_reschedule:
 369	larl	%r14,.Lsysc_return
 370	jg	schedule
 371
 372#
 373# _CIF_MCCK_PENDING is set, call handler
 374#
 375.Lsysc_mcck_pending:
 376	larl	%r14,.Lsysc_return
 377	jg	s390_handle_mcck	# TIF bit will be cleared by handler
 378
 379#
 380# _CIF_ASCE is set, load user space asce
 381#
 382.Lsysc_uaccess:
 383	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
 384	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 385	j	.Lsysc_return
 386
 387#
 388# CIF_FPU is set, restore floating-point controls and floating-point registers.
 389#
 390.Lsysc_vxrs:
 391	larl	%r14,.Lsysc_return
 392	jg	load_fpu_regs
 393
 394#
 395# _TIF_SIGPENDING is set, call do_signal
 396#
 397.Lsysc_sigpending:
 398	lgr	%r2,%r11		# pass pointer to pt_regs
 399	brasl	%r14,do_signal
 400	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
 401	jno	.Lsysc_return
 402	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
 403	lghi	%r8,0			# svc 0 returns -ENOSYS
 404	llgh	%r1,__PT_INT_CODE+2(%r11)	# load new svc number
 405	cghi	%r1,NR_syscalls
 406	jnl	.Lsysc_nr_ok		# invalid svc number -> do svc 0
 407	slag	%r8,%r1,2
 408	j	.Lsysc_nr_ok		# restart svc
 409
 410#
 411# _TIF_NOTIFY_RESUME is set, call do_notify_resume
 412#
 413.Lsysc_notify_resume:
 414	lgr	%r2,%r11		# pass pointer to pt_regs
 415	larl	%r14,.Lsysc_return
 416	jg	do_notify_resume
 417
 418#
 419# _TIF_UPROBE is set, call uprobe_notify_resume
 420#
 421#ifdef CONFIG_UPROBES
 422.Lsysc_uprobe_notify:
 423	lgr	%r2,%r11		# pass pointer to pt_regs
 424	larl	%r14,.Lsysc_return
 425	jg	uprobe_notify_resume
 426#endif
 427
 428#
 429# _PIF_PER_TRAP is set, call do_per_trap
 430#
 431.Lsysc_singlestep:
 432	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
 433	lgr	%r2,%r11		# pass pointer to pt_regs
 434	larl	%r14,.Lsysc_return
 435	jg	do_per_trap
 436
 437#
 438# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
 439# and after the system call
 440#
 441.Lsysc_tracesys:
 442	lgr	%r2,%r11		# pass pointer to pt_regs
 443	la	%r3,0
 444	llgh	%r0,__PT_INT_CODE+2(%r11)
 445	stg	%r0,__PT_R2(%r11)
 446	brasl	%r14,do_syscall_trace_enter
 447	lghi	%r0,NR_syscalls
 448	clgr	%r0,%r2
 449	jnh	.Lsysc_tracenogo
 450	sllg	%r8,%r2,2
 451	lgf	%r9,0(%r8,%r10)
 452.Lsysc_tracego:
 453	lmg	%r3,%r7,__PT_R3(%r11)
 454	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
 455	lg	%r2,__PT_ORIG_GPR2(%r11)
 456	basr	%r14,%r9		# call sys_xxx
 457	stg	%r2,__PT_R2(%r11)	# store return value
 458.Lsysc_tracenogo:
 459	TSTMSK	__TI_flags(%r12),_TIF_TRACE
 460	jz	.Lsysc_return
 461	lgr	%r2,%r11		# pass pointer to pt_regs
 462	larl	%r14,.Lsysc_return
 463	jg	do_syscall_trace_exit
 464
 465#
 466# a new process exits the kernel with ret_from_fork
 467#
 468ENTRY(ret_from_fork)
 469	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 470	lg	%r12,__LC_CURRENT
 471	brasl	%r14,schedule_tail
 472	TRACE_IRQS_ON
 473	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 474	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
 475	jne	.Lsysc_tracenogo
 476	# it's a kernel thread
 477	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
 478ENTRY(kernel_thread_starter)
 479	la	%r2,0(%r10)
 480	basr	%r14,%r9
 481	j	.Lsysc_tracenogo
 482
 483/*
 484 * Program check handler routine
 485 */
 486
 487ENTRY(pgm_check_handler)
 488	stpt	__LC_SYNC_ENTER_TIMER
 489	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 490	lg	%r10,__LC_LAST_BREAK
 491	lg	%r12,__LC_CURRENT
 492	larl	%r13,cleanup_critical
 493	lmg	%r8,%r9,__LC_PGM_OLD_PSW
 494	tmhh	%r8,0x0001		# test problem state bit
 495	jnz	2f			# -> fault in user space
 496#if IS_ENABLED(CONFIG_KVM)
 497	# cleanup critical section for sie64a
 498	lgr	%r14,%r9
 499	slg	%r14,BASED(.Lsie_critical_start)
 500	clg	%r14,BASED(.Lsie_critical_length)
 501	jhe	0f
 502	brasl	%r14,.Lcleanup_sie
 503#endif
 5040:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
 505	jnz	1f			# -> enabled, can't be a double fault
 506	tm	__LC_PGM_ILC+3,0x80	# check for per exception
 507	jnz	.Lpgm_svcper		# -> single stepped svc
 5081:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
 509	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 510	j	3f
 5112:	LAST_BREAK %r14
 512	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
 513	lg	%r15,__LC_KERNEL_STACK
 514	lgr	%r14,%r12
 515	aghi	%r14,__TASK_thread	# pointer to thread_struct
 516	lghi	%r13,__LC_PGM_TDB
 517	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
 518	jz	3f
 519	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
 5203:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 521	stmg	%r0,%r7,__PT_R0(%r11)
 522	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 523	stmg	%r8,%r9,__PT_PSW(%r11)
 524	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
 525	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
 526	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 527	stg	%r10,__PT_ARGS(%r11)
 528	tm	__LC_PGM_ILC+3,0x80	# check for per exception
 529	jz	4f
 530	tmhh	%r8,0x0001		# kernel per event ?
 531	jz	.Lpgm_kprobe
 532	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
 533	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
 534	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
 535	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
 5364:	REENABLE_IRQS
 537	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 538	larl	%r1,pgm_check_table
 539	llgh	%r10,__PT_INT_CODE+2(%r11)
 540	nill	%r10,0x007f
 541	sll	%r10,2
 542	je	.Lpgm_return
 543	lgf	%r1,0(%r10,%r1)		# load address of handler routine
 544	lgr	%r2,%r11		# pass pointer to pt_regs
 545	basr	%r14,%r1		# branch to interrupt-handler
 546.Lpgm_return:
 547	LOCKDEP_SYS_EXIT
 548	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
 549	jno	.Lsysc_restore
 550	j	.Lsysc_tif
 551
 552#
 553# PER event in supervisor state, must be kprobes
 554#
 555.Lpgm_kprobe:
 556	REENABLE_IRQS
 557	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 558	lgr	%r2,%r11		# pass pointer to pt_regs
 559	brasl	%r14,do_per_trap
 560	j	.Lpgm_return
 561
 562#
 563# single stepped system call
 564#
 565.Lpgm_svcper:
 566	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
 567	larl	%r14,.Lsysc_per
 568	stg	%r14,__LC_RETURN_PSW+8
 569	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
 570	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs
 571
 572/*
 573 * IO interrupt handler routine
 574 */
 575ENTRY(io_int_handler)
 576	STCK	__LC_INT_CLOCK
 577	stpt	__LC_ASYNC_ENTER_TIMER
 578	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 579	lg	%r10,__LC_LAST_BREAK
 580	lg	%r12,__LC_CURRENT
 581	larl	%r13,cleanup_critical
 582	lmg	%r8,%r9,__LC_IO_OLD_PSW
 583	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 584	stmg	%r0,%r7,__PT_R0(%r11)
 585	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 586	stmg	%r8,%r9,__PT_PSW(%r11)
 587	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
 588	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 589	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
 590	jo	.Lio_restore
 591	TRACE_IRQS_OFF
 592	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 593.Lio_loop:
 594	lgr	%r2,%r11		# pass pointer to pt_regs
 595	lghi	%r3,IO_INTERRUPT
 596	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
 597	jz	.Lio_call
 598	lghi	%r3,THIN_INTERRUPT
 599.Lio_call:
 600	brasl	%r14,do_IRQ
 601	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
 602	jz	.Lio_return
 603	tpi	0
 604	jz	.Lio_return
 605	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
 606	j	.Lio_loop
 607.Lio_return:
 608	LOCKDEP_SYS_EXIT
 609	TRACE_IRQS_ON
 610.Lio_tif:
 611	TSTMSK	__TI_flags(%r12),_TIF_WORK
 612	jnz	.Lio_work		# there is work to do (signals etc.)
 613	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
 614	jnz	.Lio_work
 615.Lio_restore:
 616	lg	%r14,__LC_VDSO_PER_CPU
 617	lmg	%r0,%r10,__PT_R0(%r11)
 618	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
 619	stpt	__LC_EXIT_TIMER
 620	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 621	lmg	%r11,%r15,__PT_R11(%r11)
 622	lpswe	__LC_RETURN_PSW
 623.Lio_done:
 624
 625#
  626# There is work to do, find out in which context we have been interrupted:
 627# 1) if we return to user space we can do all _TIF_WORK work
 628# 2) if we return to kernel code and kvm is enabled check if we need to
 629#    modify the psw to leave SIE
 630# 3) if we return to kernel code and preemptive scheduling is enabled check
 631#    the preemption counter and if it is zero call preempt_schedule_irq
 632# Before any work can be done, a switch to the kernel stack is required.
 633#
 634.Lio_work:
 635	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
 636	jo	.Lio_work_user		# yes -> do resched & signal
 637#ifdef CONFIG_PREEMPT
 638	# check for preemptive scheduling
 639	icm	%r0,15,__LC_PREEMPT_COUNT
 640	jnz	.Lio_restore		# preemption is disabled
 641	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
 642	jno	.Lio_restore
 643	# switch to kernel stack
 644	lg	%r1,__PT_R15(%r11)
 645	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 646	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 647	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
 648	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 649	lgr	%r15,%r1
 650	# TRACE_IRQS_ON already done at .Lio_return, call
 651	# TRACE_IRQS_OFF to keep things symmetrical
 652	TRACE_IRQS_OFF
 653	brasl	%r14,preempt_schedule_irq
 654	j	.Lio_return
 655#else
 656	j	.Lio_restore
 657#endif
 658
 659#
 660# Need to do work before returning to userspace, switch to kernel stack
 661#
 662.Lio_work_user:
 663	lg	%r1,__LC_KERNEL_STACK
 664	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 665	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
 666	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 667	lgr	%r15,%r1
 668
 669#
 670# One of the work bits is on. Find out which one.
 671#
 672.Lio_work_tif:
 673	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
 674	jo	.Lio_mcck_pending
 675	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
 676	jo	.Lio_reschedule
 677	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
 678	jo	.Lio_sigpending
 679	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
 680	jo	.Lio_notify_resume
 681	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 682	jo	.Lio_vxrs
 683	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE
 684	jo	.Lio_uaccess
 685	j	.Lio_return		# beware of critical section cleanup
 686
 687#
 688# _CIF_MCCK_PENDING is set, call handler
 689#
 690.Lio_mcck_pending:
 691	# TRACE_IRQS_ON already done at .Lio_return
 692	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
 693	TRACE_IRQS_OFF
 694	j	.Lio_return
 695
 696#
 697# _CIF_ASCE is set, load user space asce
 698#
 699.Lio_uaccess:
 700	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
 701	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 702	j	.Lio_return
 703
 704#
 705# CIF_FPU is set, restore floating-point controls and floating-point registers.
 706#
 707.Lio_vxrs:
 708	larl	%r14,.Lio_return
 709	jg	load_fpu_regs
 710
 711#
 712# _TIF_NEED_RESCHED is set, call schedule
 713#
 714.Lio_reschedule:
 715	# TRACE_IRQS_ON already done at .Lio_return
 716	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 717	brasl	%r14,schedule		# call scheduler
 718	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 719	TRACE_IRQS_OFF
 720	j	.Lio_return
 721
 722#
  723# _TIF_SIGPENDING is set, call do_signal
 724#
 725.Lio_sigpending:
 726	# TRACE_IRQS_ON already done at .Lio_return
 727	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 728	lgr	%r2,%r11		# pass pointer to pt_regs
 729	brasl	%r14,do_signal
 730	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 731	TRACE_IRQS_OFF
 732	j	.Lio_return
 733
 734#
  735# _TIF_NOTIFY_RESUME is set, call do_notify_resume
 736#
 737.Lio_notify_resume:
 738	# TRACE_IRQS_ON already done at .Lio_return
 739	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 740	lgr	%r2,%r11		# pass pointer to pt_regs
 741	brasl	%r14,do_notify_resume
 742	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 743	TRACE_IRQS_OFF
 744	j	.Lio_return
 745
 746/*
 747 * External interrupt handler routine
 748 */
 749ENTRY(ext_int_handler)
 750	STCK	__LC_INT_CLOCK
 751	stpt	__LC_ASYNC_ENTER_TIMER
 752	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 753	lg	%r10,__LC_LAST_BREAK
 754	lg	%r12,__LC_CURRENT
 755	larl	%r13,cleanup_critical
 756	lmg	%r8,%r9,__LC_EXT_OLD_PSW
 757	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 758	stmg	%r0,%r7,__PT_R0(%r11)
 759	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 760	stmg	%r8,%r9,__PT_PSW(%r11)
 761	lghi	%r1,__LC_EXT_PARAMS2
 762	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
 763	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
 764	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
 765	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 766	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
 767	jo	.Lio_restore
 768	TRACE_IRQS_OFF
 769	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 770	lgr	%r2,%r11		# pass pointer to pt_regs
 771	lghi	%r3,EXT_INTERRUPT
 772	brasl	%r14,do_IRQ
 773	j	.Lio_return
 774
 775/*
 776 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 777 */
 778ENTRY(psw_idle)
 779	stg	%r3,__SF_EMPTY(%r15)
 780	larl	%r1,.Lpsw_idle_lpsw+4
 781	stg	%r1,__SF_EMPTY+8(%r15)
 782#ifdef CONFIG_SMP
 783	larl	%r1,smp_cpu_mtid
 784	llgf	%r1,0(%r1)
 785	ltgr	%r1,%r1
 786	jz	.Lpsw_idle_stcctm
 787	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
 788.Lpsw_idle_stcctm:
 789#endif
 790	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
 791	STCK	__CLOCK_IDLE_ENTER(%r2)
 792	stpt	__TIMER_IDLE_ENTER(%r2)
 793.Lpsw_idle_lpsw:
 794	lpswe	__SF_EMPTY(%r15)
 795	br	%r14
 796.Lpsw_idle_end:
 797
 798/*
 799 * Store floating-point controls and floating-point or vector registers
 800 * depending on whether the vector facility is available.  A critical section
 801 * cleanup assures that the registers are stored even if interrupted for
 802 * some other work.  The CIF_FPU flag is set to trigger a lazy restore
 803 * of the register contents at return from io or a system call.
 804 */
 805ENTRY(save_fpu_regs)
 806	lg	%r2,__LC_CURRENT
 807	aghi	%r2,__TASK_thread
 808	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 809	bor	%r14
 810	stfpc	__THREAD_FPU_fpc(%r2)
 811	lg	%r3,__THREAD_FPU_regs(%r2)
 812	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 813	jz	.Lsave_fpu_regs_fp	  # no -> store FP regs
 814	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
 815	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
 816	j	.Lsave_fpu_regs_done	  # -> set CIF_FPU flag
 817.Lsave_fpu_regs_fp:
 818	std	0,0(%r3)
 819	std	1,8(%r3)
 820	std	2,16(%r3)
 821	std	3,24(%r3)
 822	std	4,32(%r3)
 823	std	5,40(%r3)
 824	std	6,48(%r3)
 825	std	7,56(%r3)
 826	std	8,64(%r3)
 827	std	9,72(%r3)
 828	std	10,80(%r3)
 829	std	11,88(%r3)
 830	std	12,96(%r3)
 831	std	13,104(%r3)
 832	std	14,112(%r3)
 833	std	15,120(%r3)
 834.Lsave_fpu_regs_done:
 835	oi	__LC_CPU_FLAGS+7,_CIF_FPU
 836	br	%r14
 837.Lsave_fpu_regs_end:
 838#if IS_ENABLED(CONFIG_KVM)
 839EXPORT_SYMBOL(save_fpu_regs)
 840#endif
 841
 842/*
 843 * Load floating-point controls and floating-point or vector registers.
 844 * A critical section cleanup assures that the register contents are
 845 * loaded even if interrupted for some other work.
 846 *
 847 * There are special calling conventions to fit into sysc and io return work:
 848 *	%r15:	<kernel stack>
 849 * The function requires:
 850 *	%r4 (used as a scratch register)
 851 */
 852load_fpu_regs:
 853	lg	%r4,__LC_CURRENT
 854	aghi	%r4,__TASK_thread
 855	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 856	bnor	%r14
 857	lfpc	__THREAD_FPU_fpc(%r4)
 858	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 859	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
 860	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
 861	VLM	%v0,%v15,0,%r4
 862	VLM	%v16,%v31,256,%r4
 863	j	.Lload_fpu_regs_done
 864.Lload_fpu_regs_fp:
 865	ld	0,0(%r4)
 866	ld	1,8(%r4)
 867	ld	2,16(%r4)
 868	ld	3,24(%r4)
 869	ld	4,32(%r4)
 870	ld	5,40(%r4)
 871	ld	6,48(%r4)
 872	ld	7,56(%r4)
 873	ld	8,64(%r4)
 874	ld	9,72(%r4)
 875	ld	10,80(%r4)
 876	ld	11,88(%r4)
 877	ld	12,96(%r4)
 878	ld	13,104(%r4)
 879	ld	14,112(%r4)
 880	ld	15,120(%r4)
 881.Lload_fpu_regs_done:
 882	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
 883	br	%r14
 884.Lload_fpu_regs_end:
 885
 886.L__critical_end:
 887
 888/*
 889 * Machine check handler routines
 890 */
 891ENTRY(mcck_int_handler)
 892	STCK	__LC_MCCK_CLOCK
 893	la	%r1,4095		# revalidate r1
 894	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
 895	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
 896	lg	%r10,__LC_LAST_BREAK
 897	lg	%r12,__LC_CURRENT
 898	larl	%r13,cleanup_critical
 899	lmg	%r8,%r9,__LC_MCK_OLD_PSW
 900	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
 901	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
 902	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
 903	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
 904	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
 905	jo	3f
 906	la	%r14,__LC_SYNC_ENTER_TIMER
 907	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
 908	jl	0f
 909	la	%r14,__LC_ASYNC_ENTER_TIMER
 9100:	clc	0(8,%r14),__LC_EXIT_TIMER
 911	jl	1f
 912	la	%r14,__LC_EXIT_TIMER
 9131:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
 914	jl	2f
 915	la	%r14,__LC_LAST_UPDATE_TIMER
 9162:	spt	0(%r14)
 917	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
 9183:	TSTMSK	__LC_MCCK_CODE,(MCCK_CODE_PSW_MWP_VALID|MCCK_CODE_PSW_IA_VALID)
 919	jno	.Lmcck_panic		# no -> skip cleanup critical
 920	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
 921.Lmcck_skip:
 922	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
 923	stmg	%r0,%r7,__PT_R0(%r11)
 924	mvc	__PT_R8(64,%r11),0(%r14)
 925	stmg	%r8,%r9,__PT_PSW(%r11)
 926	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 927	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 928	lgr	%r2,%r11		# pass pointer to pt_regs
 929	brasl	%r14,s390_do_machine_check
 930	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
 931	jno	.Lmcck_return
 932	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
 933	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 934	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
 935	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 936	lgr	%r15,%r1
 937	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
 938	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
 939	jno	.Lmcck_return
 940	TRACE_IRQS_OFF
 941	brasl	%r14,s390_handle_mcck
 942	TRACE_IRQS_ON
 943.Lmcck_return:
 944	lg	%r14,__LC_VDSO_PER_CPU
 945	lmg	%r0,%r10,__PT_R0(%r11)
 946	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
 947	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
 948	jno	0f
 949	stpt	__LC_EXIT_TIMER
 950	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 9510:	lmg	%r11,%r15,__PT_R11(%r11)
 952	lpswe	__LC_RETURN_MCCK_PSW
 953
 954.Lmcck_panic:
 955	lg	%r15,__LC_PANIC_STACK
 956	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 957	j	.Lmcck_skip
 958
 959#
 960# PSW restart interrupt handler
 961#
 962ENTRY(restart_int_handler)
 963	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
 964	jz	0f
 965	.insn	s,0xb2800000,__LC_LPP
 9660:	stg	%r15,__LC_SAVE_AREA_RESTART
 967	lg	%r15,__LC_RESTART_STACK
 968	aghi	%r15,-__PT_SIZE			# create pt_regs on stack
 969	xc	0(__PT_SIZE,%r15),0(%r15)
 970	stmg	%r0,%r14,__PT_R0(%r15)
 971	mvc	__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
 972	mvc	__PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
 973	aghi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
 974	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
 975	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
 976	lg	%r2,__LC_RESTART_DATA
 977	lg	%r3,__LC_RESTART_SOURCE
 978	ltgr	%r3,%r3				# test source cpu address
 979	jm	1f				# negative -> skip source stop
 9800:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
 981	brc	10,0b				# wait for status stored
 9821:	basr	%r14,%r1			# call function
 983	stap	__SF_EMPTY(%r15)		# store cpu address
 984	llgh	%r3,__SF_EMPTY(%r15)
 9852:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
 986	brc	2,2b
 9873:	j	3b
 988
 989	.section .kprobes.text, "ax"
 990
 991#ifdef CONFIG_CHECK_STACK
 992/*
 993 * The synchronous or the asynchronous stack overflowed. We are dead.
 994 * No need to properly save the registers, we are going to panic anyway.
 995 * Setup a pt_regs so that show_trace can provide a good call trace.
 996 */
 997stack_overflow:
 998	lg	%r15,__LC_PANIC_STACK	# change to panic stack
 999	la	%r11,STACK_FRAME_OVERHEAD(%r15)
1000	stmg	%r0,%r7,__PT_R0(%r11)
1001	stmg	%r8,%r9,__PT_PSW(%r11)
1002	mvc	__PT_R8(64,%r11),0(%r14)
1003	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
1004	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
1005	lgr	%r2,%r11		# pass pointer to pt_regs
1006	jg	kernel_stack_overflow
1007#endif
1008
1009cleanup_critical:
1010#if IS_ENABLED(CONFIG_KVM)
1011	clg	%r9,BASED(.Lcleanup_table_sie)	# .Lsie_gmap
1012	jl	0f
1013	clg	%r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
1014	jl	.Lcleanup_sie
1015#endif
1016	clg	%r9,BASED(.Lcleanup_table)	# system_call
1017	jl	0f
1018	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
1019	jl	.Lcleanup_system_call
1020	clg	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_tif
1021	jl	0f
1022	clg	%r9,BASED(.Lcleanup_table+24)	# .Lsysc_restore
1023	jl	.Lcleanup_sysc_tif
1024	clg	%r9,BASED(.Lcleanup_table+32)	# .Lsysc_done
1025	jl	.Lcleanup_sysc_restore
1026	clg	%r9,BASED(.Lcleanup_table+40)	# .Lio_tif
1027	jl	0f
1028	clg	%r9,BASED(.Lcleanup_table+48)	# .Lio_restore
1029	jl	.Lcleanup_io_tif
1030	clg	%r9,BASED(.Lcleanup_table+56)	# .Lio_done
1031	jl	.Lcleanup_io_restore
1032	clg	%r9,BASED(.Lcleanup_table+64)	# psw_idle
1033	jl	0f
1034	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
1035	jl	.Lcleanup_idle
1036	clg	%r9,BASED(.Lcleanup_table+80)	# save_fpu_regs
1037	jl	0f
1038	clg	%r9,BASED(.Lcleanup_table+88)	# .Lsave_fpu_regs_end
1039	jl	.Lcleanup_save_fpu_regs
1040	clg	%r9,BASED(.Lcleanup_table+96)	# load_fpu_regs
1041	jl	0f
1042	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
1043	jl	.Lcleanup_load_fpu_regs
10440:	br	%r14
1045
1046	.align	8
1047.Lcleanup_table:
1048	.quad	system_call
1049	.quad	.Lsysc_do_svc
1050	.quad	.Lsysc_tif
1051	.quad	.Lsysc_restore
1052	.quad	.Lsysc_done
1053	.quad	.Lio_tif
1054	.quad	.Lio_restore
1055	.quad	.Lio_done
1056	.quad	psw_idle
1057	.quad	.Lpsw_idle_end
1058	.quad	save_fpu_regs
1059	.quad	.Lsave_fpu_regs_end
1060	.quad	load_fpu_regs
1061	.quad	.Lload_fpu_regs_end
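#
# Annotation: cleanup_critical compares the interrupted address in %r9
# against consecutive [start,end) pairs from this table; on a hit the
# matching .Lcleanup_* routine finishes or rewinds the interrupted sequence
# and returns the adjusted resume address in %r9.
#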
1062
1063#if IS_ENABLED(CONFIG_KVM)
1064.Lcleanup_table_sie:
1065	.quad	.Lsie_gmap
1066	.quad	.Lsie_done
1067
1068.Lcleanup_sie:
1069	lg	%r9,__SF_EMPTY(%r15)		# get control block pointer
1070	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
1071	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
1072	larl	%r9,sie_exit			# skip forward to sie_exit
1073	br	%r14
1074#endif
1075
1076.Lcleanup_system_call:
1077	# check if stpt has been executed
1078	clg	%r9,BASED(.Lcleanup_system_call_insn)
1079	jh	0f
1080	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
1081	cghi	%r11,__LC_SAVE_AREA_ASYNC
1082	je	0f
1083	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
10840:	# check if stmg has been executed
1085	clg	%r9,BASED(.Lcleanup_system_call_insn+8)
1086	jh	0f
1087	mvc	__LC_SAVE_AREA_SYNC(64),0(%r11)
10880:	# check if base register setup + TIF bit load has been done
1089	clg	%r9,BASED(.Lcleanup_system_call_insn+16)
1090	jhe	0f
1091	# set up saved registers r10 and r12
1092	stg	%r10,16(%r11)		# r10 last break
1093	stg	%r12,32(%r11)		# r12 task struct pointer
10940:	# check if the user time update has been done
1095	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
1096	jh	0f
1097	lg	%r15,__LC_EXIT_TIMER
1098	slg	%r15,__LC_SYNC_ENTER_TIMER
1099	alg	%r15,__LC_USER_TIMER
1100	stg	%r15,__LC_USER_TIMER
11010:	# check if the system time update has been done
1102	clg	%r9,BASED(.Lcleanup_system_call_insn+32)
1103	jh	0f
1104	lg	%r15,__LC_LAST_UPDATE_TIMER
1105	slg	%r15,__LC_EXIT_TIMER
1106	alg	%r15,__LC_SYSTEM_TIMER
1107	stg	%r15,__LC_SYSTEM_TIMER
11080:	# update accounting time stamp
1109	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
1110	# do LAST_BREAK
1111	lg	%r9,16(%r11)
1112	srag	%r9,%r9,23
1113	jz	0f
1114	lgr	%r9,%r12
1115	aghi	%r9,__TASK_thread
1116	mvc	__THREAD_last_break(8,%r9),16(%r11)
11170:	# set up saved register r11
1118	lg	%r15,__LC_KERNEL_STACK
1119	la	%r9,STACK_FRAME_OVERHEAD(%r15)
1120	stg	%r9,24(%r11)		# r11 pt_regs pointer
1121	# fill pt_regs
1122	mvc	__PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
1123	stmg	%r0,%r7,__PT_R0(%r9)
1124	mvc	__PT_PSW(16,%r9),__LC_SVC_OLD_PSW
1125	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
1126	xc	__PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
1127	mvi	__PT_FLAGS+7(%r9),_PIF_SYSCALL
1128	# setup saved register r15
1129	stg	%r15,56(%r11)		# r15 stack pointer
1130	# set new psw address and exit
1131	larl	%r9,.Lsysc_do_svc
1132	br	%r14
1133.Lcleanup_system_call_insn:
1134	.quad	system_call
1135	.quad	.Lsysc_stmg
1136	.quad	.Lsysc_per
1137	.quad	.Lsysc_vtime+36
1138	.quad	.Lsysc_vtime+42
1139
1140.Lcleanup_sysc_tif:
1141	larl	%r9,.Lsysc_tif
1142	br	%r14
1143
1144.Lcleanup_sysc_restore:
1145	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
1146	je	0f
1147	lg	%r9,24(%r11)		# get saved pointer to pt_regs
1148	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
1149	mvc	0(64,%r11),__PT_R8(%r9)
1150	lmg	%r0,%r7,__PT_R0(%r9)
11510:	lmg	%r8,%r9,__LC_RETURN_PSW
1152	br	%r14
1153.Lcleanup_sysc_restore_insn:
1154	.quad	.Lsysc_done - 4
1155
1156.Lcleanup_io_tif:
1157	larl	%r9,.Lio_tif
1158	br	%r14
1159
1160.Lcleanup_io_restore:
1161	clg	%r9,BASED(.Lcleanup_io_restore_insn)
1162	je	0f
1163	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
1164	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
1165	mvc	0(64,%r11),__PT_R8(%r9)
1166	lmg	%r0,%r7,__PT_R0(%r9)
11670:	lmg	%r8,%r9,__LC_RETURN_PSW
1168	br	%r14
1169.Lcleanup_io_restore_insn:
1170	.quad	.Lio_done - 4
1171
1172.Lcleanup_idle:
1173	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
1174	# copy interrupt clock & cpu timer
1175	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
1176	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
1177	cghi	%r11,__LC_SAVE_AREA_ASYNC
1178	je	0f
1179	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
1180	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
11810:	# check if stck & stpt have been executed
1182	clg	%r9,BASED(.Lcleanup_idle_insn)
1183	jhe	1f
1184	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
1185	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
11861:	# calculate idle cycles
1187#ifdef CONFIG_SMP
1188	clg	%r9,BASED(.Lcleanup_idle_insn)
1189	jl	3f
1190	larl	%r1,smp_cpu_mtid
1191	llgf	%r1,0(%r1)
1192	ltgr	%r1,%r1
1193	jz	3f
1194	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
1195	larl	%r3,mt_cycles
1196	ag	%r3,__LC_PERCPU_OFFSET
1197	la	%r4,__SF_EMPTY+16(%r15)
11982:	lg	%r0,0(%r3)
1199	slg	%r0,0(%r4)
1200	alg	%r0,64(%r4)
1201	stg	%r0,0(%r3)
1202	la	%r3,8(%r3)
1203	la	%r4,8(%r4)
1204	brct	%r1,2b
1205#endif
12063:	# account system time going idle
1207	lg	%r9,__LC_STEAL_TIMER
1208	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
1209	slg	%r9,__LC_LAST_UPDATE_CLOCK
1210	stg	%r9,__LC_STEAL_TIMER
1211	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
1212	lg	%r9,__LC_SYSTEM_TIMER
1213	alg	%r9,__LC_LAST_UPDATE_TIMER
1214	slg	%r9,__TIMER_IDLE_ENTER(%r2)
1215	stg	%r9,__LC_SYSTEM_TIMER
1216	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
1217	# prepare return psw
1218	nihh	%r8,0xfcfd		# clear irq & wait state bits
1219	lg	%r9,48(%r11)		# return from psw_idle
1220	br	%r14
1221.Lcleanup_idle_insn:
1222	.quad	.Lpsw_idle_lpsw
1223
1224.Lcleanup_save_fpu_regs:
1225	larl	%r9,save_fpu_regs
1226	br	%r14
1227
1228.Lcleanup_load_fpu_regs:
1229	larl	%r9,load_fpu_regs
1230	br	%r14
1231
1232/*
1233 * Integer constants
1234 */
1235	.align	8
1236.Lcritical_start:
1237	.quad	.L__critical_start
1238.Lcritical_length:
1239	.quad	.L__critical_end - .L__critical_start
1240#if IS_ENABLED(CONFIG_KVM)
1241.Lsie_critical_start:
1242	.quad	.Lsie_gmap
1243.Lsie_critical_length:
1244	.quad	.Lsie_done - .Lsie_gmap
1245#endif
1246
1247	.section .rodata, "a"
1248#define SYSCALL(esame,emu)	.long esame
1249	.globl	sys_call_table
1250sys_call_table:
1251#include "syscalls.S"
1252#undef SYSCALL
1253
1254#ifdef CONFIG_COMPAT
1255
1256#define SYSCALL(esame,emu)	.long emu
1257	.globl	sys_call_table_emu
1258sys_call_table_emu:
1259#include "syscalls.S"
1260#undef SYSCALL
1261#endif