v6.8
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 *    S390 low-level entry points.
  4 *
  5 *    Copyright IBM Corp. 1999, 2012
  6 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  7 *		 Hartmut Penner (hp@de.ibm.com),
  8 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  9 */
 10
 11#include <linux/export.h>
 12#include <linux/init.h>
 13#include <linux/linkage.h>
 14#include <asm/asm-extable.h>
 15#include <asm/alternative-asm.h>
 16#include <asm/processor.h>
 17#include <asm/cache.h>
 18#include <asm/dwarf.h>
 19#include <asm/errno.h>
 20#include <asm/ptrace.h>
 21#include <asm/thread_info.h>
 22#include <asm/asm-offsets.h>
 23#include <asm/unistd.h>
 24#include <asm/page.h>
 25#include <asm/sigp.h>
 26#include <asm/irq.h>
 27#include <asm/vx-insn.h>
 28#include <asm/setup.h>
 29#include <asm/nmi.h>
 30#include <asm/nospec-insn.h>
 31
 32_LPP_OFFSET	= __LC_LPP
 33
 34	.macro STBEAR address
 35	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193
 36	.endm
 37
 38	.macro LBEAR address
 39	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193
 40	.endm
 41
 42	.macro LPSWEY address,lpswe
 43	ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193
 44	.endm
 45
 46	.macro MBEAR reg
 47	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
 48	.endm
 49
 50	.macro	CHECK_STACK savearea
 51#ifdef CONFIG_CHECK_STACK
 52	tml	%r15,THREAD_SIZE - CONFIG_STACK_GUARD
 53	lghi	%r14,\savearea
 54	jz	stack_overflow
 55#endif
 56	.endm
 57
 58	.macro	CHECK_VMAP_STACK savearea,oklabel
 59#ifdef CONFIG_VMAP_STACK
 60	lgr	%r14,%r15
 61	nill	%r14,0x10000 - THREAD_SIZE
 62	oill	%r14,STACK_INIT_OFFSET
 63	clg	%r14,__LC_KERNEL_STACK
 64	je	\oklabel
 65	clg	%r14,__LC_ASYNC_STACK
 66	je	\oklabel
 67	clg	%r14,__LC_MCCK_STACK
 68	je	\oklabel
 69	clg	%r14,__LC_NODAT_STACK
 70	je	\oklabel
 71	clg	%r14,__LC_RESTART_STACK
 72	je	\oklabel
 73	lghi	%r14,\savearea
 74	j	stack_overflow
 75#else
 76	j	\oklabel
 77#endif
 78	.endm
 79
 80	/*
 81	 * The TSTMSK macro generates a test-under-mask instruction by
 82	 * calculating the memory offset for the specified mask value.
 83	 * Mask value can be any constant.  The macro shifts the mask
 84	 * value to calculate the memory offset for the test-under-mask
 85	 * instruction.
 86	 */
 87	.macro TSTMSK addr, mask, size=8, bytepos=0
 88		.if (\bytepos < \size) && (\mask >> 8)
 89			.if (\mask & 0xff)
 90				.error "Mask exceeds byte boundary"
 91			.endif
 92			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
 93			.exitm
 94		.endif
 95		.ifeq \mask
 96			.error "Mask must not be zero"
 97		.endif
 98		off = \size - \bytepos - 1
 99		tm	off+\addr, \mask
100	.endm
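	/*
	 * Expansion sketch (illustrative, not part of the original source):
	 * a one-byte mask tests the last byte of the 8-byte field, so
	 * TSTMSK __LC_CPU_FLAGS,_CIF_FPU becomes
	 * "tm 7+__LC_CPU_FLAGS,_CIF_FPU". A mask of 0x0100 recurses once
	 * with mask >> 8 and bytepos + 1 and becomes "tm 6+addr,0x01".
	 */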
101
102	.macro BPOFF
103	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82
104	.endm
105
106	.macro BPON
107	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
108	.endm
109
110	.macro BPENTER tif_ptr,tif_mask
111	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
112		    "j .+12; nop; nop", 82
113	.endm
114
115	.macro BPEXIT tif_ptr,tif_mask
116	TSTMSK	\tif_ptr,\tif_mask
117	ALTERNATIVE "jz .+8;  .insn rrf,0xb2e80000,0,0,12,0", \
118		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
119	.endm
120
121#if IS_ENABLED(CONFIG_KVM)
122	/*
123	 * The OUTSIDE macro jumps to the provided label in case the value
124	 * in the provided register is outside of the provided range. The
125	 * macro is useful for checking whether a PSW stored in a register
126	 * pair points inside or outside of a block of instructions.
127	 * @reg: register to check
128	 * @start: start of the range
129	 * @end: end of the range
130	 * @outside_label: jump here if @reg is outside of [@start..@end)
131	 */
132	.macro OUTSIDE reg,start,end,outside_label
133	lgr	%r14,\reg
134	larl	%r13,\start
135	slgr	%r14,%r13
136	clgfrl	%r14,.Lrange_size\@
137	jhe	\outside_label
138	.section .rodata, "a"
139	.balign 4
140.Lrange_size\@:
141	.long	\end - \start
142	.previous
143	.endm
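	/*
	 * Usage sketch (illustrative): OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f
	 * computes %r9 - .Lsie_gmap in %r14 and compares it, unsigned,
	 * against .Lsie_done - .Lsie_gmap; the branch to 1f is taken
	 * unless the address lies within [.Lsie_gmap...Lsie_done).
	 */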
144
145	.macro SIEEXIT
146	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
147	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
148	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
149	larl	%r9,sie_exit			# skip forward to sie_exit
150	.endm
151#endif
152
153	.macro STACKLEAK_ERASE
154#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
155	brasl	%r14,stackleak_erase_on_task_stack
156#endif
157	.endm
158
159	GEN_BR_THUNK %r14
160
161	.section .kprobes.text, "ax"
162.Ldummy:
163	/*
 164	 * The following nop exists only to prevent the next symbol from
 165	 * starting at the beginning of the kprobes text section.
 166	 * In that case there would be several symbols at the same address,
 167	 * and e.g. objdump would pick an arbitrary one when disassembling
168	 * the code.
169	 * With the added nop in between this cannot happen.
170	 */
171	nop	0
172
173/*
174 * Scheduler resume function, called by switch_to
175 *  gpr2 = (task_struct *) prev
176 *  gpr3 = (task_struct *) next
177 * Returns:
178 *  gpr2 = prev
179 */
180SYM_FUNC_START(__switch_to)
181	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
182	lghi	%r4,__TASK_stack
183	lghi	%r1,__TASK_thread
184	llill	%r5,STACK_INIT_OFFSET
185	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
186	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
187	agr	%r15,%r5			# end of kernel stack of next
188	stg	%r3,__LC_CURRENT		# store task struct of next
189	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
190	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
191	aghi	%r3,__TASK_pid
192	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
193	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
194	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
195	BR_EX	%r14
196SYM_FUNC_END(__switch_to)
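/*
 * C-side sketch (assumption, for illustration): the switch_to() macro
 * in arch/s390 ends up here roughly as
 *	prev = __switch_to(prev, next);
 * so gpr2 still identifies the previous task when the next task
 * resumes and reaches finish_task_switch().
 */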
197
198#if IS_ENABLED(CONFIG_KVM)
199/*
200 * __sie64a calling convention:
201 * %r2 pointer to sie control block phys
202 * %r3 pointer to sie control block virt
203 * %r4 guest register save area
204 */
205SYM_FUNC_START(__sie64a)
206	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
207	lg	%r12,__LC_CURRENT
208	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
209	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
210	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
211	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
212	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
213	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
214	lg	%r14,__LC_GMAP			# get gmap pointer
215	ltgr	%r14,%r14
216	jz	.Lsie_gmap
217	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
218.Lsie_gmap:
219	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
220	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
221	tm	__SIE_PROG20+3(%r14),3		# last exit...
222	jnz	.Lsie_skip
223	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
224	jo	.Lsie_skip			# exit if fp/vx regs changed
225	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
226	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
227.Lsie_entry:
228	sie	0(%r14)
229# Let the next instruction be NOP to avoid triggering a machine check
 230# and handling it in a guest as a result of the instruction execution.
231	nopr	7
232.Lsie_leave:
233	BPOFF
234	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
235.Lsie_skip:
236	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
237	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
238	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
239.Lsie_done:
240# some program checks are suppressing. C code (e.g. do_protection_exception)
241# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
242# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
243# Other instructions between __sie64a and .Lsie_done should not cause program
 244# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
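# (Illustrative note: each nopr below is two bytes, so rewinding the PSW
# from sie_exit by an ILC of 2, 4 or 6 lands on .Lrewind_pad2, _pad4 or
# _pad6 respectively; each pad has an EX_TABLE entry that redirects the
# fault to .Lsie_fault.)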
245.Lrewind_pad6:
246	nopr	7
247.Lrewind_pad4:
248	nopr	7
249.Lrewind_pad2:
250	nopr	7
251SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
252	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
253	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
254	xgr	%r0,%r0				# clear guest registers to
255	xgr	%r1,%r1				# prevent speculative use
256	xgr	%r3,%r3
257	xgr	%r4,%r4
258	xgr	%r5,%r5
259	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
260	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
261	BR_EX	%r14
262.Lsie_fault:
263	lghi	%r14,-EFAULT
264	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
265	j	sie_exit
266
267	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
268	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
269	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
270	EX_TABLE(sie_exit,.Lsie_fault)
271SYM_FUNC_END(__sie64a)
272EXPORT_SYMBOL(__sie64a)
273EXPORT_SYMBOL(sie_exit)
274#endif
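/*
 * C-side sketch (assumption, for illustration): kvm-s390 enters SIE
 * through a small wrapper, roughly
 *	rc = __sie64a(virt_to_phys(scb), scb, vcpu->run->s.regs.gprs);
 * A normal interception returns the zeroed reason code; .Lsie_fault
 * stores -EFAULT when a program check hits the sie instruction or its
 * landing pads.
 */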
275
276/*
277 * SVC interrupt handler routine. System calls are synchronous events and
278 * are entered with interrupts disabled.
279 */
280
281SYM_CODE_START(system_call)
282	stpt	__LC_SYS_ENTER_TIMER
283	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
284	BPOFF
285	lghi	%r14,0
286.Lsysc_per:
287	STBEAR	__LC_LAST_BREAK
288	lctlg	%c1,%c1,__LC_KERNEL_ASCE
289	lg	%r15,__LC_KERNEL_STACK
290	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
291	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
292	# clear user controlled register to prevent speculative use
293	xgr	%r0,%r0
294	xgr	%r1,%r1
295	xgr	%r4,%r4
296	xgr	%r5,%r5
297	xgr	%r6,%r6
298	xgr	%r7,%r7
299	xgr	%r8,%r8
300	xgr	%r9,%r9
301	xgr	%r10,%r10
302	xgr	%r11,%r11
303	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
304	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
305	MBEAR	%r2
306	lgr	%r3,%r14
307	brasl	%r14,__do_syscall
308	STACKLEAK_ERASE
309	lctlg	%c1,%c1,__LC_USER_ASCE
310	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
311	BPON
312	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
313	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
314	stpt	__LC_EXIT_TIMER
315	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
316SYM_CODE_END(system_call)
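/*
 * Illustrative summary (not part of the original source): an svc
 * instruction dispatches here via __LC_SVC_NEW_PSW. The handler
 * switches to the kernel ASCE and kernel stack, saves the user
 * registers into a struct pt_regs, calls __do_syscall() and returns
 * to user space with LPSWEY using the PSW kept in pt_regs.
 */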
317
318#
319# a new process exits the kernel with ret_from_fork
320#
321SYM_CODE_START(ret_from_fork)
322	lgr	%r3,%r11
323	brasl	%r14,__ret_from_fork
324	STACKLEAK_ERASE
325	lctlg	%c1,%c1,__LC_USER_ASCE
326	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
327	BPON
328	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
329	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
330	stpt	__LC_EXIT_TIMER
331	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
332SYM_CODE_END(ret_from_fork)
333
334/*
335 * Program check handler routine
336 */
337
338SYM_CODE_START(pgm_check_handler)
339	stpt	__LC_SYS_ENTER_TIMER
340	BPOFF
341	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
342	lghi	%r10,0
343	lmg	%r8,%r9,__LC_PGM_OLD_PSW
344	tmhh	%r8,0x0001		# coming from user space?
345	jno	.Lpgm_skip_asce
346	lctlg	%c1,%c1,__LC_KERNEL_ASCE
347	j	3f			# -> fault in user space
348.Lpgm_skip_asce:
349#if IS_ENABLED(CONFIG_KVM)
350	# cleanup critical section for program checks in __sie64a
351	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
352	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
353	SIEEXIT
354	lghi	%r10,_PIF_GUEST_FAULT
355#endif
3561:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
357	jnz	2f			# -> enabled, can't be a double fault
358	tm	__LC_PGM_ILC+3,0x80	# check for per exception
359	jnz	.Lpgm_svcper		# -> single stepped svc
3602:	CHECK_STACK __LC_SAVE_AREA_SYNC
361	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
362	# CHECK_VMAP_STACK branches to stack_overflow or 4f
363	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3643:	lg	%r15,__LC_KERNEL_STACK
3654:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
366	stg	%r10,__PT_FLAGS(%r11)
367	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
368	stmg	%r0,%r7,__PT_R0(%r11)
369	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
370	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
371	stmg	%r8,%r9,__PT_PSW(%r11)
372
373	# clear user controlled registers to prevent speculative use
374	xgr	%r0,%r0
375	xgr	%r1,%r1
376	xgr	%r3,%r3
377	xgr	%r4,%r4
378	xgr	%r5,%r5
379	xgr	%r6,%r6
380	xgr	%r7,%r7
381	lgr	%r2,%r11
382	brasl	%r14,__do_pgm_check
383	tmhh	%r8,0x0001		# returning to user space?
384	jno	.Lpgm_exit_kernel
385	STACKLEAK_ERASE
386	lctlg	%c1,%c1,__LC_USER_ASCE
387	BPON
388	stpt	__LC_EXIT_TIMER
389.Lpgm_exit_kernel:
390	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
391	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
392	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
393	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
394
395#
396# single stepped system call
397#
398.Lpgm_svcper:
399	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
400	larl	%r14,.Lsysc_per
401	stg	%r14,__LC_RETURN_PSW+8
402	lghi	%r14,1
403	LBEAR	__LC_PGM_LAST_BREAK
404	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
405SYM_CODE_END(pgm_check_handler)
406
407/*
408 * Interrupt handler macro used for external and IO interrupts.
409 */
410.macro INT_HANDLER name,lc_old_psw,handler
411SYM_CODE_START(\name)
412	stckf	__LC_INT_CLOCK
413	stpt	__LC_SYS_ENTER_TIMER
414	STBEAR	__LC_LAST_BREAK
415	BPOFF
416	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
417	lmg	%r8,%r9,\lc_old_psw
418	tmhh	%r8,0x0001			# interrupting from user ?
419	jnz	1f
420#if IS_ENABLED(CONFIG_KVM)
421	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,0f
422	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
423	SIEEXIT
424#endif
4250:	CHECK_STACK __LC_SAVE_AREA_ASYNC
426	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
427	j	2f
4281:	lctlg	%c1,%c1,__LC_KERNEL_ASCE
429	lg	%r15,__LC_KERNEL_STACK
4302:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
431	la	%r11,STACK_FRAME_OVERHEAD(%r15)
432	stmg	%r0,%r7,__PT_R0(%r11)
433	# clear user controlled registers to prevent speculative use
434	xgr	%r0,%r0
435	xgr	%r1,%r1
436	xgr	%r3,%r3
437	xgr	%r4,%r4
438	xgr	%r5,%r5
439	xgr	%r6,%r6
440	xgr	%r7,%r7
441	xgr	%r10,%r10
442	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
443	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
444	MBEAR	%r11
445	stmg	%r8,%r9,__PT_PSW(%r11)
446	lgr	%r2,%r11		# pass pointer to pt_regs
447	brasl	%r14,\handler
448	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
449	tmhh	%r8,0x0001		# returning to user ?
450	jno	2f
451	STACKLEAK_ERASE
452	lctlg	%c1,%c1,__LC_USER_ASCE
453	BPON
454	stpt	__LC_EXIT_TIMER
4552:	LBEAR	__PT_LAST_BREAK(%r11)
456	lmg	%r0,%r15,__PT_R0(%r11)
457	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
458SYM_CODE_END(\name)
459.endm
460
461INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
462INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
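/*
 * Illustrative note: the macro above is expanded twice, so
 * ext_int_handler and io_int_handler are two full copies of the same
 * code that differ only in the old-PSW lowcore field they load and
 * the C handler (do_ext_irq vs. do_io_irq) they call.
 */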
463
464/*
465 * Load idle PSW.
466 */
467SYM_FUNC_START(psw_idle)
468	stg	%r14,(__SF_GPRS+8*8)(%r15)
469	stg	%r3,__SF_EMPTY(%r15)
470	larl	%r1,psw_idle_exit
471	stg	%r1,__SF_EMPTY+8(%r15)
472	larl	%r1,smp_cpu_mtid
473	llgf	%r1,0(%r1)
474	ltgr	%r1,%r1
475	jz	.Lpsw_idle_stcctm
476	.insn	rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
477.Lpsw_idle_stcctm:
478	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
479	BPON
480	stckf	__CLOCK_IDLE_ENTER(%r2)
481	stpt	__TIMER_IDLE_ENTER(%r2)
482	lpswe	__SF_EMPTY(%r15)
483SYM_INNER_LABEL(psw_idle_exit, SYM_L_GLOBAL)
484	BR_EX	%r14
485SYM_FUNC_END(psw_idle)
486
487/*
488 * Machine check handler routines
489 */
490SYM_CODE_START(mcck_int_handler)
491	BPOFF
492	la	%r1,4095		# validate r1
493	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
494	LBEAR	__LC_LAST_BREAK_SAVE_AREA-4095(%r1)		# validate bear
495	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA		# validate gprs
496	lmg	%r8,%r9,__LC_MCK_OLD_PSW
497	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
498	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
499	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
500	jno	.Lmcck_panic		# control registers invalid -> panic
501	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA		# validate ctl regs
502	ptlb
503	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
504	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
505	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
506	jo	3f
507	la	%r14,__LC_SYS_ENTER_TIMER
508	clc	0(8,%r14),__LC_EXIT_TIMER
509	jl	1f
510	la	%r14,__LC_EXIT_TIMER
5111:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
512	jl	2f
513	la	%r14,__LC_LAST_UPDATE_TIMER
5142:	spt	0(%r14)
515	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
5163:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
517	jno	.Lmcck_panic
518	tmhh	%r8,0x0001		# interrupting from user ?
519	jnz	.Lmcck_user
520	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
521	jno	.Lmcck_panic
522#if IS_ENABLED(CONFIG_KVM)
523	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,.Lmcck_user
524	OUTSIDE	%r9,.Lsie_entry,.Lsie_leave,4f
525	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
5264:	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
527	SIEEXIT
528#endif
529.Lmcck_user:
530	lg	%r15,__LC_MCCK_STACK
531	la	%r11,STACK_FRAME_OVERHEAD(%r15)
532	stctg	%c1,%c1,__PT_CR1(%r11)
533	lctlg	%c1,%c1,__LC_KERNEL_ASCE
534	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
535	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
536	stmg	%r0,%r7,__PT_R0(%r11)
537	# clear user controlled registers to prevent speculative use
538	xgr	%r0,%r0
539	xgr	%r1,%r1
540	xgr	%r3,%r3
541	xgr	%r4,%r4
542	xgr	%r5,%r5
543	xgr	%r6,%r6
544	xgr	%r7,%r7
545	xgr	%r10,%r10
546	mvc	__PT_R8(64,%r11),0(%r14)
547	stmg	%r8,%r9,__PT_PSW(%r11)
548	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
549	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
550	lgr	%r2,%r11		# pass pointer to pt_regs
551	brasl	%r14,s390_do_machine_check
552	lctlg	%c1,%c1,__PT_CR1(%r11)
553	lmg	%r0,%r10,__PT_R0(%r11)
554	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
555	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
556	jno	0f
557	BPON
558	stpt	__LC_EXIT_TIMER
5590:	ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
560	LBEAR	0(%r12)
561	lmg	%r11,%r15,__PT_R11(%r11)
562	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE
563
564.Lmcck_panic:
565	/*
566	 * Iterate over all possible CPU addresses in the range 0..0xffff
567	 * and stop each CPU using signal processor. Use compare and swap
568	 * to allow just one CPU-stopper and prevent concurrent CPUs from
569	 * stopping each other while leaving the others running.
570	 */
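	/*
	 * Illustrative walk-through (not part of the original source):
	 * the winner of the cs() on stop_lock stores its own address via
	 * stap, sends SIGP_STOP to every other address in 0..0xffff
	 * (retrying while the busy condition is indicated) and finally
	 * stops itself at label 3.
	 */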
571	lhi	%r5,0
572	lhi	%r6,1
573	larl	%r7,stop_lock
574	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
575	jnz	4f
576	larl	%r7,this_cpu
577	stap	0(%r7)			# this CPU address
578	lh	%r4,0(%r7)
579	nilh	%r4,0
580	lhi	%r0,1
581	sll	%r0,16			# CPU counter
582	lhi	%r3,0			# next CPU address
5830:	cr	%r3,%r4
584	je	2f
5851:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
586	brc	SIGP_CC_BUSY,1b
5872:	ahi	%r3,1
588	brct	%r0,0b
5893:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
590	brc	SIGP_CC_BUSY,3b
5914:	j	4b
592SYM_CODE_END(mcck_int_handler)
593
594SYM_CODE_START(restart_int_handler)
595	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
596	stg	%r15,__LC_SAVE_AREA_RESTART
597	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
598	jz	0f
599	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA
6000:	larl	%r15,daton_psw
601	lpswe	0(%r15)				# turn dat on, keep irqs off
602.Ldaton:
603	lg	%r15,__LC_RESTART_STACK
604	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
605	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
606	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
607	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
608	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
609	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
610	lg	%r2,__LC_RESTART_DATA
611	lgf	%r3,__LC_RESTART_SOURCE
612	ltgr	%r3,%r3				# test source cpu address
613	jm	1f				# negative -> skip source stop
6140:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
615	brc	10,0b				# wait for status stored
6161:	basr	%r14,%r1			# call function
617	stap	__SF_EMPTY(%r15)		# store cpu address
618	llgh	%r3,__SF_EMPTY(%r15)
6192:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
620	brc	2,2b
6213:	j	3b
622SYM_CODE_END(restart_int_handler)
623
624	.section .kprobes.text, "ax"
625
626#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
627/*
628 * The synchronous or the asynchronous stack overflowed. We are dead.
629 * No need to properly save the registers, we are going to panic anyway.
630 * Setup a pt_regs so that show_trace can provide a good call trace.
631 */
632SYM_CODE_START(stack_overflow)
633	lg	%r15,__LC_NODAT_STACK	# change to panic stack
634	la	%r11,STACK_FRAME_OVERHEAD(%r15)
635	stmg	%r0,%r7,__PT_R0(%r11)
636	stmg	%r8,%r9,__PT_PSW(%r11)
637	mvc	__PT_R8(64,%r11),0(%r14)
638	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
639	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
640	lgr	%r2,%r11		# pass pointer to pt_regs
641	jg	kernel_stack_overflow
642SYM_CODE_END(stack_overflow)
643#endif
644
645	.section .data, "aw"
646	.balign	4
647SYM_DATA_LOCAL(stop_lock,	.long 0)
648SYM_DATA_LOCAL(this_cpu,	.short 0)
649	.balign	8
650SYM_DATA_START_LOCAL(daton_psw)
651	.quad	PSW_KERNEL_BITS
652	.quad	.Ldaton
653SYM_DATA_END(daton_psw)
654
655	.section .rodata, "a"
656#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
657SYM_DATA_START(sys_call_table)
658#include "asm/syscall_table.h"
659SYM_DATA_END(sys_call_table)
660#undef SYSCALL
661
662#ifdef CONFIG_COMPAT
663
664#define SYSCALL(esame,emu)	.quad __s390_ ## emu
665SYM_DATA_START(sys_call_table_emu)
666#include "asm/syscall_table.h"
667SYM_DATA_END(sys_call_table_emu)
668#undef SYSCALL
669#endif
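/*
 * Illustrative note: asm/syscall_table.h expands to one
 * SYSCALL(esame,emu) line per system call, so the #define above turns
 * each entry into a .quad pointing at the 64-bit (__s390x_) entry
 * point; under CONFIG_COMPAT the second table resolves the same list
 * to the 31-bit (__s390_) entry points.
 */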
v4.10.11
   1/*
   2 *    S390 low-level entry points.
   3 *
   4 *    Copyright IBM Corp. 1999, 2012
   5 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
   6 *		 Hartmut Penner (hp@de.ibm.com),
   7 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
   8 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
   9 */
  10
  11#include <linux/init.h>
  12#include <linux/linkage.h>
  13#include <asm/processor.h>
  14#include <asm/cache.h>
  15#include <asm/errno.h>
  16#include <asm/ptrace.h>
  17#include <asm/thread_info.h>
  18#include <asm/asm-offsets.h>
  19#include <asm/unistd.h>
  20#include <asm/page.h>
  21#include <asm/sigp.h>
  22#include <asm/irq.h>
  23#include <asm/vx-insn.h>
  24#include <asm/setup.h>
  25#include <asm/nmi.h>
  26#include <asm/export.h>
  27
  28__PT_R0      =	__PT_GPRS
  29__PT_R1      =	__PT_GPRS + 8
  30__PT_R2      =	__PT_GPRS + 16
  31__PT_R3      =	__PT_GPRS + 24
  32__PT_R4      =	__PT_GPRS + 32
  33__PT_R5      =	__PT_GPRS + 40
  34__PT_R6      =	__PT_GPRS + 48
  35__PT_R7      =	__PT_GPRS + 56
  36__PT_R8      =	__PT_GPRS + 64
  37__PT_R9      =	__PT_GPRS + 72
  38__PT_R10     =	__PT_GPRS + 80
  39__PT_R11     =	__PT_GPRS + 88
  40__PT_R12     =	__PT_GPRS + 96
  41__PT_R13     =	__PT_GPRS + 104
  42__PT_R14     =	__PT_GPRS + 112
  43__PT_R15     =	__PT_GPRS + 120
  44
  45STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
  46STACK_SIZE  = 1 << STACK_SHIFT
  47STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
  48
  49_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
  50		   _TIF_UPROBE)
  51_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
  52		   _TIF_SYSCALL_TRACEPOINT)
  53_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE | _CIF_FPU)
  54_PIF_WORK	= (_PIF_PER_TRAP)
  55
  56#define BASED(name) name-cleanup_critical(%r13)
  57
  58	.macro	TRACE_IRQS_ON
  59#ifdef CONFIG_TRACE_IRQFLAGS
  60	basr	%r2,%r0
  61	brasl	%r14,trace_hardirqs_on_caller
  62#endif
  63	.endm
  64
  65	.macro	TRACE_IRQS_OFF
  66#ifdef CONFIG_TRACE_IRQFLAGS
  67	basr	%r2,%r0
  68	brasl	%r14,trace_hardirqs_off_caller
  69#endif
  70	.endm
  71
  72	.macro	LOCKDEP_SYS_EXIT
  73#ifdef CONFIG_LOCKDEP
  74	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
  75	jz	.+10
  76	brasl	%r14,lockdep_sys_exit
  77#endif
  78	.endm
  79
  80	.macro	CHECK_STACK stacksize,savearea
  81#ifdef CONFIG_CHECK_STACK
  82	tml	%r15,\stacksize - CONFIG_STACK_GUARD
  83	lghi	%r14,\savearea
  84	jz	stack_overflow
  85#endif
  86	.endm
  87
  88	.macro	SWITCH_ASYNC savearea,timer
  89	tmhh	%r8,0x0001		# interrupting from user ?
  90	jnz	1f
  91	lgr	%r14,%r9
  92	slg	%r14,BASED(.Lcritical_start)
  93	clg	%r14,BASED(.Lcritical_length)
  94	jhe	0f
  95	lghi	%r11,\savearea		# inside critical section, do cleanup
  96	brasl	%r14,cleanup_critical
  97	tmhh	%r8,0x0001		# retest problem state after cleanup
  98	jnz	1f
  990:	lg	%r14,__LC_ASYNC_STACK	# are we already on the async stack?
 100	slgr	%r14,%r15
 101	srag	%r14,%r14,STACK_SHIFT
 102	jnz	2f
 103	CHECK_STACK 1<<STACK_SHIFT,\savearea
 104	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 105	j	3f
 1061:	LAST_BREAK %r14
 107	UPDATE_VTIME %r14,%r15,\timer
 1082:	lg	%r15,__LC_ASYNC_STACK	# load async stack
 1093:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 110	.endm
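	/*
	 * Illustrative note (not in the original source): %r9 holds the
	 * interrupted PSW address. If it lies inside the critical
	 * section, cleanup_critical first rolls the state forward to a
	 * consistent point; the srag then tests whether %r15 already
	 * points into the async stack so the stack is switched at most
	 * once.
	 */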
 111
 112	.macro UPDATE_VTIME w1,w2,enter_timer
 113	lg	\w1,__LC_EXIT_TIMER
 114	lg	\w2,__LC_LAST_UPDATE_TIMER
 115	slg	\w1,\enter_timer
 116	slg	\w2,__LC_EXIT_TIMER
 117	alg	\w1,__LC_USER_TIMER
 118	alg	\w2,__LC_SYSTEM_TIMER
 119	stg	\w1,__LC_USER_TIMER
 120	stg	\w2,__LC_SYSTEM_TIMER
 121	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
 122	.endm
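	/*
	 * Sketch of the arithmetic (the CPU timer counts down):
	 *	USER_TIMER   += EXIT_TIMER - \enter_timer
	 *	SYSTEM_TIMER += LAST_UPDATE_TIMER - EXIT_TIMER
	 *	LAST_UPDATE_TIMER = \enter_timer
	 * i.e. the interval since the last kernel exit is charged as user
	 * time, the preceding in-kernel interval as system time.
	 */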
 123
 124	.macro	LAST_BREAK scratch
 125	srag	\scratch,%r10,23
 126#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
 127	jz	.+10
 128	stg	%r10,__TASK_thread+__THREAD_last_break(%r12)
 129#else
 130	jz	.+14
 131	lghi	\scratch,__TASK_thread
 132	stg	%r10,__THREAD_last_break(\scratch,%r12)
 133#endif
 134	.endm
 135
 136	.macro REENABLE_IRQS
 137	stg	%r8,__LC_RETURN_PSW
 138	ni	__LC_RETURN_PSW,0xbf
 139	ssm	__LC_RETURN_PSW
 140	.endm
 141
 142	.macro STCK savearea
 143#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
 144	.insn	s,0xb27c0000,\savearea		# store clock fast
 145#else
 146	.insn	s,0xb2050000,\savearea		# store clock
 147#endif
 148	.endm
 149
 150	/*
 151	 * The TSTMSK macro generates a test-under-mask instruction by
 152	 * calculating the memory offset for the specified mask value.
 153	 * Mask value can be any constant.  The macro shifts the mask
 154	 * value to calculate the memory offset for the test-under-mask
 155	 * instruction.
 156	 */
 157	.macro TSTMSK addr, mask, size=8, bytepos=0
 158		.if (\bytepos < \size) && (\mask >> 8)
 159			.if (\mask & 0xff)
 160				.error "Mask exceeds byte boundary"
 161			.endif
 162			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
 163			.exitm
 164		.endif
 165		.ifeq \mask
 166			.error "Mask must not be zero"
 167		.endif
 168		off = \size - \bytepos - 1
 169		tm	off+\addr, \mask
 170	.endm
 171
 172	.section .kprobes.text, "ax"
 173.Ldummy:
 174	/*
 175	 * This nop exists only to prevent __switch_to from starting at
 176	 * the beginning of the kprobes text section. In that case we would
 177	 * have several symbols at the same address, and e.g. objdump would
 178	 * pick an arbitrary symbol name when disassembling this code.
 179	 * With the added nop in between the __switch_to symbol is unique
 180	 * again.
 181	 */
 182	nop	0
 183
 184/*
 185 * Scheduler resume function, called by switch_to
 186 *  gpr2 = (task_struct *) prev
 187 *  gpr3 = (task_struct *) next
 188 * Returns:
 189 *  gpr2 = prev
 190 */
 191ENTRY(__switch_to)
 192	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
 193	lgr	%r1,%r2
 194	aghi	%r1,__TASK_thread		# thread_struct of prev task
 195	lg	%r5,__TASK_stack(%r3)		# start of kernel stack of next
 196	stg	%r15,__THREAD_ksp(%r1)		# store kernel stack of prev
 197	lgr	%r1,%r3
 198	aghi	%r1,__TASK_thread		# thread_struct of next task
 199	lgr	%r15,%r5
 200	aghi	%r15,STACK_INIT			# end of kernel stack of next
 201	stg	%r3,__LC_CURRENT		# store task struct of next
 202	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
 203	lg	%r15,__THREAD_ksp(%r1)		# load kernel stack of next
 204	/* c4 is used in guest detection: arch/s390/kernel/perf_cpum_sf.c */
 205	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
 206	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
 207	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 208	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
 209	bzr	%r14
 210	.insn	s,0xb2800000,__LC_LPP		# set program parameter
 211	br	%r14
 212
 213.L__critical_start:
 214
 215#if IS_ENABLED(CONFIG_KVM)
 216/*
 217 * sie64a calling convention:
 218 * %r2 pointer to sie control block
 219 * %r3 guest register save area
 220 */
 221ENTRY(sie64a)
 222	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
 223	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
 224	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
 225	xc	__SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
 226	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
 227	jno	.Lsie_load_guest_gprs
 228	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
 229.Lsie_load_guest_gprs:
 230	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
 231	lg	%r14,__LC_GMAP			# get gmap pointer
 232	ltgr	%r14,%r14
 233	jz	.Lsie_gmap
 234	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
 235.Lsie_gmap:
 236	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
 237	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
 238	tm	__SIE_PROG20+3(%r14),3		# last exit...
 239	jnz	.Lsie_skip
 240	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 241	jo	.Lsie_skip			# exit if fp/vx regs changed
 242	sie	0(%r14)
 243.Lsie_skip:
 244	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
 245	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 246.Lsie_done:
 247# some program checks are suppressing. C code (e.g. do_protection_exception)
 248# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
 249# instructions between sie64a and .Lsie_done should not cause program
 250# interrupts. So let's use a nop (47 00 00 00) as a landing pad.
 251# See also .Lcleanup_sie
 252.Lrewind_pad:
 253	nop	0
 254	.globl sie_exit
 255sie_exit:
 256	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
 257	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
 258	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
 259	lg	%r2,__SF_EMPTY+16(%r15)		# return exit reason code
 260	br	%r14
 261.Lsie_fault:
 262	lghi	%r14,-EFAULT
 263	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
 264	j	sie_exit
 265
 266	EX_TABLE(.Lrewind_pad,.Lsie_fault)
 267	EX_TABLE(sie_exit,.Lsie_fault)
 268EXPORT_SYMBOL(sie64a)
 269EXPORT_SYMBOL(sie_exit)
 270#endif
 271
 272/*
 273 * SVC interrupt handler routine. System calls are synchronous events and
 274 * are executed with interrupts enabled.
 275 */
 276
 277ENTRY(system_call)
 278	stpt	__LC_SYNC_ENTER_TIMER
 279.Lsysc_stmg:
 280	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 281	lg	%r10,__LC_LAST_BREAK
 282	lg	%r12,__LC_CURRENT
 283	lghi	%r14,_PIF_SYSCALL
 284.Lsysc_per:
 285	lg	%r15,__LC_KERNEL_STACK
 286	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
 287	LAST_BREAK %r13
 288.Lsysc_vtime:
 289	UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
 290	stmg	%r0,%r7,__PT_R0(%r11)
 291	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 292	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
 293	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 294	stg	%r14,__PT_FLAGS(%r11)
 295.Lsysc_do_svc:
 296	# load address of system call table
 297#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
 298	lg	%r10,__TASK_thread+__THREAD_sysc_table(%r12)
 299#else
 300	lghi	%r13,__TASK_thread
 301	lg	%r10,__THREAD_sysc_table(%r13,%r12)
 302#endif
 303	llgh	%r8,__PT_INT_CODE+2(%r11)
 304	slag	%r8,%r8,2			# shift and test for svc 0
 305	jnz	.Lsysc_nr_ok
 306	# svc 0: system call number in %r1
 307	llgfr	%r1,%r1				# clear high word in r1
 308	cghi	%r1,NR_syscalls
 309	jnl	.Lsysc_nr_ok
 310	sth	%r1,__PT_INT_CODE+2(%r11)
 311	slag	%r8,%r1,2
 312.Lsysc_nr_ok:
 313	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 314	stg	%r2,__PT_ORIG_GPR2(%r11)
 315	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
 316	lgf	%r9,0(%r8,%r10)			# get system call add.
 317	TSTMSK	__TI_flags(%r12),_TIF_TRACE
 318	jnz	.Lsysc_tracesys
 319	basr	%r14,%r9			# call sys_xxxx
 320	stg	%r2,__PT_R2(%r11)		# store return value
 321
 322.Lsysc_return:
 323	LOCKDEP_SYS_EXIT
 324.Lsysc_tif:
 325	TSTMSK	__PT_FLAGS(%r11),_PIF_WORK
 326	jnz	.Lsysc_work
 327	TSTMSK	__TI_flags(%r12),_TIF_WORK
 328	jnz	.Lsysc_work			# check for work
 329	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
 330	jnz	.Lsysc_work
 331.Lsysc_restore:
 332	lg	%r14,__LC_VDSO_PER_CPU
 333	lmg	%r0,%r10,__PT_R0(%r11)
 334	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
 335	stpt	__LC_EXIT_TIMER
 336	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 337	lmg	%r11,%r15,__PT_R11(%r11)
 338	lpswe	__LC_RETURN_PSW
 339.Lsysc_done:
 340
 341#
 342# One of the work bits is on. Find out which one.
 343#
 344.Lsysc_work:
 345	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
 346	jo	.Lsysc_mcck_pending
 347	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
 348	jo	.Lsysc_reschedule
 349#ifdef CONFIG_UPROBES
 350	TSTMSK	__TI_flags(%r12),_TIF_UPROBE
 351	jo	.Lsysc_uprobe_notify
 352#endif
 353	TSTMSK	__PT_FLAGS(%r11),_PIF_PER_TRAP
 354	jo	.Lsysc_singlestep
 355	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
 356	jo	.Lsysc_sigpending
 357	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
 358	jo	.Lsysc_notify_resume
 359	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 360	jo	.Lsysc_vxrs
 361	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE
 362	jo	.Lsysc_uaccess
 363	j	.Lsysc_return		# beware of critical section cleanup
 364
 365#
 366# _TIF_NEED_RESCHED is set, call schedule
 367#
 368.Lsysc_reschedule:
 369	larl	%r14,.Lsysc_return
 370	jg	schedule
 371
 372#
 373# _CIF_MCCK_PENDING is set, call handler
 374#
 375.Lsysc_mcck_pending:
 376	larl	%r14,.Lsysc_return
 377	jg	s390_handle_mcck	# TIF bit will be cleared by handler
 378
 379#
 380# _CIF_ASCE is set, load user space asce
 381#
 382.Lsysc_uaccess:
 383	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
 384	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 385	j	.Lsysc_return
 386
 387#
 388# CIF_FPU is set, restore floating-point controls and floating-point registers.
 389#
 390.Lsysc_vxrs:
 391	larl	%r14,.Lsysc_return
 392	jg	load_fpu_regs
 393
 394#
 395# _TIF_SIGPENDING is set, call do_signal
 396#
 397.Lsysc_sigpending:
 398	lgr	%r2,%r11		# pass pointer to pt_regs
 399	brasl	%r14,do_signal
 400	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
 401	jno	.Lsysc_return
 402	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
 403	lghi	%r8,0			# svc 0 returns -ENOSYS
 404	llgh	%r1,__PT_INT_CODE+2(%r11)	# load new svc number
 405	cghi	%r1,NR_syscalls
 406	jnl	.Lsysc_nr_ok		# invalid svc number -> do svc 0
 407	slag	%r8,%r1,2
 408	j	.Lsysc_nr_ok		# restart svc
 409
 410#
 411# _TIF_NOTIFY_RESUME is set, call do_notify_resume
 412#
 413.Lsysc_notify_resume:
 414	lgr	%r2,%r11		# pass pointer to pt_regs
 415	larl	%r14,.Lsysc_return
 416	jg	do_notify_resume
 417
 418#
 419# _TIF_UPROBE is set, call uprobe_notify_resume
 420#
 421#ifdef CONFIG_UPROBES
 422.Lsysc_uprobe_notify:
 423	lgr	%r2,%r11		# pass pointer to pt_regs
 424	larl	%r14,.Lsysc_return
 425	jg	uprobe_notify_resume
 426#endif
 427
 428#
 429# _PIF_PER_TRAP is set, call do_per_trap
 430#
 431.Lsysc_singlestep:
 432	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
 433	lgr	%r2,%r11		# pass pointer to pt_regs
 434	larl	%r14,.Lsysc_return
 435	jg	do_per_trap
 436
 437#
 438# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
 439# and after the system call
 440#
 441.Lsysc_tracesys:
 442	lgr	%r2,%r11		# pass pointer to pt_regs
 443	la	%r3,0
 444	llgh	%r0,__PT_INT_CODE+2(%r11)
 445	stg	%r0,__PT_R2(%r11)
 446	brasl	%r14,do_syscall_trace_enter
 447	lghi	%r0,NR_syscalls
 448	clgr	%r0,%r2
 449	jnh	.Lsysc_tracenogo
 450	sllg	%r8,%r2,2
 451	lgf	%r9,0(%r8,%r10)
 452.Lsysc_tracego:
 453	lmg	%r3,%r7,__PT_R3(%r11)
 454	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
 455	lg	%r2,__PT_ORIG_GPR2(%r11)
 456	basr	%r14,%r9		# call sys_xxx
 457	stg	%r2,__PT_R2(%r11)	# store return value
 458.Lsysc_tracenogo:
 459	TSTMSK	__TI_flags(%r12),_TIF_TRACE
 460	jz	.Lsysc_return
 461	lgr	%r2,%r11		# pass pointer to pt_regs
 462	larl	%r14,.Lsysc_return
 463	jg	do_syscall_trace_exit
 464
 465#
 466# a new process exits the kernel with ret_from_fork
 467#
 468ENTRY(ret_from_fork)
 469	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 470	lg	%r12,__LC_CURRENT
 471	brasl	%r14,schedule_tail
 472	TRACE_IRQS_ON
 473	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 474	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
 475	jne	.Lsysc_tracenogo
 476	# it's a kernel thread
 477	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
 478ENTRY(kernel_thread_starter)
 479	la	%r2,0(%r10)
 480	basr	%r14,%r9
 481	j	.Lsysc_tracenogo
 482
 483/*
 484 * Program check handler routine
 485 */
 486
 487ENTRY(pgm_check_handler)
 488	stpt	__LC_SYNC_ENTER_TIMER
 489	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 490	lg	%r10,__LC_LAST_BREAK
 491	lg	%r12,__LC_CURRENT
 492	larl	%r13,cleanup_critical
 493	lmg	%r8,%r9,__LC_PGM_OLD_PSW
 494	tmhh	%r8,0x0001		# test problem state bit
 495	jnz	2f			# -> fault in user space
 496#if IS_ENABLED(CONFIG_KVM)
 497	# cleanup critical section for sie64a
 498	lgr	%r14,%r9
 499	slg	%r14,BASED(.Lsie_critical_start)
 500	clg	%r14,BASED(.Lsie_critical_length)
 501	jhe	0f
 502	brasl	%r14,.Lcleanup_sie
 503#endif
 5040:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
 505	jnz	1f			# -> enabled, can't be a double fault
 506	tm	__LC_PGM_ILC+3,0x80	# check for per exception
 507	jnz	.Lpgm_svcper		# -> single stepped svc
 5081:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
 509	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 510	j	3f
 5112:	LAST_BREAK %r14
 512	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
 513	lg	%r15,__LC_KERNEL_STACK
 514	lgr	%r14,%r12
 515	aghi	%r14,__TASK_thread	# pointer to thread_struct
 516	lghi	%r13,__LC_PGM_TDB
 517	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
 518	jz	3f
 519	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
 5203:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 521	stmg	%r0,%r7,__PT_R0(%r11)
 522	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 523	stmg	%r8,%r9,__PT_PSW(%r11)
 524	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
 525	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
 526	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 527	stg	%r10,__PT_ARGS(%r11)
 528	tm	__LC_PGM_ILC+3,0x80	# check for per exception
 529	jz	4f
 530	tmhh	%r8,0x0001		# kernel per event ?
 531	jz	.Lpgm_kprobe
 532	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
 533	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
 534	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
 535	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
 5364:	REENABLE_IRQS
 537	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 538	larl	%r1,pgm_check_table
 539	llgh	%r10,__PT_INT_CODE+2(%r11)
 540	nill	%r10,0x007f
 541	sll	%r10,2
 542	je	.Lpgm_return
 543	lgf	%r1,0(%r10,%r1)		# load address of handler routine
 544	lgr	%r2,%r11		# pass pointer to pt_regs
 545	basr	%r14,%r1		# branch to interrupt-handler
 546.Lpgm_return:
 547	LOCKDEP_SYS_EXIT
 548	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
 549	jno	.Lsysc_restore
 550	j	.Lsysc_tif
 551
 552#
 553# PER event in supervisor state, must be kprobes
 554#
 555.Lpgm_kprobe:
 556	REENABLE_IRQS
 557	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 558	lgr	%r2,%r11		# pass pointer to pt_regs
 559	brasl	%r14,do_per_trap
 560	j	.Lpgm_return
 561
 562#
 563# single stepped system call
 564#
 565.Lpgm_svcper:
 566	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
 567	larl	%r14,.Lsysc_per
 568	stg	%r14,__LC_RETURN_PSW+8
 569	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
 570	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs
 571
 572/*
 573 * IO interrupt handler routine
 574 */
 575ENTRY(io_int_handler)
 576	STCK	__LC_INT_CLOCK
 577	stpt	__LC_ASYNC_ENTER_TIMER
 578	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 579	lg	%r10,__LC_LAST_BREAK
 580	lg	%r12,__LC_CURRENT
 581	larl	%r13,cleanup_critical
 582	lmg	%r8,%r9,__LC_IO_OLD_PSW
 583	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 584	stmg	%r0,%r7,__PT_R0(%r11)
 585	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 586	stmg	%r8,%r9,__PT_PSW(%r11)
 587	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
 588	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 589	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
 590	jo	.Lio_restore
 591	TRACE_IRQS_OFF
 592	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 593.Lio_loop:
 594	lgr	%r2,%r11		# pass pointer to pt_regs
 595	lghi	%r3,IO_INTERRUPT
 596	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
 597	jz	.Lio_call
 598	lghi	%r3,THIN_INTERRUPT
 599.Lio_call:
 600	brasl	%r14,do_IRQ
 601	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
 602	jz	.Lio_return
 603	tpi	0
 604	jz	.Lio_return
 605	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
 606	j	.Lio_loop
 607.Lio_return:
 608	LOCKDEP_SYS_EXIT
 609	TRACE_IRQS_ON
 610.Lio_tif:
 611	TSTMSK	__TI_flags(%r12),_TIF_WORK
 612	jnz	.Lio_work		# there is work to do (signals etc.)
 613	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
 614	jnz	.Lio_work
 615.Lio_restore:
 616	lg	%r14,__LC_VDSO_PER_CPU
 617	lmg	%r0,%r10,__PT_R0(%r11)
 618	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
 619	stpt	__LC_EXIT_TIMER
 620	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 621	lmg	%r11,%r15,__PT_R11(%r11)
 622	lpswe	__LC_RETURN_PSW
 623.Lio_done:
 624
 625#
 626# There is work to do; find out in which context we have been interrupted:
 627# 1) if we return to user space we can do all _TIF_WORK work
 628# 2) if we return to kernel code and kvm is enabled check if we need to
 629#    modify the psw to leave SIE
 630# 3) if we return to kernel code and preemptive scheduling is enabled check
 631#    the preemption counter and if it is zero call preempt_schedule_irq
 632# Before any work can be done, a switch to the kernel stack is required.
 633#
 634.Lio_work:
 635	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
 636	jo	.Lio_work_user		# yes -> do resched & signal
 637#ifdef CONFIG_PREEMPT
 638	# check for preemptive scheduling
 639	icm	%r0,15,__LC_PREEMPT_COUNT
 640	jnz	.Lio_restore		# preemption is disabled
 641	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
 642	jno	.Lio_restore
 643	# switch to kernel stack
 644	lg	%r1,__PT_R15(%r11)
 645	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 646	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 647	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
 648	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 649	lgr	%r15,%r1
 650	# TRACE_IRQS_ON already done at .Lio_return, call
 651	# TRACE_IRQS_OFF to keep things symmetrical
 652	TRACE_IRQS_OFF
 653	brasl	%r14,preempt_schedule_irq
 654	j	.Lio_return
 655#else
 656	j	.Lio_restore
 657#endif
 658
 659#
 660# Need to do work before returning to userspace, switch to kernel stack
 661#
 662.Lio_work_user:
 663	lg	%r1,__LC_KERNEL_STACK
 664	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 665	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
 666	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 667	lgr	%r15,%r1
 668
 669#
 670# One of the work bits is on. Find out which one.
 671#
 672.Lio_work_tif:
 673	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
 674	jo	.Lio_mcck_pending
 675	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
 676	jo	.Lio_reschedule
 677	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
 678	jo	.Lio_sigpending
 679	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
 680	jo	.Lio_notify_resume
 681	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 682	jo	.Lio_vxrs
 683	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE
 684	jo	.Lio_uaccess
 685	j	.Lio_return		# beware of critical section cleanup
 686
 687#
 688# _CIF_MCCK_PENDING is set, call handler
 689#
 690.Lio_mcck_pending:
 691	# TRACE_IRQS_ON already done at .Lio_return
 692	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
 693	TRACE_IRQS_OFF
 694	j	.Lio_return
 695
 696#
 697# _CIF_ASCE is set, load user space asce
 698#
 699.Lio_uaccess:
 700	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
 701	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 702	j	.Lio_return
 703
 704#
 705# CIF_FPU is set, restore floating-point controls and floating-point registers.
 706#
 707.Lio_vxrs:
 708	larl	%r14,.Lio_return
 709	jg	load_fpu_regs
 710
 711#
 712# _TIF_NEED_RESCHED is set, call schedule
 713#
 714.Lio_reschedule:
 715	# TRACE_IRQS_ON already done at .Lio_return
 716	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 717	brasl	%r14,schedule		# call scheduler
 718	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 719	TRACE_IRQS_OFF
 720	j	.Lio_return
 721
 722#
 723# _TIF_SIGPENDING is set, call do_signal
 724#
 725.Lio_sigpending:
 726	# TRACE_IRQS_ON already done at .Lio_return
 727	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 728	lgr	%r2,%r11		# pass pointer to pt_regs
 729	brasl	%r14,do_signal
 730	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 731	TRACE_IRQS_OFF
 732	j	.Lio_return
 733
 734#
 735# _TIF_NOTIFY_RESUME is set, call do_notify_resume
 736#
 737.Lio_notify_resume:
 738	# TRACE_IRQS_ON already done at .Lio_return
 739	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 740	lgr	%r2,%r11		# pass pointer to pt_regs
 741	brasl	%r14,do_notify_resume
 742	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 743	TRACE_IRQS_OFF
 744	j	.Lio_return
 745
 746/*
 747 * External interrupt handler routine
 748 */
 749ENTRY(ext_int_handler)
 750	STCK	__LC_INT_CLOCK
 751	stpt	__LC_ASYNC_ENTER_TIMER
 752	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 753	lg	%r10,__LC_LAST_BREAK
 754	lg	%r12,__LC_CURRENT
 755	larl	%r13,cleanup_critical
 756	lmg	%r8,%r9,__LC_EXT_OLD_PSW
 757	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 758	stmg	%r0,%r7,__PT_R0(%r11)
 759	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 760	stmg	%r8,%r9,__PT_PSW(%r11)
 761	lghi	%r1,__LC_EXT_PARAMS2
 762	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
 763	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
 764	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
 765	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 766	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
 767	jo	.Lio_restore
 768	TRACE_IRQS_OFF
 769	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 770	lgr	%r2,%r11		# pass pointer to pt_regs
 771	lghi	%r3,EXT_INTERRUPT
 772	brasl	%r14,do_IRQ
 773	j	.Lio_return
 774
 775/*
 776 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 777 */
 778ENTRY(psw_idle)
 779	stg	%r3,__SF_EMPTY(%r15)
 780	larl	%r1,.Lpsw_idle_lpsw+4
 781	stg	%r1,__SF_EMPTY+8(%r15)
 782#ifdef CONFIG_SMP
 783	larl	%r1,smp_cpu_mtid
 784	llgf	%r1,0(%r1)
 785	ltgr	%r1,%r1
 786	jz	.Lpsw_idle_stcctm
 787	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
 788.Lpsw_idle_stcctm:
 789#endif
 790	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
 791	STCK	__CLOCK_IDLE_ENTER(%r2)
 792	stpt	__TIMER_IDLE_ENTER(%r2)
 793.Lpsw_idle_lpsw:
 794	lpswe	__SF_EMPTY(%r15)
 795	br	%r14
 796.Lpsw_idle_end:
 797
 798/*
 799 * Store floating-point controls and floating-point or vector registers,
 800 * depending on whether the vector facility is available. A critical section
 801 * cleanup assures that the registers are stored even if interrupted for
 802 * some other work.  The CIF_FPU flag is set to trigger a lazy restore
 803 * of the register contents at return from io or a system call.
 804 */
 805ENTRY(save_fpu_regs)
 806	lg	%r2,__LC_CURRENT
 807	aghi	%r2,__TASK_thread
 808	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 809	bor	%r14
 810	stfpc	__THREAD_FPU_fpc(%r2)
 811	lg	%r3,__THREAD_FPU_regs(%r2)
 812	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 813	jz	.Lsave_fpu_regs_fp	  # no -> store FP regs
 814	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
 815	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
 816	j	.Lsave_fpu_regs_done	  # -> set CIF_FPU flag
 817.Lsave_fpu_regs_fp:
 818	std	0,0(%r3)
 819	std	1,8(%r3)
 820	std	2,16(%r3)
 821	std	3,24(%r3)
 822	std	4,32(%r3)
 823	std	5,40(%r3)
 824	std	6,48(%r3)
 825	std	7,56(%r3)
 826	std	8,64(%r3)
 827	std	9,72(%r3)
 828	std	10,80(%r3)
 829	std	11,88(%r3)
 830	std	12,96(%r3)
 831	std	13,104(%r3)
 832	std	14,112(%r3)
 833	std	15,120(%r3)
 834.Lsave_fpu_regs_done:
 835	oi	__LC_CPU_FLAGS+7,_CIF_FPU
 836	br	%r14
 837.Lsave_fpu_regs_end:
 838#if IS_ENABLED(CONFIG_KVM)
 839EXPORT_SYMBOL(save_fpu_regs)
 840#endif
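/*
 * Usage sketch (illustrative): kernel code that needs the FPU calls
 * save_fpu_regs() first; the user contents are parked in thread.fpu
 * and CIF_FPU is set. The "bor %r14" above makes repeated calls cheap
 * once CIF_FPU is already set, and load_fpu_regs restores the state
 * lazily from the sysc/io return paths.
 */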
 841
 842/*
 843 * Load floating-point controls and floating-point or vector registers.
 844 * A critical section cleanup assures that the register contents are
 845 * loaded even if interrupted for some other work.
 846 *
 847 * There are special calling conventions to fit into sysc and io return work:
 848 *	%r15:	<kernel stack>
 849 * The function requires:
 850 *	%r4
 851 */
 852load_fpu_regs:
 853	lg	%r4,__LC_CURRENT
 854	aghi	%r4,__TASK_thread
 855	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 856	bnor	%r14
 857	lfpc	__THREAD_FPU_fpc(%r4)
 858	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 859	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
 860	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
 861	VLM	%v0,%v15,0,%r4
 862	VLM	%v16,%v31,256,%r4
 863	j	.Lload_fpu_regs_done
 864.Lload_fpu_regs_fp:
 865	ld	0,0(%r4)
 866	ld	1,8(%r4)
 867	ld	2,16(%r4)
 868	ld	3,24(%r4)
 869	ld	4,32(%r4)
 870	ld	5,40(%r4)
 871	ld	6,48(%r4)
 872	ld	7,56(%r4)
 873	ld	8,64(%r4)
 874	ld	9,72(%r4)
 875	ld	10,80(%r4)
 876	ld	11,88(%r4)
 877	ld	12,96(%r4)
 878	ld	13,104(%r4)
 879	ld	14,112(%r4)
 880	ld	15,120(%r4)
 881.Lload_fpu_regs_done:
 882	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
 883	br	%r14
 884.Lload_fpu_regs_end:
 885
 886.L__critical_end:
 887
 888/*
 889 * Machine check handler routines
 890 */
 891ENTRY(mcck_int_handler)
 892	STCK	__LC_MCCK_CLOCK
 893	la	%r1,4095		# revalidate r1
 894	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
 895	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
 896	lg	%r10,__LC_LAST_BREAK
 897	lg	%r12,__LC_CURRENT
 898	larl	%r13,cleanup_critical
 899	lmg	%r8,%r9,__LC_MCK_OLD_PSW
 900	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
 901	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
 902	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
 903	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
 904	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
 905	jo	3f
 906	la	%r14,__LC_SYNC_ENTER_TIMER
 907	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
 908	jl	0f
 909	la	%r14,__LC_ASYNC_ENTER_TIMER
 9100:	clc	0(8,%r14),__LC_EXIT_TIMER
 911	jl	1f
 912	la	%r14,__LC_EXIT_TIMER
 9131:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
 914	jl	2f
 915	la	%r14,__LC_LAST_UPDATE_TIMER
 9162:	spt	0(%r14)
 917	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
 9183:	TSTMSK	__LC_MCCK_CODE,(MCCK_CODE_PSW_MWP_VALID|MCCK_CODE_PSW_IA_VALID)
 919	jno	.Lmcck_panic		# no -> skip cleanup critical
 920	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
 921.Lmcck_skip:
 922	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
 923	stmg	%r0,%r7,__PT_R0(%r11)
 924	mvc	__PT_R8(64,%r11),0(%r14)
 925	stmg	%r8,%r9,__PT_PSW(%r11)
 926	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 927	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 928	lgr	%r2,%r11		# pass pointer to pt_regs
 929	brasl	%r14,s390_do_machine_check
 930	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
 931	jno	.Lmcck_return
 932	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
 933	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 934	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
 935	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 936	lgr	%r15,%r1
 937	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
 938	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
 939	jno	.Lmcck_return
 940	TRACE_IRQS_OFF
 941	brasl	%r14,s390_handle_mcck
 942	TRACE_IRQS_ON
 943.Lmcck_return:
 944	lg	%r14,__LC_VDSO_PER_CPU
 945	lmg	%r0,%r10,__PT_R0(%r11)
 946	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
 947	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
 948	jno	0f
 949	stpt	__LC_EXIT_TIMER
 950	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 9510:	lmg	%r11,%r15,__PT_R11(%r11)
 952	lpswe	__LC_RETURN_MCCK_PSW
 953
 954.Lmcck_panic:
 955	lg	%r15,__LC_PANIC_STACK
 956	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 957	j	.Lmcck_skip
 958
 959#
 960# PSW restart interrupt handler
 961#
 962ENTRY(restart_int_handler)
 963	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
 964	jz	0f
 965	.insn	s,0xb2800000,__LC_LPP
 9660:	stg	%r15,__LC_SAVE_AREA_RESTART
 967	lg	%r15,__LC_RESTART_STACK
 968	aghi	%r15,-__PT_SIZE			# create pt_regs on stack
 969	xc	0(__PT_SIZE,%r15),0(%r15)
 970	stmg	%r0,%r14,__PT_R0(%r15)
 971	mvc	__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
 972	mvc	__PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
 973	aghi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
 974	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
 975	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
 976	lg	%r2,__LC_RESTART_DATA
 977	lg	%r3,__LC_RESTART_SOURCE
 978	ltgr	%r3,%r3				# test source cpu address
 979	jm	1f				# negative -> skip source stop
 9800:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
 981	brc	10,0b				# wait for status stored
 9821:	basr	%r14,%r1			# call function
 983	stap	__SF_EMPTY(%r15)		# store cpu address
 984	llgh	%r3,__SF_EMPTY(%r15)
 9852:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
 986	brc	2,2b
 9873:	j	3b
 988
 989	.section .kprobes.text, "ax"
 990
 991#ifdef CONFIG_CHECK_STACK
 992/*
 993 * The synchronous or the asynchronous stack overflowed. We are dead.
 994 * No need to properly save the registers, we are going to panic anyway.
 995 * Setup a pt_regs so that show_trace can provide a good call trace.
 996 */
 997stack_overflow:
 998	lg	%r15,__LC_PANIC_STACK	# change to panic stack
 999	la	%r11,STACK_FRAME_OVERHEAD(%r15)
1000	stmg	%r0,%r7,__PT_R0(%r11)
1001	stmg	%r8,%r9,__PT_PSW(%r11)
1002	mvc	__PT_R8(64,%r11),0(%r14)
1003	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
1004	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
1005	lgr	%r2,%r11		# pass pointer to pt_regs
1006	jg	kernel_stack_overflow
1007#endif
1008
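#
# Illustrative note (not part of the original source): cleanup_critical
# is entered with %r9 holding the interrupted instruction address. The
# compare cascade below identifies which critical region was hit, and
# each .Lcleanup_* routine completes or replays the interrupted work
# and returns a fixed-up restart address in %r9.
#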
1009cleanup_critical:
1010#if IS_ENABLED(CONFIG_KVM)
1011	clg	%r9,BASED(.Lcleanup_table_sie)	# .Lsie_gmap
1012	jl	0f
1013	clg	%r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
1014	jl	.Lcleanup_sie
1015#endif
1016	clg	%r9,BASED(.Lcleanup_table)	# system_call
1017	jl	0f
1018	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
1019	jl	.Lcleanup_system_call
1020	clg	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_tif
1021	jl	0f
1022	clg	%r9,BASED(.Lcleanup_table+24)	# .Lsysc_restore
1023	jl	.Lcleanup_sysc_tif
1024	clg	%r9,BASED(.Lcleanup_table+32)	# .Lsysc_done
1025	jl	.Lcleanup_sysc_restore
1026	clg	%r9,BASED(.Lcleanup_table+40)	# .Lio_tif
1027	jl	0f
1028	clg	%r9,BASED(.Lcleanup_table+48)	# .Lio_restore
1029	jl	.Lcleanup_io_tif
1030	clg	%r9,BASED(.Lcleanup_table+56)	# .Lio_done
1031	jl	.Lcleanup_io_restore
1032	clg	%r9,BASED(.Lcleanup_table+64)	# psw_idle
1033	jl	0f
1034	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
1035	jl	.Lcleanup_idle
1036	clg	%r9,BASED(.Lcleanup_table+80)	# save_fpu_regs
1037	jl	0f
1038	clg	%r9,BASED(.Lcleanup_table+88)	# .Lsave_fpu_regs_end
1039	jl	.Lcleanup_save_fpu_regs
1040	clg	%r9,BASED(.Lcleanup_table+96)	# load_fpu_regs
1041	jl	0f
1042	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
1043	jl	.Lcleanup_load_fpu_regs
10440:	br	%r14
1045
1046	.align	8
1047.Lcleanup_table:
1048	.quad	system_call
1049	.quad	.Lsysc_do_svc
1050	.quad	.Lsysc_tif
1051	.quad	.Lsysc_restore
1052	.quad	.Lsysc_done
1053	.quad	.Lio_tif
1054	.quad	.Lio_restore
1055	.quad	.Lio_done
1056	.quad	psw_idle
1057	.quad	.Lpsw_idle_end
1058	.quad	save_fpu_regs
1059	.quad	.Lsave_fpu_regs_end
1060	.quad	load_fpu_regs
1061	.quad	.Lload_fpu_regs_end
1062
1063#if IS_ENABLED(CONFIG_KVM)
1064.Lcleanup_table_sie:
1065	.quad	.Lsie_gmap
1066	.quad	.Lsie_done
1067
1068.Lcleanup_sie:
1069	lg	%r9,__SF_EMPTY(%r15)		# get control block pointer
1070	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
1071	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
1072	larl	%r9,sie_exit			# skip forward to sie_exit
1073	br	%r14
1074#endif
1075
1076.Lcleanup_system_call:
1077	# check if stpt has been executed
1078	clg	%r9,BASED(.Lcleanup_system_call_insn)
1079	jh	0f
1080	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
1081	cghi	%r11,__LC_SAVE_AREA_ASYNC
1082	je	0f
1083	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
10840:	# check if stmg has been executed
1085	clg	%r9,BASED(.Lcleanup_system_call_insn+8)
1086	jh	0f
1087	mvc	__LC_SAVE_AREA_SYNC(64),0(%r11)
10880:	# check if base register setup + TIF bit load has been done
1089	clg	%r9,BASED(.Lcleanup_system_call_insn+16)
1090	jhe	0f
1091	# set up saved registers r10 and r12
1092	stg	%r10,16(%r11)		# r10 last break
1093	stg	%r12,32(%r11)		# r12 task struct pointer
10940:	# check if the user time update has been done
1095	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
1096	jh	0f
1097	lg	%r15,__LC_EXIT_TIMER
1098	slg	%r15,__LC_SYNC_ENTER_TIMER
1099	alg	%r15,__LC_USER_TIMER
1100	stg	%r15,__LC_USER_TIMER
11010:	# check if the system time update has been done
1102	clg	%r9,BASED(.Lcleanup_system_call_insn+32)
1103	jh	0f
1104	lg	%r15,__LC_LAST_UPDATE_TIMER
1105	slg	%r15,__LC_EXIT_TIMER
1106	alg	%r15,__LC_SYSTEM_TIMER
1107	stg	%r15,__LC_SYSTEM_TIMER
11080:	# update accounting time stamp
1109	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
1110	# do LAST_BREAK
1111	lg	%r9,16(%r11)
1112	srag	%r9,%r9,23
1113	jz	0f
1114	lgr	%r9,%r12
1115	aghi	%r9,__TASK_thread
1116	mvc	__THREAD_last_break(8,%r9),16(%r11)
11170:	# set up saved register r11
1118	lg	%r15,__LC_KERNEL_STACK
1119	la	%r9,STACK_FRAME_OVERHEAD(%r15)
1120	stg	%r9,24(%r11)		# r11 pt_regs pointer
1121	# fill pt_regs
1122	mvc	__PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
1123	stmg	%r0,%r7,__PT_R0(%r9)
1124	mvc	__PT_PSW(16,%r9),__LC_SVC_OLD_PSW
1125	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
1126	xc	__PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
1127	mvi	__PT_FLAGS+7(%r9),_PIF_SYSCALL
1128	# setup saved register r15
1129	stg	%r15,56(%r11)		# r15 stack pointer
1130	# set new psw address and exit
1131	larl	%r9,.Lsysc_do_svc
1132	br	%r14
1133.Lcleanup_system_call_insn:
1134	.quad	system_call
1135	.quad	.Lsysc_stmg
1136	.quad	.Lsysc_per
1137	.quad	.Lsysc_vtime+36
1138	.quad	.Lsysc_vtime+42
1139
1140.Lcleanup_sysc_tif:
1141	larl	%r9,.Lsysc_tif
1142	br	%r14
1143
1144.Lcleanup_sysc_restore:
1145	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
1146	je	0f
1147	lg	%r9,24(%r11)		# get saved pointer to pt_regs
1148	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
1149	mvc	0(64,%r11),__PT_R8(%r9)
1150	lmg	%r0,%r7,__PT_R0(%r9)
11510:	lmg	%r8,%r9,__LC_RETURN_PSW
1152	br	%r14
1153.Lcleanup_sysc_restore_insn:
1154	.quad	.Lsysc_done - 4
1155
1156.Lcleanup_io_tif:
1157	larl	%r9,.Lio_tif
1158	br	%r14
1159
1160.Lcleanup_io_restore:
1161	clg	%r9,BASED(.Lcleanup_io_restore_insn)
1162	je	0f
1163	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
1164	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
1165	mvc	0(64,%r11),__PT_R8(%r9)
1166	lmg	%r0,%r7,__PT_R0(%r9)
11670:	lmg	%r8,%r9,__LC_RETURN_PSW
1168	br	%r14
1169.Lcleanup_io_restore_insn:
1170	.quad	.Lio_done - 4
1171
1172.Lcleanup_idle:
1173	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
1174	# copy interrupt clock & cpu timer
1175	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
1176	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
1177	cghi	%r11,__LC_SAVE_AREA_ASYNC
1178	je	0f
1179	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
1180	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
11810:	# check if stck & stpt have been executed
1182	clg	%r9,BASED(.Lcleanup_idle_insn)
1183	jhe	1f
1184	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
1185	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
11861:	# calculate idle cycles
1187#ifdef CONFIG_SMP
1188	clg	%r9,BASED(.Lcleanup_idle_insn)
1189	jl	3f
1190	larl	%r1,smp_cpu_mtid
1191	llgf	%r1,0(%r1)
1192	ltgr	%r1,%r1
1193	jz	3f
1194	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
1195	larl	%r3,mt_cycles
1196	ag	%r3,__LC_PERCPU_OFFSET
1197	la	%r4,__SF_EMPTY+16(%r15)
11982:	lg	%r0,0(%r3)
1199	slg	%r0,0(%r4)
1200	alg	%r0,64(%r4)
1201	stg	%r0,0(%r3)
1202	la	%r3,8(%r3)
1203	la	%r4,8(%r4)
1204	brct	%r1,2b
1205#endif
12063:	# account system time going idle
1207	lg	%r9,__LC_STEAL_TIMER
1208	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
1209	slg	%r9,__LC_LAST_UPDATE_CLOCK
1210	stg	%r9,__LC_STEAL_TIMER
1211	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
1212	lg	%r9,__LC_SYSTEM_TIMER
1213	alg	%r9,__LC_LAST_UPDATE_TIMER
1214	slg	%r9,__TIMER_IDLE_ENTER(%r2)
1215	stg	%r9,__LC_SYSTEM_TIMER
1216	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
1217	# prepare return psw
1218	nihh	%r8,0xfcfd		# clear irq & wait state bits
1219	lg	%r9,48(%r11)		# return from psw_idle
1220	br	%r14
1221.Lcleanup_idle_insn:
1222	.quad	.Lpsw_idle_lpsw
1223
1224.Lcleanup_save_fpu_regs:
1225	larl	%r9,save_fpu_regs
1226	br	%r14
1227
1228.Lcleanup_load_fpu_regs:
1229	larl	%r9,load_fpu_regs
1230	br	%r14
1231
1232/*
1233 * Integer constants
1234 */
1235	.align	8
1236.Lcritical_start:
1237	.quad	.L__critical_start
1238.Lcritical_length:
1239	.quad	.L__critical_end - .L__critical_start
1240#if IS_ENABLED(CONFIG_KVM)
1241.Lsie_critical_start:
1242	.quad	.Lsie_gmap
1243.Lsie_critical_length:
1244	.quad	.Lsie_done - .Lsie_gmap
1245#endif
1246
1247	.section .rodata, "a"
1248#define SYSCALL(esame,emu)	.long esame
1249	.globl	sys_call_table
1250sys_call_table:
1251#include "syscalls.S"
1252#undef SYSCALL
1253
1254#ifdef CONFIG_COMPAT
1255
1256#define SYSCALL(esame,emu)	.long emu
1257	.globl	sys_call_table_emu
1258sys_call_table_emu:
1259#include "syscalls.S"
1260#undef SYSCALL
1261#endif