v6.13.7
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 *    S390 low-level entry points.
  4 *
  5 *    Copyright IBM Corp. 1999, 2012
  6 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  7 *		 Hartmut Penner (hp@de.ibm.com),
  8 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  9 */
 10
 11#include <linux/export.h>
 12#include <linux/init.h>
 13#include <linux/linkage.h>
 14#include <asm/asm-extable.h>
 15#include <asm/alternative.h>
 16#include <asm/processor.h>
 17#include <asm/cache.h>
 18#include <asm/dwarf.h>
 19#include <asm/errno.h>
 20#include <asm/ptrace.h>
 21#include <asm/thread_info.h>
 22#include <asm/asm-offsets.h>
 23#include <asm/unistd.h>
 24#include <asm/page.h>
 25#include <asm/sigp.h>
 26#include <asm/irq.h>
 27#include <asm/fpu-insn.h>
 28#include <asm/setup.h>
 29#include <asm/nmi.h>
 30#include <asm/nospec-insn.h>
 31#include <asm/lowcore.h>
 32
 33_LPP_OFFSET	= __LC_LPP
 34
 35	.macro STBEAR address
 36	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", ALT_FACILITY(193)
 37	.endm
 38
 39	.macro LBEAR address
 40	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", ALT_FACILITY(193)
 41	.endm
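#
# Illustrative sketch (editorial example, not from the source): assuming
# facility 193 is the BEAR-enhancement facility, the boot-time alternative
# turns e.g.
#	STBEAR	__LC_LAST_BREAK(%r13)
# into ".insn s,0xb2010000,__LC_LAST_BREAK(%r13)", a store of the
# breaking-event-address register, and LBEAR into the matching load;
# without the facility both remain a nop.
#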
 42
 43	.macro LPSWEY address, lpswe
 44	ALTERNATIVE_2 "b \lpswe;nopr", \
 45		".insn siy,0xeb0000000071,\address,0", ALT_FACILITY(193),		\
 46		__stringify(.insn siy,0xeb0000000071,LOWCORE_ALT_ADDRESS+\address,0),	\
 47		ALT_LOWCORE
 48	.endm
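#
# Sketch of the three variants selected above (illustration): without
# facility 193, LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE stays a branch
# to the lpswe stub at \lpswe; with facility 193 it becomes a direct
# "lpswey" on __LC_RETURN_PSW; with ALT_LOWCORE the same lpswey is
# applied at LOWCORE_ALT_ADDRESS+__LC_RETURN_PSW instead.
#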
 49
 50	.macro MBEAR reg, lowcore
 51	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK(\lowcore)),\
 52		ALT_FACILITY(193)
 53	.endm
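#
# Illustration (mechanical expansion of the macro above): with facility
# 193, "MBEAR %r2,%r13" becomes
#	mvc	__PT_LAST_BREAK(8,%r2),__LC_LAST_BREAK(%r13)
# copying the saved last-break value into pt_regs; otherwise the 6-byte
# "brcl 0,0" nop leaves pt_regs->last_break untouched.
#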
 54
 55	.macro	CHECK_STACK savearea, lowcore
 56#ifdef CONFIG_CHECK_STACK
 57	tml	%r15,THREAD_SIZE - CONFIG_STACK_GUARD
 58	la	%r14,\savearea(\lowcore)
 59	jz	stack_overflow
 60#endif
 61	.endm
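#
# Worked example (sketch; numbers chosen for illustration only): with
# THREAD_SIZE = 0x4000 and CONFIG_STACK_GUARD = 256 the macro emits
#	tml	%r15,0x3f00
# so a zero result means %r15 has dropped into the guard area at the
# bottom of the stack and execution branches to stack_overflow.
#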
 62
 63	.macro	CHECK_VMAP_STACK savearea, lowcore, oklabel
 64#ifdef CONFIG_VMAP_STACK
 65	lgr	%r14,%r15
 66	nill	%r14,0x10000 - THREAD_SIZE
 67	oill	%r14,STACK_INIT_OFFSET
 68	clg	%r14,__LC_KERNEL_STACK(\lowcore)
 69	je	\oklabel
 70	clg	%r14,__LC_ASYNC_STACK(\lowcore)
 71	je	\oklabel
 72	clg	%r14,__LC_MCCK_STACK(\lowcore)
 73	je	\oklabel
 74	clg	%r14,__LC_NODAT_STACK(\lowcore)
 75	je	\oklabel
 76	clg	%r14,__LC_RESTART_STACK(\lowcore)
 77	je	\oklabel
 78	la	%r14,\savearea(\lowcore)
 79	j	stack_overflow
 80#else
 81	j	\oklabel
 82#endif
 83	.endm
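#
# Sketch of the check above (illustration): nill/oill round %r14 down to
# the THREAD_SIZE-aligned stack base and re-add STACK_INIT_OFFSET, which
# reconstructs the top-of-stack value kept in __LC_KERNEL_STACK and the
# other lowcore slots; any match means %r15 is a valid stack pointer,
# otherwise the code falls through to stack_overflow.
#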
 84
 85	/*
 86	 * The TSTMSK macro generates a test-under-mask instruction by
 87	 * calculating the memory offset for the specified mask value.
 88	 * Mask value can be any constant.  The macro shifts the mask
 89	 * value to calculate the memory offset for the test-under-mask
 90	 * instruction.
 91	 */
 92	.macro TSTMSK addr, mask, size=8, bytepos=0
 93		.if (\bytepos < \size) && (\mask >> 8)
 94			.if (\mask & 0xff)
 95				.error "Mask exceeds byte boundary"
 96			.endif
 97			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
 98			.exitm
 99		.endif
100		.ifeq \mask
101			.error "Mask must not be zero"
102		.endif
103		off = \size - \bytepos - 1
104		tm	off+\addr, \mask
105	.endm
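#
# Worked expansion (editorial example; the mask value is chosen purely
# for illustration): for the default 8-byte field, "TSTMSK addr,0x0400"
# recurses once with mask >> 8 = 0x04 and bytepos = 1, so
# off = 8 - 1 - 1 = 6 and the macro finally emits
#	tm	6+addr,0x04
# i.e. only the byte that actually contains the mask bit is tested.
#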
106
107	.macro BPOFF
108	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", ALT_SPEC(82)
109	.endm
110
111	.macro BPON
112	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
113	.endm
114
115	.macro BPENTER tif_ptr,tif_mask
116	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
117		    "j .+12; nop; nop", ALT_SPEC(82)
118	.endm
119
120	.macro BPEXIT tif_ptr,tif_mask
121	TSTMSK	\tif_ptr,\tif_mask
122	ALTERNATIVE "jz .+8;  .insn rrf,0xb2e80000,0,0,12,0", \
123		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
124	.endm
125
126#if IS_ENABLED(CONFIG_KVM)
127	.macro SIEEXIT sie_control,lowcore
128	lg	%r9,\sie_control			# get control block pointer
129	ni	__SIE_PROG0C+3(%r9),0xfe		# no longer in SIE
130	lctlg	%c1,%c1,__LC_KERNEL_ASCE(\lowcore)	# load primary asce
131	lg	%r9,__LC_CURRENT(\lowcore)
132	mvi	__TI_sie(%r9),0
133	larl	%r9,sie_exit			# skip forward to sie_exit
134	.endm
135#endif
136
137	.macro STACKLEAK_ERASE
138#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
139	brasl	%r14,stackleak_erase_on_task_stack
140#endif
141	.endm
142
143	GEN_BR_THUNK %r14
144
145	.section .kprobes.text, "ax"
146.Ldummy:
147	/*
 148	 * The following nop exists only to avoid having the next symbol
 149	 * start at the very beginning of the kprobes text section.
 150	 * In that case there would be several symbols at the same address.
 151	 * E.g. objdump would pick an arbitrary symbol when disassembling
152	 * the code.
153	 * With the added nop in between this cannot happen.
154	 */
155	nop	0
156
157/*
158 * Scheduler resume function, called by __switch_to
159 *  gpr2 = (task_struct *)prev
160 *  gpr3 = (task_struct *)next
161 * Returns:
162 *  gpr2 = prev
163 */
164SYM_FUNC_START(__switch_to_asm)
165	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
166	lghi	%r4,__TASK_stack
167	lghi	%r1,__TASK_thread
168	llill	%r5,STACK_INIT_OFFSET
169	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
170	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
171	agr	%r15,%r5			# end of kernel stack of next
172	GET_LC	%r13
173	stg	%r3,__LC_CURRENT(%r13)		# store task struct of next
174	stg	%r15,__LC_KERNEL_STACK(%r13)	# store end of kernel stack
175	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
176	aghi	%r3,__TASK_pid
177	mvc	__LC_CURRENT_PID(4,%r13),0(%r3)	# store pid of next
178	ALTERNATIVE "nop", "lpp _LPP_OFFSET(%r13)", ALT_FACILITY(40)
179	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
180	BR_EX	%r14
181SYM_FUNC_END(__switch_to_asm)
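#
# Hypothetical call site (sketch only; the exact C-side entry into
# __switch_to_asm is an assumption and differs between kernel versions):
#	lgr	%r2,%r10		# prev task_struct
#	lgr	%r3,%r11		# next task_struct
#	brasl	%r14,__switch_to_asm	# returns on next's stack, %r2 = prev
#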
182
183#if IS_ENABLED(CONFIG_KVM)
184/*
185 * __sie64a calling convention:
186 * %r2 pointer to sie control block phys
187 * %r3 pointer to sie control block virt
188 * %r4 guest register save area
189 * %r5 guest asce
190 */
191SYM_FUNC_START(__sie64a)
192	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
193	GET_LC	%r13
194	lg	%r14,__LC_CURRENT(%r13)
195	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
196	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
197	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
198	stg	%r5,__SF_SIE_GUEST_ASCE(%r15)	# save guest asce
199	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
200	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r14) # copy thread flags
201	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
202	mvi	__TI_sie(%r14),1
203	lctlg	%c1,%c1,__SF_SIE_GUEST_ASCE(%r15) # load primary asce
204	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
205	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
206	tm	__SIE_PROG20+3(%r14),3		# last exit...
207	jnz	.Lsie_skip
208	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
209	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
210.Lsie_entry:
211	sie	0(%r14)
212# Let the next instruction be NOP to avoid triggering a machine check
 213# and handling it in a guest as a result of the instruction execution.
214	nopr	7
215.Lsie_leave:
216	BPOFF
217	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
218.Lsie_skip:
219	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
220	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
221	GET_LC	%r14
222	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r14)	# load primary asce
223	lg	%r14,__LC_CURRENT(%r14)
224	mvi	__TI_sie(%r14),0
225SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
226	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
227	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
228	xgr	%r0,%r0				# clear guest registers to
229	xgr	%r1,%r1				# prevent speculative use
230	xgr	%r3,%r3
231	xgr	%r4,%r4
232	xgr	%r5,%r5
233	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
234	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
235	BR_EX	%r14
236SYM_FUNC_END(__sie64a)
237EXPORT_SYMBOL(__sie64a)
238EXPORT_SYMBOL(sie_exit)
239#endif
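#
# Hypothetical __sie64a call site honoring the convention above (sketch;
# the register sources and __GUEST_* offsets are invented for
# illustration):
#	lgr	%r2,%r6			# sie control block, physical
#	lgr	%r3,%r7			# sie control block, virtual
#	la	%r4,__GUEST_GPRS(%r8)	# guest register save area
#	lg	%r5,__GUEST_ASCE(%r8)	# guest asce
#	brasl	%r14,__sie64a		# %r2 = exit reason code on return
#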
240
241/*
242 * SVC interrupt handler routine. System calls are synchronous events and
243 * are entered with interrupts disabled.
244 */
245
246SYM_CODE_START(system_call)
247	STMG_LC	%r8,%r15,__LC_SAVE_AREA
248	GET_LC	%r13
249	stpt	__LC_SYS_ENTER_TIMER(%r13)
250	BPOFF
251	lghi	%r14,0
252.Lsysc_per:
253	STBEAR	__LC_LAST_BREAK(%r13)
254	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
255	lg	%r15,__LC_KERNEL_STACK(%r13)
256	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
257	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
258	# clear user controlled register to prevent speculative use
259	xgr	%r0,%r0
260	xgr	%r1,%r1
261	xgr	%r4,%r4
262	xgr	%r5,%r5
263	xgr	%r6,%r6
264	xgr	%r7,%r7
265	xgr	%r8,%r8
266	xgr	%r9,%r9
267	xgr	%r10,%r10
268	xgr	%r11,%r11
269	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
270	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA(%r13)
271	MBEAR	%r2,%r13
272	lgr	%r3,%r14
273	brasl	%r14,__do_syscall
274	STACKLEAK_ERASE
275	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
276	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
277	BPON
278	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
279	stpt	__LC_EXIT_TIMER(%r13)
280	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
281	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
282SYM_CODE_END(system_call)
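#
# Sketch of the user-space side (illustration; on s390 Linux the system
# call number goes into %r1 for "svc 0" and arguments into %r2-%r7):
#	lghi	%r1,__NR_getpid
#	svc	0			# enters system_call above
#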
283
284#
285# a new process exits the kernel with ret_from_fork
286#
287SYM_CODE_START(ret_from_fork)
288	lgr	%r3,%r11
289	brasl	%r14,__ret_from_fork
290	STACKLEAK_ERASE
291	GET_LC	%r13
292	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
293	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
294	BPON
295	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
296	stpt	__LC_EXIT_TIMER(%r13)
297	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
298	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
299SYM_CODE_END(ret_from_fork)
300
301/*
302 * Program check handler routine
303 */
304
305SYM_CODE_START(pgm_check_handler)
306	STMG_LC	%r8,%r15,__LC_SAVE_AREA
307	GET_LC	%r13
308	stpt	__LC_SYS_ENTER_TIMER(%r13)
309	BPOFF
310	lmg	%r8,%r9,__LC_PGM_OLD_PSW(%r13)
311	xgr	%r10,%r10
312	tmhh	%r8,0x0001		# coming from user space?
313	jno	.Lpgm_skip_asce
314	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
315	j	3f			# -> fault in user space
316.Lpgm_skip_asce:
317#if IS_ENABLED(CONFIG_KVM)
318	lg	%r11,__LC_CURRENT(%r13)
319	tm	__TI_sie(%r11),0xff
320	jz	1f
321	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
322	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
323	lghi	%r10,_PIF_GUEST_FAULT
324#endif
3251:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
326	jnz	2f			# -> enabled, can't be a double fault
327	tm	__LC_PGM_ILC+3(%r13),0x80	# check for per exception
328	jnz	.Lpgm_svcper		# -> single stepped svc
3292:	CHECK_STACK __LC_SAVE_AREA,%r13
330	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
331	# CHECK_VMAP_STACK branches to stack_overflow or 4f
332	CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4f
3333:	lg	%r15,__LC_KERNEL_STACK(%r13)
3344:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
335	stg	%r10,__PT_FLAGS(%r11)
336	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
337	stmg	%r0,%r7,__PT_R0(%r11)
338	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
339	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK(%r13)
340	stmg	%r8,%r9,__PT_PSW(%r11)
341	# clear user controlled registers to prevent speculative use
342	xgr	%r0,%r0
343	xgr	%r1,%r1
344	xgr	%r3,%r3
345	xgr	%r4,%r4
346	xgr	%r5,%r5
347	xgr	%r6,%r6
348	xgr	%r7,%r7
349	xgr	%r12,%r12
350	lgr	%r2,%r11
351	brasl	%r14,__do_pgm_check
352	tmhh	%r8,0x0001		# returning to user space?
353	jno	.Lpgm_exit_kernel
354	STACKLEAK_ERASE
355	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
356	BPON
357	stpt	__LC_EXIT_TIMER(%r13)
358.Lpgm_exit_kernel:
359	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
360	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
361	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
362	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
363
364#
365# single stepped system call
366#
367.Lpgm_svcper:
368	mvc	__LC_RETURN_PSW(8,%r13),__LC_SVC_NEW_PSW(%r13)
369	larl	%r14,.Lsysc_per
370	stg	%r14,__LC_RETURN_PSW+8(%r13)
371	lghi	%r14,1
372	LBEAR	__LC_PGM_LAST_BREAK(%r13)
373	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
374SYM_CODE_END(pgm_check_handler)
375
376/*
377 * Interrupt handler macro used for external and IO interrupts.
378 */
379.macro INT_HANDLER name,lc_old_psw,handler
380SYM_CODE_START(\name)
381	STMG_LC	%r8,%r15,__LC_SAVE_AREA
382	GET_LC	%r13
383	stckf	__LC_INT_CLOCK(%r13)
384	stpt	__LC_SYS_ENTER_TIMER(%r13)
385	STBEAR	__LC_LAST_BREAK(%r13)
386	BPOFF
387	lmg	%r8,%r9,\lc_old_psw(%r13)
388	tmhh	%r8,0x0001			# interrupting from user ?
389	jnz	1f
390#if IS_ENABLED(CONFIG_KVM)
391	lg	%r10,__LC_CURRENT(%r13)
392	tm	__TI_sie(%r10),0xff
393	jz	0f
394	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
395	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
396#endif
3970:	CHECK_STACK __LC_SAVE_AREA,%r13
398	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
399	j	2f
4001:	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
401	lg	%r15,__LC_KERNEL_STACK(%r13)
4022:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
403	la	%r11,STACK_FRAME_OVERHEAD(%r15)
404	stmg	%r0,%r7,__PT_R0(%r11)
405	# clear user controlled registers to prevent speculative use
406	xgr	%r0,%r0
407	xgr	%r1,%r1
408	xgr	%r3,%r3
409	xgr	%r4,%r4
410	xgr	%r5,%r5
411	xgr	%r6,%r6
412	xgr	%r7,%r7
413	xgr	%r10,%r10
414	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
415	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
416	MBEAR	%r11,%r13
417	stmg	%r8,%r9,__PT_PSW(%r11)
418	lgr	%r2,%r11		# pass pointer to pt_regs
419	brasl	%r14,\handler
420	mvc	__LC_RETURN_PSW(16,%r13),__PT_PSW(%r11)
421	tmhh	%r8,0x0001		# returning to user ?
422	jno	2f
423	STACKLEAK_ERASE
424	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
425	BPON
426	stpt	__LC_EXIT_TIMER(%r13)
4272:	LBEAR	__PT_LAST_BREAK(%r11)
428	lmg	%r0,%r15,__PT_R0(%r11)
429	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
430SYM_CODE_END(\name)
431.endm
432
433	.section .irqentry.text, "ax"
434
435INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
436INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
437
438	.section .kprobes.text, "ax"
439
440/*
441 * Machine check handler routines
442 */
443SYM_CODE_START(mcck_int_handler)
444	BPOFF
445	GET_LC	%r13
446	lmg	%r8,%r9,__LC_MCK_OLD_PSW(%r13)
447	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_SYSTEM_DAMAGE
448	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
449	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CR_VALID
450	jno	.Lmcck_panic		# control registers invalid -> panic
451	ptlb
452	lay	%r14,__LC_CPU_TIMER_SAVE_AREA(%r13)
453	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
454	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CPU_TIMER_VALID
455	jo	3f
456	la	%r14,__LC_SYS_ENTER_TIMER(%r13)
457	clc	0(8,%r14),__LC_EXIT_TIMER(%r13)
458	jl	1f
459	la	%r14,__LC_EXIT_TIMER(%r13)
4601:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER(%r13)
461	jl	2f
462	la	%r14,__LC_LAST_UPDATE_TIMER(%r13)
4632:	spt	0(%r14)
464	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
4653:	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_MWP_VALID
466	jno	.Lmcck_panic
467	tmhh	%r8,0x0001		# interrupting from user ?
468	jnz	.Lmcck_user
469	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_IA_VALID
470	jno	.Lmcck_panic
471#if IS_ENABLED(CONFIG_KVM)
472	lg	%r10,__LC_CURRENT(%r13)
473	tm	__TI_sie(%r10),0xff
474	jz	.Lmcck_user
 475	# Need to compare the address instead of the __TI_sie flag.
476	# Otherwise there would be a race between setting the flag
477	# and entering SIE (or leaving and clearing the flag). This
478	# would cause machine checks targeted at the guest to be
479	# handled by the host.
480	larl	%r14,.Lsie_entry
481	clgrjl	%r9,%r14, 4f
482	larl	%r14,.Lsie_leave
483	clgrjhe	%r9,%r14, 4f
484	lg	%r10,__LC_PCPU
485	oi	__PCPU_FLAGS+7(%r10), _CIF_MCCK_GUEST
4864:	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
487	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
488#endif
489.Lmcck_user:
490	lg	%r15,__LC_MCCK_STACK(%r13)
491	la	%r11,STACK_FRAME_OVERHEAD(%r15)
492	stctg	%c1,%c1,__PT_CR1(%r11)
493	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
494	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
495	lay	%r14,__LC_GPREGS_SAVE_AREA(%r13)
496	mvc	__PT_R0(128,%r11),0(%r14)
497	# clear user controlled registers to prevent speculative use
498	xgr	%r0,%r0
499	xgr	%r1,%r1
500	xgr	%r3,%r3
501	xgr	%r4,%r4
502	xgr	%r5,%r5
503	xgr	%r6,%r6
504	xgr	%r7,%r7
505	xgr	%r10,%r10
506	stmg	%r8,%r9,__PT_PSW(%r11)
507	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
508	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
509	lgr	%r2,%r11		# pass pointer to pt_regs
510	brasl	%r14,s390_do_machine_check
511	lctlg	%c1,%c1,__PT_CR1(%r11)
512	lmg	%r0,%r10,__PT_R0(%r11)
513	mvc	__LC_RETURN_MCCK_PSW(16,%r13),__PT_PSW(%r11) # move return PSW
514	tm	__LC_RETURN_MCCK_PSW+1(%r13),0x01 # returning to user ?
515	jno	0f
516	BPON
517	stpt	__LC_EXIT_TIMER(%r13)
5180:	ALTERNATIVE "brcl 0,0", __stringify(lay %r12,__LC_LAST_BREAK_SAVE_AREA(%r13)),\
519		ALT_FACILITY(193)
520	LBEAR	0(%r12)
521	lmg	%r11,%r15,__PT_R11(%r11)
522	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE
523
524.Lmcck_panic:
525	/*
526	 * Iterate over all possible CPU addresses in the range 0..0xffff
527	 * and stop each CPU using signal processor. Use compare and swap
528	 * to allow just one CPU-stopper and prevent concurrent CPUs from
529	 * stopping each other while leaving the others running.
530	 */
531	lhi	%r5,0
532	lhi	%r6,1
533	larl	%r7,stop_lock
534	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
535	jnz	4f
536	larl	%r7,this_cpu
537	stap	0(%r7)			# this CPU address
538	lh	%r4,0(%r7)
539	nilh	%r4,0
540	lhi	%r0,1
541	sll	%r0,16			# CPU counter
542	lhi	%r3,0			# next CPU address
5430:	cr	%r3,%r4
544	je	2f
5451:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
546	brc	SIGP_CC_BUSY,1b
5472:	ahi	%r3,1
548	brct	%r0,0b
5493:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
550	brc	SIGP_CC_BUSY,3b
5514:	j	4b
552SYM_CODE_END(mcck_int_handler)
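#
# Worked reading of the stop loop above (sketch): the cs on stop_lock
# elects exactly one stopper; %r0 = 0x10000 counts all possible CPU
# addresses, %r3 walks them while the stopper's own address in %r4 is
# skipped, busy SIGP conditions are retried, and the stopper finally
# stops itself.
#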
553
554SYM_CODE_START(restart_int_handler)
555	ALTERNATIVE "nop", "lpp _LPP_OFFSET", ALT_FACILITY(40)
556	stg	%r15,__LC_SAVE_AREA_RESTART
557	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
558	jz	0f
559	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA
5600:	larl	%r15,daton_psw
561	lpswe	0(%r15)				# turn dat on, keep irqs off
562.Ldaton:
563	GET_LC	%r15
564	lg	%r15,__LC_RESTART_STACK(%r15)
565	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
566	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
567	GET_LC	%r13
568	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART(%r13)
569	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW(%r13)
570	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
571	lg	%r1,__LC_RESTART_FN(%r13)	# load fn, parm & source cpu
572	lg	%r2,__LC_RESTART_DATA(%r13)
573	lgf	%r3,__LC_RESTART_SOURCE(%r13)
574	ltgr	%r3,%r3				# test source cpu address
575	jm	1f				# negative -> skip source stop
5760:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
577	brc	10,0b				# wait for status stored
5781:	basr	%r14,%r1			# call function
579	stap	__SF_EMPTY(%r15)		# store cpu address
580	llgh	%r3,__SF_EMPTY(%r15)
5812:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
582	brc	2,2b
5833:	j	3b
584SYM_CODE_END(restart_int_handler)
585
586	__INIT
587SYM_CODE_START(early_pgm_check_handler)
588	STMG_LC %r8,%r15,__LC_SAVE_AREA
589	GET_LC	%r13
590	aghi	%r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
591	la	%r11,STACK_FRAME_OVERHEAD(%r15)
592	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
593	stmg	%r0,%r7,__PT_R0(%r11)
594	mvc	__PT_PSW(16,%r11),__LC_PGM_OLD_PSW(%r13)
595	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
596	lgr	%r2,%r11
597	brasl	%r14,__do_early_pgm_check
598	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
599	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
600	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
601SYM_CODE_END(early_pgm_check_handler)
602	__FINIT
603
604	.section .kprobes.text, "ax"
605
606#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
607/*
608 * The synchronous or the asynchronous stack overflowed. We are dead.
609 * No need to properly save the registers, we are going to panic anyway.
 610 * Set up a pt_regs so that show_trace can provide a good call trace.
611 */
612SYM_CODE_START(stack_overflow)
613	GET_LC	%r15
614	lg	%r15,__LC_NODAT_STACK(%r15) # change to panic stack
615	la	%r11,STACK_FRAME_OVERHEAD(%r15)
616	stmg	%r0,%r7,__PT_R0(%r11)
617	stmg	%r8,%r9,__PT_PSW(%r11)
618	mvc	__PT_R8(64,%r11),0(%r14)
619	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
620	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
621	lgr	%r2,%r11		# pass pointer to pt_regs
622	jg	kernel_stack_overflow
623SYM_CODE_END(stack_overflow)
624#endif
625
626	.section .data, "aw"
627	.balign	4
628SYM_DATA_LOCAL(stop_lock,	.long 0)
629SYM_DATA_LOCAL(this_cpu,	.short 0)
630	.balign	8
631SYM_DATA_START_LOCAL(daton_psw)
632	.quad	PSW_KERNEL_BITS
633	.quad	.Ldaton
634SYM_DATA_END(daton_psw)
635
636	.section .rodata, "a"
637	.balign	8
638#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
639SYM_DATA_START(sys_call_table)
640#include "asm/syscall_table.h"
641SYM_DATA_END(sys_call_table)
642#undef SYSCALL
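#
# Illustration (sketch; the entry name is hypothetical): every SYSCALL()
# line in asm/syscall_table.h expands to one 8-byte slot, so e.g.
#	SYSCALL(sys_read,compat_sys_read)
# becomes ".quad __s390x_sys_read" here and ".quad __s390_compat_sys_read"
# in the compat table below.
#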
643
644#ifdef CONFIG_COMPAT
645
646#define SYSCALL(esame,emu)	.quad __s390_ ## emu
647SYM_DATA_START(sys_call_table_emu)
648#include "asm/syscall_table.h"
649SYM_DATA_END(sys_call_table_emu)
650#undef SYSCALL
651#endif
v4.6
 
   1/*
   2 *    S390 low-level entry points.
   3 *
   4 *    Copyright IBM Corp. 1999, 2012
   5 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
   6 *		 Hartmut Penner (hp@de.ibm.com),
   7 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
   8 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
   9 */
  10
  11#include <linux/init.h>
  12#include <linux/linkage.h>
  13#include <asm/processor.h>
  14#include <asm/cache.h>
  15#include <asm/errno.h>
  16#include <asm/ptrace.h>
  17#include <asm/thread_info.h>
  18#include <asm/asm-offsets.h>
  19#include <asm/unistd.h>
  20#include <asm/page.h>
  21#include <asm/sigp.h>
  22#include <asm/irq.h>
  23#include <asm/vx-insn.h>
  24#include <asm/setup.h>
  25#include <asm/nmi.h>
  26
  27__PT_R0      =	__PT_GPRS
  28__PT_R1      =	__PT_GPRS + 8
  29__PT_R2      =	__PT_GPRS + 16
  30__PT_R3      =	__PT_GPRS + 24
  31__PT_R4      =	__PT_GPRS + 32
  32__PT_R5      =	__PT_GPRS + 40
  33__PT_R6      =	__PT_GPRS + 48
  34__PT_R7      =	__PT_GPRS + 56
  35__PT_R8      =	__PT_GPRS + 64
  36__PT_R9      =	__PT_GPRS + 72
  37__PT_R10     =	__PT_GPRS + 80
  38__PT_R11     =	__PT_GPRS + 88
  39__PT_R12     =	__PT_GPRS + 96
  40__PT_R13     =	__PT_GPRS + 104
  41__PT_R14     =	__PT_GPRS + 112
  42__PT_R15     =	__PT_GPRS + 120
  43
  44STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
  45STACK_SIZE  = 1 << STACK_SHIFT
  46STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
  47
  48_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
  49		   _TIF_UPROBE)
  50_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
  51		   _TIF_SYSCALL_TRACEPOINT)
  52_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE | _CIF_FPU)
  53_PIF_WORK	= (_PIF_PER_TRAP)
  54
  55#define BASED(name) name-cleanup_critical(%r13)
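#
# Sketch (illustration): the entry paths execute
# "larl %r13,cleanup_critical" first, so BASED(.Lcritical_start)
# assembles to ".Lcritical_start-cleanup_critical(%r13)", i.e. the
# literal is addressed as a displacement from the %r13 base register.
#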
  56
  57	.macro	TRACE_IRQS_ON
  58#ifdef CONFIG_TRACE_IRQFLAGS
  59	basr	%r2,%r0
  60	brasl	%r14,trace_hardirqs_on_caller
  61#endif
  62	.endm
  63
  64	.macro	TRACE_IRQS_OFF
  65#ifdef CONFIG_TRACE_IRQFLAGS
  66	basr	%r2,%r0
  67	brasl	%r14,trace_hardirqs_off_caller
  68#endif
  69	.endm
  70
  71	.macro	LOCKDEP_SYS_EXIT
  72#ifdef CONFIG_LOCKDEP
  73	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
  74	jz	.+10
  75	brasl	%r14,lockdep_sys_exit
  76#endif
  77	.endm
  78
  79	.macro	CHECK_STACK stacksize,savearea
  80#ifdef CONFIG_CHECK_STACK
  81	tml	%r15,\stacksize - CONFIG_STACK_GUARD
  82	lghi	%r14,\savearea
  83	jz	stack_overflow
  84#endif
  85	.endm
  86
  87	.macro	SWITCH_ASYNC savearea,timer
  88	tmhh	%r8,0x0001		# interrupting from user ?
  89	jnz	1f
  90	lgr	%r14,%r9
  91	slg	%r14,BASED(.Lcritical_start)
  92	clg	%r14,BASED(.Lcritical_length)
  93	jhe	0f
  94	lghi	%r11,\savearea		# inside critical section, do cleanup
  95	brasl	%r14,cleanup_critical
  96	tmhh	%r8,0x0001		# retest problem state after cleanup
  97	jnz	1f
  980:	lg	%r14,__LC_ASYNC_STACK	# are we already on the async stack?
  99	slgr	%r14,%r15
 100	srag	%r14,%r14,STACK_SHIFT
 101	jnz	2f
 102	CHECK_STACK 1<<STACK_SHIFT,\savearea
 103	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 104	j	3f
 1051:	LAST_BREAK %r14
 106	UPDATE_VTIME %r14,%r15,\timer
 1072:	lg	%r15,__LC_ASYNC_STACK	# load async stack
 1083:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 109	.endm
 110
 111	.macro UPDATE_VTIME w1,w2,enter_timer
 112	lg	\w1,__LC_EXIT_TIMER
 113	lg	\w2,__LC_LAST_UPDATE_TIMER
 114	slg	\w1,\enter_timer
 115	slg	\w2,__LC_EXIT_TIMER
 116	alg	\w1,__LC_USER_TIMER
 117	alg	\w2,__LC_SYSTEM_TIMER
 118	stg	\w1,__LC_USER_TIMER
 119	stg	\w2,__LC_SYSTEM_TIMER
 120	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
 121	.endm
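#
# Worked reading of the arithmetic above (sketch; recall that the s390
# CPU timer counts down, so earlier samples hold larger values):
#	__LC_USER_TIMER   += __LC_EXIT_TIMER - \enter_timer
#	__LC_SYSTEM_TIMER += __LC_LAST_UPDATE_TIMER - __LC_EXIT_TIMER
#	__LC_LAST_UPDATE_TIMER = \enter_timer
#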
 122
 123	.macro	LAST_BREAK scratch
 124	srag	\scratch,%r10,23
 125	jz	.+10
 126	stg	%r10,__TI_last_break(%r12)
 127	.endm
 128
 129	.macro REENABLE_IRQS
 130	stg	%r8,__LC_RETURN_PSW
 131	ni	__LC_RETURN_PSW,0xbf
 132	ssm	__LC_RETURN_PSW
 133	.endm
 134
 135	.macro STCK savearea
 136#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
 137	.insn	s,0xb27c0000,\savearea		# store clock fast
 138#else
 139	.insn	s,0xb2050000,\savearea		# store clock
 140#endif
 141	.endm
 142
 143	/*
 144	 * The TSTMSK macro generates a test-under-mask instruction by
 145	 * calculating the memory offset for the specified mask value.
 146	 * Mask value can be any constant.  The macro shifts the mask
 147	 * value to calculate the memory offset for the test-under-mask
 148	 * instruction.
 149	 */
 150	.macro TSTMSK addr, mask, size=8, bytepos=0
 151		.if (\bytepos < \size) && (\mask >> 8)
 152			.if (\mask & 0xff)
 153				.error "Mask exceeds byte boundary"
 154			.endif
 155			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
 156			.exitm
 157		.endif
 158		.ifeq \mask
 159			.error "Mask must not be zero"
 160		.endif
 161		off = \size - \bytepos - 1
 162		tm	off+\addr, \mask
 163	.endm
 164
 165	.section .kprobes.text, "ax"
 166
 167/*
 168 * Scheduler resume function, called by switch_to
 169 *  gpr2 = (task_struct *) prev
 170 *  gpr3 = (task_struct *) next
 171 * Returns:
 172 *  gpr2 = prev
 173 */
 174ENTRY(__switch_to)
 175	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
 176	lgr	%r1,%r2
 177	aghi	%r1,__TASK_thread		# thread_struct of prev task
 178	lg	%r4,__TASK_thread_info(%r2)	# get thread_info of prev
 179	lg	%r5,__TASK_thread_info(%r3)	# get thread_info of next
 180	stg	%r15,__THREAD_ksp(%r1)		# store kernel stack of prev
 181	lgr	%r1,%r3
 182	aghi	%r1,__TASK_thread		# thread_struct of next task
 183	lgr	%r15,%r5
 184	aghi	%r15,STACK_INIT			# end of kernel stack of next
 185	stg	%r3,__LC_CURRENT		# store task struct of next
 186	stg	%r5,__LC_THREAD_INFO		# store thread info of next
 187	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
 188	lg	%r15,__THREAD_ksp(%r1)		# load kernel stack of next
 189	/* c4 is used in guest detection: arch/s390/kernel/perf_cpum_sf.c */
 190	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
 191	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
 192	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 193	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
 194	bzr	%r14
 195	.insn	s,0xb2800000,__LC_LPP		# set program parameter
 196	br	%r14
 197
 198.L__critical_start:
 199
 200#if IS_ENABLED(CONFIG_KVM)
 201/*
 202 * sie64a calling convention:
 203 * %r2 pointer to sie control block
 204 * %r3 guest register save area
 205 */
 206ENTRY(sie64a)
 207	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
 208	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
 209	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
 210	xc	__SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
 211	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
 212	jno	.Lsie_load_guest_gprs
 213	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
 214.Lsie_load_guest_gprs:
 215	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
 216	lg	%r14,__LC_GMAP			# get gmap pointer
 217	ltgr	%r14,%r14
 218	jz	.Lsie_gmap
 219	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
 220.Lsie_gmap:
 221	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
 222	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
 223	tm	__SIE_PROG20+3(%r14),3		# last exit...
 224	jnz	.Lsie_skip
 225	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 226	jo	.Lsie_skip			# exit if fp/vx regs changed
 227	sie	0(%r14)
 228.Lsie_skip:
 229	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
 230	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 231.Lsie_done:
 232# some program checks are suppressing. C code (e.g. do_protection_exception)
 233# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
 234# instructions between sie64a and .Lsie_done should not cause program
 235# interrupts. So let's use a nop (47 00 00 00) as a landing pad.
 236# See also .Lcleanup_sie
 237.Lrewind_pad:
 238	nop	0
 239	.globl sie_exit
 240sie_exit:
 241	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
 242	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
 243	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
 244	lg	%r2,__SF_EMPTY+16(%r15)		# return exit reason code
 245	br	%r14
 246.Lsie_fault:
 247	lghi	%r14,-EFAULT
 248	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
 249	j	sie_exit
 250
 251	EX_TABLE(.Lrewind_pad,.Lsie_fault)
 252	EX_TABLE(sie_exit,.Lsie_fault)
 253#endif
 254
 255/*
 256 * SVC interrupt handler routine. System calls are synchronous events and
 257 * are executed with interrupts enabled.
 258 */
 259
 260ENTRY(system_call)
 261	stpt	__LC_SYNC_ENTER_TIMER
 262.Lsysc_stmg:
 263	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 264	lg	%r10,__LC_LAST_BREAK
 265	lg	%r12,__LC_THREAD_INFO
 266	lghi	%r14,_PIF_SYSCALL
 267.Lsysc_per:
 268	lg	%r15,__LC_KERNEL_STACK
 269	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
 270	LAST_BREAK %r13
 271.Lsysc_vtime:
 272	UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
 273	stmg	%r0,%r7,__PT_R0(%r11)
 274	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 275	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
 276	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 277	stg	%r14,__PT_FLAGS(%r11)
 278.Lsysc_do_svc:
 279	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
 280	llgh	%r8,__PT_INT_CODE+2(%r11)
 281	slag	%r8,%r8,2			# shift and test for svc 0
 282	jnz	.Lsysc_nr_ok
 283	# svc 0: system call number in %r1
 284	llgfr	%r1,%r1				# clear high word in r1
 285	cghi	%r1,NR_syscalls
 286	jnl	.Lsysc_nr_ok
 287	sth	%r1,__PT_INT_CODE+2(%r11)
 288	slag	%r8,%r1,2
 289.Lsysc_nr_ok:
 290	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 291	stg	%r2,__PT_ORIG_GPR2(%r11)
 292	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
 293	lgf	%r9,0(%r8,%r10)			# get system call add.
 294	TSTMSK	__TI_flags(%r12),_TIF_TRACE
 295	jnz	.Lsysc_tracesys
 296	basr	%r14,%r9			# call sys_xxxx
 297	stg	%r2,__PT_R2(%r11)		# store return value
 298
 299.Lsysc_return:
 300	LOCKDEP_SYS_EXIT
 301.Lsysc_tif:
 302	TSTMSK	__PT_FLAGS(%r11),_PIF_WORK
 303	jnz	.Lsysc_work
 304	TSTMSK	__TI_flags(%r12),_TIF_WORK
 305	jnz	.Lsysc_work			# check for work
 306	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
 307	jnz	.Lsysc_work
 308.Lsysc_restore:
 309	lg	%r14,__LC_VDSO_PER_CPU
 310	lmg	%r0,%r10,__PT_R0(%r11)
 311	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
 312	stpt	__LC_EXIT_TIMER
 313	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 314	lmg	%r11,%r15,__PT_R11(%r11)
 315	lpswe	__LC_RETURN_PSW
 316.Lsysc_done:
 317
 318#
 319# One of the work bits is on. Find out which one.
 320#
 321.Lsysc_work:
 322	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
 323	jo	.Lsysc_mcck_pending
 324	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
 325	jo	.Lsysc_reschedule
 326#ifdef CONFIG_UPROBES
 327	TSTMSK	__TI_flags(%r12),_TIF_UPROBE
 328	jo	.Lsysc_uprobe_notify
 329#endif
 330	TSTMSK	__PT_FLAGS(%r11),_PIF_PER_TRAP
 331	jo	.Lsysc_singlestep
 332	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
 333	jo	.Lsysc_sigpending
 334	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
 335	jo	.Lsysc_notify_resume
 336	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 337	jo	.Lsysc_vxrs
 338	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE
 339	jo	.Lsysc_uaccess
 340	j	.Lsysc_return		# beware of critical section cleanup
 341
 342#
 343# _TIF_NEED_RESCHED is set, call schedule
 344#
 345.Lsysc_reschedule:
 346	larl	%r14,.Lsysc_return
 347	jg	schedule
 348
 349#
 350# _CIF_MCCK_PENDING is set, call handler
 351#
 352.Lsysc_mcck_pending:
 353	larl	%r14,.Lsysc_return
 354	jg	s390_handle_mcck	# TIF bit will be cleared by handler
 355
 356#
 357# _CIF_ASCE is set, load user space asce
 358#
 359.Lsysc_uaccess:
 360	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
 361	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 362	j	.Lsysc_return
 363
 364#
 365# CIF_FPU is set, restore floating-point controls and floating-point registers.
 366#
 367.Lsysc_vxrs:
 368	larl	%r14,.Lsysc_return
 369	jg	load_fpu_regs
 370
 371#
 372# _TIF_SIGPENDING is set, call do_signal
 373#
 374.Lsysc_sigpending:
 375	lgr	%r2,%r11		# pass pointer to pt_regs
 376	brasl	%r14,do_signal
 377	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
 378	jno	.Lsysc_return
 379	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
 380	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
 381	lghi	%r8,0			# svc 0 returns -ENOSYS
 382	llgh	%r1,__PT_INT_CODE+2(%r11)	# load new svc number
 383	cghi	%r1,NR_syscalls
 384	jnl	.Lsysc_nr_ok		# invalid svc number -> do svc 0
 385	slag	%r8,%r1,2
 386	j	.Lsysc_nr_ok		# restart svc
 387
 388#
 389# _TIF_NOTIFY_RESUME is set, call do_notify_resume
 390#
 391.Lsysc_notify_resume:
 392	lgr	%r2,%r11		# pass pointer to pt_regs
 393	larl	%r14,.Lsysc_return
 394	jg	do_notify_resume
 395
 396#
 397# _TIF_UPROBE is set, call uprobe_notify_resume
 398#
 399#ifdef CONFIG_UPROBES
 400.Lsysc_uprobe_notify:
 401	lgr	%r2,%r11		# pass pointer to pt_regs
 402	larl	%r14,.Lsysc_return
 403	jg	uprobe_notify_resume
 404#endif
 405
 406#
 407# _PIF_PER_TRAP is set, call do_per_trap
 408#
 409.Lsysc_singlestep:
 410	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
 411	lgr	%r2,%r11		# pass pointer to pt_regs
 412	larl	%r14,.Lsysc_return
 413	jg	do_per_trap
 414
 415#
 416# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
 417# and after the system call
 418#
 419.Lsysc_tracesys:
 420	lgr	%r2,%r11		# pass pointer to pt_regs
 421	la	%r3,0
 422	llgh	%r0,__PT_INT_CODE+2(%r11)
 423	stg	%r0,__PT_R2(%r11)
 424	brasl	%r14,do_syscall_trace_enter
 425	lghi	%r0,NR_syscalls
 426	clgr	%r0,%r2
 427	jnh	.Lsysc_tracenogo
 428	sllg	%r8,%r2,2
 429	lgf	%r9,0(%r8,%r10)
 430.Lsysc_tracego:
 431	lmg	%r3,%r7,__PT_R3(%r11)
 432	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
 433	lg	%r2,__PT_ORIG_GPR2(%r11)
 434	basr	%r14,%r9		# call sys_xxx
 435	stg	%r2,__PT_R2(%r11)	# store return value
 436.Lsysc_tracenogo:
 437	TSTMSK	__TI_flags(%r12),_TIF_TRACE
 438	jz	.Lsysc_return
 439	lgr	%r2,%r11		# pass pointer to pt_regs
 440	larl	%r14,.Lsysc_return
 441	jg	do_syscall_trace_exit
 442
 443#
 444# a new process exits the kernel with ret_from_fork
 445#
 446ENTRY(ret_from_fork)
 447	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 448	lg	%r12,__LC_THREAD_INFO
 449	brasl	%r14,schedule_tail
 450	TRACE_IRQS_ON
 451	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 452	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
 453	jne	.Lsysc_tracenogo
 454	# it's a kernel thread
 455	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
 456ENTRY(kernel_thread_starter)
 457	la	%r2,0(%r10)
 458	basr	%r14,%r9
 459	j	.Lsysc_tracenogo
 460
 461/*
 462 * Program check handler routine
 463 */
 464
 465ENTRY(pgm_check_handler)
 466	stpt	__LC_SYNC_ENTER_TIMER
 467	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 468	lg	%r10,__LC_LAST_BREAK
 469	lg	%r12,__LC_THREAD_INFO
 470	larl	%r13,cleanup_critical
 471	lmg	%r8,%r9,__LC_PGM_OLD_PSW
 472	tmhh	%r8,0x0001		# test problem state bit
 473	jnz	2f			# -> fault in user space
 474#if IS_ENABLED(CONFIG_KVM)
 475	# cleanup critical section for sie64a
 476	lgr	%r14,%r9
 477	slg	%r14,BASED(.Lsie_critical_start)
 478	clg	%r14,BASED(.Lsie_critical_length)
 479	jhe	0f
 480	brasl	%r14,.Lcleanup_sie
 481#endif
 4820:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
 483	jnz	1f			# -> enabled, can't be a double fault
 484	tm	__LC_PGM_ILC+3,0x80	# check for per exception
 485	jnz	.Lpgm_svcper		# -> single stepped svc
 4861:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
 487	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 488	j	3f
 4892:	LAST_BREAK %r14
 490	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
 491	lg	%r15,__LC_KERNEL_STACK
 492	lg	%r14,__TI_task(%r12)
 493	aghi	%r14,__TASK_thread	# pointer to thread_struct
 494	lghi	%r13,__LC_PGM_TDB
 495	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
 496	jz	3f
 497	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
 4983:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 499	stmg	%r0,%r7,__PT_R0(%r11)
 500	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 501	stmg	%r8,%r9,__PT_PSW(%r11)
 502	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
 503	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
 504	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 505	stg	%r10,__PT_ARGS(%r11)
 506	tm	__LC_PGM_ILC+3,0x80	# check for per exception
 507	jz	4f
 508	tmhh	%r8,0x0001		# kernel per event ?
 509	jz	.Lpgm_kprobe
 510	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
 511	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
 512	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
 513	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
 5144:	REENABLE_IRQS
 515	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 516	larl	%r1,pgm_check_table
 517	llgh	%r10,__PT_INT_CODE+2(%r11)
 518	nill	%r10,0x007f
 519	sll	%r10,2
 520	je	.Lpgm_return
 521	lgf	%r1,0(%r10,%r1)		# load address of handler routine
 522	lgr	%r2,%r11		# pass pointer to pt_regs
 523	basr	%r14,%r1		# branch to interrupt-handler
 524.Lpgm_return:
 525	LOCKDEP_SYS_EXIT
 526	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
 527	jno	.Lsysc_restore
 528	j	.Lsysc_tif
 529
 530#
 531# PER event in supervisor state, must be kprobes
 532#
 533.Lpgm_kprobe:
 534	REENABLE_IRQS
 535	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 536	lgr	%r2,%r11		# pass pointer to pt_regs
 537	brasl	%r14,do_per_trap
 538	j	.Lpgm_return
 539
 540#
 541# single stepped system call
 542#
 543.Lpgm_svcper:
 544	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
 545	larl	%r14,.Lsysc_per
 546	stg	%r14,__LC_RETURN_PSW+8
 547	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
 548	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs
 549
 550/*
 551 * IO interrupt handler routine
 552 */
 553ENTRY(io_int_handler)
 554	STCK	__LC_INT_CLOCK
 555	stpt	__LC_ASYNC_ENTER_TIMER
 556	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 557	lg	%r10,__LC_LAST_BREAK
 558	lg	%r12,__LC_THREAD_INFO
 559	larl	%r13,cleanup_critical
 560	lmg	%r8,%r9,__LC_IO_OLD_PSW
 561	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 562	stmg	%r0,%r7,__PT_R0(%r11)
 563	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 564	stmg	%r8,%r9,__PT_PSW(%r11)
 565	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
 566	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 567	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
 568	jo	.Lio_restore
 569	TRACE_IRQS_OFF
 570	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 571.Lio_loop:
 572	lgr	%r2,%r11		# pass pointer to pt_regs
 573	lghi	%r3,IO_INTERRUPT
 574	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
 575	jz	.Lio_call
 576	lghi	%r3,THIN_INTERRUPT
 577.Lio_call:
 578	brasl	%r14,do_IRQ
 579	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
 580	jz	.Lio_return
 581	tpi	0
 582	jz	.Lio_return
 583	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
 584	j	.Lio_loop
 585.Lio_return:
 586	LOCKDEP_SYS_EXIT
 587	TRACE_IRQS_ON
 588.Lio_tif:
 589	TSTMSK	__TI_flags(%r12),_TIF_WORK
 590	jnz	.Lio_work		# there is work to do (signals etc.)
 591	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
 592	jnz	.Lio_work
 593.Lio_restore:
 594	lg	%r14,__LC_VDSO_PER_CPU
 595	lmg	%r0,%r10,__PT_R0(%r11)
 596	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
 597	stpt	__LC_EXIT_TIMER
 598	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 599	lmg	%r11,%r15,__PT_R11(%r11)
 600	lpswe	__LC_RETURN_PSW
 601.Lio_done:
 602
 603#
 604# There is work to do, find out in which context we have been interrupted:
 605# 1) if we return to user space we can do all _TIF_WORK work
 606# 2) if we return to kernel code and kvm is enabled check if we need to
 607#    modify the psw to leave SIE
 608# 3) if we return to kernel code and preemptive scheduling is enabled check
 609#    the preemption counter and if it is zero call preempt_schedule_irq
 610# Before any work can be done, a switch to the kernel stack is required.
 611#
 612.Lio_work:
 613	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
 614	jo	.Lio_work_user		# yes -> do resched & signal
 615#ifdef CONFIG_PREEMPT
 616	# check for preemptive scheduling
 617	icm	%r0,15,__TI_precount(%r12)
 618	jnz	.Lio_restore		# preemption is disabled
 619	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
 620	jno	.Lio_restore
 621	# switch to kernel stack
 622	lg	%r1,__PT_R15(%r11)
 623	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 624	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 625	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
 626	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 627	lgr	%r15,%r1
 628	# TRACE_IRQS_ON already done at .Lio_return, call
 629	# TRACE_IRQS_OFF to keep things symmetrical
 630	TRACE_IRQS_OFF
 631	brasl	%r14,preempt_schedule_irq
 632	j	.Lio_return
 633#else
 634	j	.Lio_restore
 635#endif
 636
 637#
 638# Need to do work before returning to userspace, switch to kernel stack
 639#
 640.Lio_work_user:
 641	lg	%r1,__LC_KERNEL_STACK
 642	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 643	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
 644	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 645	lgr	%r15,%r1
 646
 647#
 648# One of the work bits is on. Find out which one.
 649#
 650.Lio_work_tif:
 651	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
 652	jo	.Lio_mcck_pending
 653	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
 654	jo	.Lio_reschedule
 655	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
 656	jo	.Lio_sigpending
 657	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
 658	jo	.Lio_notify_resume
 659	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 660	jo	.Lio_vxrs
 661	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE
 662	jo	.Lio_uaccess
 663	j	.Lio_return		# beware of critical section cleanup
 664
 665#
 666# _CIF_MCCK_PENDING is set, call handler
 667#
 668.Lio_mcck_pending:
 669	# TRACE_IRQS_ON already done at .Lio_return
 670	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
 671	TRACE_IRQS_OFF
 672	j	.Lio_return
 673
 674#
 675# _CIF_ASCE is set, load user space asce
 676#
 677.Lio_uaccess:
 678	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
 679	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 680	j	.Lio_return
 681
 682#
 683# CIF_FPU is set, restore floating-point controls and floating-point registers.
 684#
 685.Lio_vxrs:
 686	larl	%r14,.Lio_return
 687	jg	load_fpu_regs
 688
 689#
 690# _TIF_NEED_RESCHED is set, call schedule
 691#
 692.Lio_reschedule:
 693	# TRACE_IRQS_ON already done at .Lio_return
 694	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 695	brasl	%r14,schedule		# call scheduler
 696	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 697	TRACE_IRQS_OFF
 698	j	.Lio_return
 699
 700#
 701# _TIF_SIGPENDING is set, call do_signal
 702#
 703.Lio_sigpending:
 704	# TRACE_IRQS_ON already done at .Lio_return
 705	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 706	lgr	%r2,%r11		# pass pointer to pt_regs
 707	brasl	%r14,do_signal
 708	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 709	TRACE_IRQS_OFF
 710	j	.Lio_return
 711
 712#
 713# _TIF_NOTIFY_RESUME is set, call do_notify_resume
 714#
 715.Lio_notify_resume:
 716	# TRACE_IRQS_ON already done at .Lio_return
 717	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 718	lgr	%r2,%r11		# pass pointer to pt_regs
 719	brasl	%r14,do_notify_resume
 720	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 721	TRACE_IRQS_OFF
 722	j	.Lio_return
 723
 724/*
 725 * External interrupt handler routine
 726 */
 727ENTRY(ext_int_handler)
 728	STCK	__LC_INT_CLOCK
 729	stpt	__LC_ASYNC_ENTER_TIMER
 730	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 731	lg	%r10,__LC_LAST_BREAK
 732	lg	%r12,__LC_THREAD_INFO
 733	larl	%r13,cleanup_critical
 734	lmg	%r8,%r9,__LC_EXT_OLD_PSW
 735	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 736	stmg	%r0,%r7,__PT_R0(%r11)
 737	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 738	stmg	%r8,%r9,__PT_PSW(%r11)
 739	lghi	%r1,__LC_EXT_PARAMS2
 740	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
 741	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
 742	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
 743	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 744	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
 745	jo	.Lio_restore
 746	TRACE_IRQS_OFF
 747	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 748	lgr	%r2,%r11		# pass pointer to pt_regs
 749	lghi	%r3,EXT_INTERRUPT
 750	brasl	%r14,do_IRQ
 751	j	.Lio_return
 752
 753/*
 754 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 755 */
 756ENTRY(psw_idle)
 757	stg	%r3,__SF_EMPTY(%r15)
 758	larl	%r1,.Lpsw_idle_lpsw+4
 759	stg	%r1,__SF_EMPTY+8(%r15)
 760#ifdef CONFIG_SMP
 761	larl	%r1,smp_cpu_mtid
 762	llgf	%r1,0(%r1)
 763	ltgr	%r1,%r1
 764	jz	.Lpsw_idle_stcctm
 765	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
 766.Lpsw_idle_stcctm:
 767#endif
 768	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
 769	STCK	__CLOCK_IDLE_ENTER(%r2)
 770	stpt	__TIMER_IDLE_ENTER(%r2)
 771.Lpsw_idle_lpsw:
 772	lpswe	__SF_EMPTY(%r15)
 773	br	%r14
 774.Lpsw_idle_end:
 775
 776/*
 777 * Store floating-point controls and floating-point or vector registers,
 778 * depending on whether the vector facility is available. A critical section
 779 * cleanup assures that the registers are stored even if interrupted for
 780 * some other work.  The CIF_FPU flag is set to trigger a lazy restore
 781 * of the register contents at return from io or a system call.
 782 */
 783ENTRY(save_fpu_regs)
 784	lg	%r2,__LC_CURRENT
 785	aghi	%r2,__TASK_thread
 786	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 787	bor	%r14
 788	stfpc	__THREAD_FPU_fpc(%r2)
 789.Lsave_fpu_regs_fpc_end:
 790	lg	%r3,__THREAD_FPU_regs(%r2)
 791	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 792	jz	.Lsave_fpu_regs_fp	  # no -> store FP regs
 793.Lsave_fpu_regs_vx_low:
 794	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
 795.Lsave_fpu_regs_vx_high:
 796	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
 797	j	.Lsave_fpu_regs_done	  # -> set CIF_FPU flag
 798.Lsave_fpu_regs_fp:
 799	std	0,0(%r3)
 800	std	1,8(%r3)
 801	std	2,16(%r3)
 802	std	3,24(%r3)
 803	std	4,32(%r3)
 804	std	5,40(%r3)
 805	std	6,48(%r3)
 806	std	7,56(%r3)
 807	std	8,64(%r3)
 808	std	9,72(%r3)
 809	std	10,80(%r3)
 810	std	11,88(%r3)
 811	std	12,96(%r3)
 812	std	13,104(%r3)
 813	std	14,112(%r3)
 814	std	15,120(%r3)
 815.Lsave_fpu_regs_done:
 816	oi	__LC_CPU_FLAGS+7,_CIF_FPU
 817	br	%r14
 818.Lsave_fpu_regs_end:
 819
 820/*
 821 * Load floating-point controls and floating-point or vector registers.
 822 * A critical section cleanup assures that the register contents are
 823 * loaded even if interrupted for some other work.
 824 *
 825 * There are special calling conventions to fit into sysc and io return work:
 826 *	%r15:	<kernel stack>
 827 * The function requires:
 828 *	%r4 (used and clobbered as a scratch register)
 829 */
 830load_fpu_regs:
 831	lg	%r4,__LC_CURRENT
 832	aghi	%r4,__TASK_thread
 833	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 834	bnor	%r14
 835	lfpc	__THREAD_FPU_fpc(%r4)
 836	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 837	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
 838	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
 839.Lload_fpu_regs_vx:
 840	VLM	%v0,%v15,0,%r4
 841.Lload_fpu_regs_vx_high:
 842	VLM	%v16,%v31,256,%r4
 843	j	.Lload_fpu_regs_done
 844.Lload_fpu_regs_fp:
 845	ld	0,0(%r4)
 846	ld	1,8(%r4)
 847	ld	2,16(%r4)
 848	ld	3,24(%r4)
 849	ld	4,32(%r4)
 850	ld	5,40(%r4)
 851	ld	6,48(%r4)
 852	ld	7,56(%r4)
 853	ld	8,64(%r4)
 854	ld	9,72(%r4)
 855	ld	10,80(%r4)
 856	ld	11,88(%r4)
 857	ld	12,96(%r4)
 858	ld	13,104(%r4)
 859	ld	14,112(%r4)
 860	ld	15,120(%r4)
 861.Lload_fpu_regs_done:
 862	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
 863	br	%r14
 864.Lload_fpu_regs_end:
 865
 866.L__critical_end:
 867
 868/*
 869 * Machine check handler routines
 870 */
 871ENTRY(mcck_int_handler)
 872	STCK	__LC_MCCK_CLOCK
 873	la	%r1,4095		# revalidate r1
 874	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
 875	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
 876	lg	%r10,__LC_LAST_BREAK
 877	lg	%r12,__LC_THREAD_INFO
 878	larl	%r13,cleanup_critical
 879	lmg	%r8,%r9,__LC_MCK_OLD_PSW
 880	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
 881	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
 882	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
 883	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
 884	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
 885	jo	3f
 886	la	%r14,__LC_SYNC_ENTER_TIMER
 887	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
 888	jl	0f
 889	la	%r14,__LC_ASYNC_ENTER_TIMER
 8900:	clc	0(8,%r14),__LC_EXIT_TIMER
 891	jl	1f
 892	la	%r14,__LC_EXIT_TIMER
 8931:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
 894	jl	2f
 895	la	%r14,__LC_LAST_UPDATE_TIMER
 8962:	spt	0(%r14)
 897	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
 8983:	TSTMSK	__LC_MCCK_CODE,(MCCK_CODE_PSW_MWP_VALID|MCCK_CODE_PSW_IA_VALID)
 899	jno	.Lmcck_panic		# no -> skip cleanup critical
 900	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
 901.Lmcck_skip:
 902	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
 903	stmg	%r0,%r7,__PT_R0(%r11)
 904	mvc	__PT_R8(64,%r11),0(%r14)
 905	stmg	%r8,%r9,__PT_PSW(%r11)
 906	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 907	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 908	lgr	%r2,%r11		# pass pointer to pt_regs
 909	brasl	%r14,s390_do_machine_check
 910	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
 911	jno	.Lmcck_return
 912	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
 913	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 914	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
 915	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 916	lgr	%r15,%r1
 917	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
 918	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
 919	jno	.Lmcck_return
 920	TRACE_IRQS_OFF
 921	brasl	%r14,s390_handle_mcck
 922	TRACE_IRQS_ON
 923.Lmcck_return:
 924	lg	%r14,__LC_VDSO_PER_CPU
 925	lmg	%r0,%r10,__PT_R0(%r11)
 926	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
 927	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
 928	jno	0f
 929	stpt	__LC_EXIT_TIMER
 930	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 9310:	lmg	%r11,%r15,__PT_R11(%r11)
 932	lpswe	__LC_RETURN_MCCK_PSW
 933
 934.Lmcck_panic:
 935	lg	%r15,__LC_PANIC_STACK
 936	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 937	j	.Lmcck_skip
 938
 939#
 940# PSW restart interrupt handler
 941#
 942ENTRY(restart_int_handler)
 943	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
 944	jz	0f
 945	.insn	s,0xb2800000,__LC_LPP
 9460:	stg	%r15,__LC_SAVE_AREA_RESTART
 947	lg	%r15,__LC_RESTART_STACK
 948	aghi	%r15,-__PT_SIZE			# create pt_regs on stack
 949	xc	0(__PT_SIZE,%r15),0(%r15)
 950	stmg	%r0,%r14,__PT_R0(%r15)
 951	mvc	__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
 952	mvc	__PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
 953	aghi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
 954	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
 955	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
 956	lg	%r2,__LC_RESTART_DATA
 957	lg	%r3,__LC_RESTART_SOURCE
 958	ltgr	%r3,%r3				# test source cpu address
 959	jm	1f				# negative -> skip source stop
 9600:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
 961	brc	10,0b				# wait for status stored
 9621:	basr	%r14,%r1			# call function
 963	stap	__SF_EMPTY(%r15)		# store cpu address
 964	llgh	%r3,__SF_EMPTY(%r15)
 9652:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
 966	brc	2,2b
 9673:	j	3b
 968
 969	.section .kprobes.text, "ax"
 970
 971#ifdef CONFIG_CHECK_STACK
 972/*
 973 * The synchronous or the asynchronous stack overflowed. We are dead.
 974 * No need to properly save the registers, we are going to panic anyway.
 975 * Set up a pt_regs so that show_trace can provide a good call trace.
 976 */
 977stack_overflow:
 978	lg	%r15,__LC_PANIC_STACK	# change to panic stack
 979	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 980	stmg	%r0,%r7,__PT_R0(%r11)
 981	stmg	%r8,%r9,__PT_PSW(%r11)
 982	mvc	__PT_R8(64,%r11),0(%r14)
 983	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
 984	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 985	lgr	%r2,%r11		# pass pointer to pt_regs
 986	jg	kernel_stack_overflow
 987#endif
 988
 989cleanup_critical:
 990#if IS_ENABLED(CONFIG_KVM)
 991	clg	%r9,BASED(.Lcleanup_table_sie)	# .Lsie_gmap
 992	jl	0f
 993	clg	%r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
 994	jl	.Lcleanup_sie
 995#endif
 996	clg	%r9,BASED(.Lcleanup_table)	# system_call
 997	jl	0f
 998	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
 999	jl	.Lcleanup_system_call
1000	clg	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_tif
1001	jl	0f
1002	clg	%r9,BASED(.Lcleanup_table+24)	# .Lsysc_restore
1003	jl	.Lcleanup_sysc_tif
1004	clg	%r9,BASED(.Lcleanup_table+32)	# .Lsysc_done
1005	jl	.Lcleanup_sysc_restore
1006	clg	%r9,BASED(.Lcleanup_table+40)	# .Lio_tif
1007	jl	0f
1008	clg	%r9,BASED(.Lcleanup_table+48)	# .Lio_restore
1009	jl	.Lcleanup_io_tif
1010	clg	%r9,BASED(.Lcleanup_table+56)	# .Lio_done
1011	jl	.Lcleanup_io_restore
1012	clg	%r9,BASED(.Lcleanup_table+64)	# psw_idle
1013	jl	0f
1014	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
1015	jl	.Lcleanup_idle
1016	clg	%r9,BASED(.Lcleanup_table+80)	# save_fpu_regs
1017	jl	0f
1018	clg	%r9,BASED(.Lcleanup_table+88)	# .Lsave_fpu_regs_end
1019	jl	.Lcleanup_save_fpu_regs
1020	clg	%r9,BASED(.Lcleanup_table+96)	# load_fpu_regs
1021	jl	0f
1022	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
1023	jl	.Lcleanup_load_fpu_regs
10240:	br	%r14
1025
1026	.align	8
1027.Lcleanup_table:
1028	.quad	system_call
1029	.quad	.Lsysc_do_svc
1030	.quad	.Lsysc_tif
1031	.quad	.Lsysc_restore
1032	.quad	.Lsysc_done
1033	.quad	.Lio_tif
1034	.quad	.Lio_restore
1035	.quad	.Lio_done
1036	.quad	psw_idle
1037	.quad	.Lpsw_idle_end
1038	.quad	save_fpu_regs
1039	.quad	.Lsave_fpu_regs_end
1040	.quad	load_fpu_regs
1041	.quad	.Lload_fpu_regs_end
1042
1043#if IS_ENABLED(CONFIG_KVM)
1044.Lcleanup_table_sie:
1045	.quad	.Lsie_gmap
1046	.quad	.Lsie_done
1047
1048.Lcleanup_sie:
1049	lg	%r9,__SF_EMPTY(%r15)		# get control block pointer
1050	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
1051	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
1052	larl	%r9,sie_exit			# skip forward to sie_exit
1053	br	%r14
1054#endif
1055
1056.Lcleanup_system_call:
1057	# check if stpt has been executed
1058	clg	%r9,BASED(.Lcleanup_system_call_insn)
1059	jh	0f
1060	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
1061	cghi	%r11,__LC_SAVE_AREA_ASYNC
1062	je	0f
1063	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
10640:	# check if stmg has been executed
1065	clg	%r9,BASED(.Lcleanup_system_call_insn+8)
1066	jh	0f
1067	mvc	__LC_SAVE_AREA_SYNC(64),0(%r11)
10680:	# check if base register setup + TIF bit load has been done
1069	clg	%r9,BASED(.Lcleanup_system_call_insn+16)
1070	jhe	0f
1071	# set up saved registers r10 and r12
1072	stg	%r10,16(%r11)		# r10 last break
1073	stg	%r12,32(%r11)		# r12 thread-info pointer
10740:	# check if the user time update has been done
1075	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
1076	jh	0f
1077	lg	%r15,__LC_EXIT_TIMER
1078	slg	%r15,__LC_SYNC_ENTER_TIMER
1079	alg	%r15,__LC_USER_TIMER
1080	stg	%r15,__LC_USER_TIMER
10810:	# check if the system time update has been done
1082	clg	%r9,BASED(.Lcleanup_system_call_insn+32)
1083	jh	0f
1084	lg	%r15,__LC_LAST_UPDATE_TIMER
1085	slg	%r15,__LC_EXIT_TIMER
1086	alg	%r15,__LC_SYSTEM_TIMER
1087	stg	%r15,__LC_SYSTEM_TIMER
10880:	# update accounting time stamp
1089	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
1090	# do LAST_BREAK
1091	lg	%r9,16(%r11)
1092	srag	%r9,%r9,23
1093	jz	0f
1094	mvc	__TI_last_break(8,%r12),16(%r11)
10950:	# set up saved register r11
1096	lg	%r15,__LC_KERNEL_STACK
1097	la	%r9,STACK_FRAME_OVERHEAD(%r15)
1098	stg	%r9,24(%r11)		# r11 pt_regs pointer
1099	# fill pt_regs
1100	mvc	__PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
1101	stmg	%r0,%r7,__PT_R0(%r9)
1102	mvc	__PT_PSW(16,%r9),__LC_SVC_OLD_PSW
1103	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
1104	xc	__PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
1105	mvi	__PT_FLAGS+7(%r9),_PIF_SYSCALL
1106	# setup saved register r15
1107	stg	%r15,56(%r11)		# r15 stack pointer
1108	# set new psw address and exit
1109	larl	%r9,.Lsysc_do_svc
1110	br	%r14
1111.Lcleanup_system_call_insn:
1112	.quad	system_call
1113	.quad	.Lsysc_stmg
1114	.quad	.Lsysc_per
1115	.quad	.Lsysc_vtime+36
1116	.quad	.Lsysc_vtime+42
1117
1118.Lcleanup_sysc_tif:
1119	larl	%r9,.Lsysc_tif
1120	br	%r14
1121
1122.Lcleanup_sysc_restore:
1123	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
1124	je	0f
1125	lg	%r9,24(%r11)		# get saved pointer to pt_regs
1126	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
1127	mvc	0(64,%r11),__PT_R8(%r9)
1128	lmg	%r0,%r7,__PT_R0(%r9)
11290:	lmg	%r8,%r9,__LC_RETURN_PSW
1130	br	%r14
1131.Lcleanup_sysc_restore_insn:
1132	.quad	.Lsysc_done - 4
1133
1134.Lcleanup_io_tif:
1135	larl	%r9,.Lio_tif
1136	br	%r14
1137
1138.Lcleanup_io_restore:
1139	clg	%r9,BASED(.Lcleanup_io_restore_insn)
1140	je	0f
1141	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
1142	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
1143	mvc	0(64,%r11),__PT_R8(%r9)
1144	lmg	%r0,%r7,__PT_R0(%r9)
11450:	lmg	%r8,%r9,__LC_RETURN_PSW
1146	br	%r14
1147.Lcleanup_io_restore_insn:
1148	.quad	.Lio_done - 4
1149
1150.Lcleanup_idle:
1151	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
1152	# copy interrupt clock & cpu timer
1153	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
1154	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
1155	cghi	%r11,__LC_SAVE_AREA_ASYNC
1156	je	0f
1157	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
1158	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
11590:	# check if stck & stpt have been executed
1160	clg	%r9,BASED(.Lcleanup_idle_insn)
1161	jhe	1f
1162	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
1163	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
11641:	# calculate idle cycles
1165#ifdef CONFIG_SMP
1166	clg	%r9,BASED(.Lcleanup_idle_insn)
1167	jl	3f
1168	larl	%r1,smp_cpu_mtid
1169	llgf	%r1,0(%r1)
1170	ltgr	%r1,%r1
1171	jz	3f
1172	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
1173	larl	%r3,mt_cycles
1174	ag	%r3,__LC_PERCPU_OFFSET
1175	la	%r4,__SF_EMPTY+16(%r15)
11762:	lg	%r0,0(%r3)
1177	slg	%r0,0(%r4)
1178	alg	%r0,64(%r4)
1179	stg	%r0,0(%r3)
1180	la	%r3,8(%r3)
1181	la	%r4,8(%r4)
1182	brct	%r1,2b
1183#endif
11843:	# account system time going idle
1185	lg	%r9,__LC_STEAL_TIMER
1186	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
1187	slg	%r9,__LC_LAST_UPDATE_CLOCK
1188	stg	%r9,__LC_STEAL_TIMER
1189	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
1190	lg	%r9,__LC_SYSTEM_TIMER
1191	alg	%r9,__LC_LAST_UPDATE_TIMER
1192	slg	%r9,__TIMER_IDLE_ENTER(%r2)
1193	stg	%r9,__LC_SYSTEM_TIMER
1194	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
1195	# prepare return psw
1196	nihh	%r8,0xfcfd		# clear irq & wait state bits
1197	lg	%r9,48(%r11)		# return from psw_idle
1198	br	%r14
1199.Lcleanup_idle_insn:
1200	.quad	.Lpsw_idle_lpsw
1201
1202.Lcleanup_save_fpu_regs:
1203	larl	%r9,save_fpu_regs
1204	br	%r14
1205
1206.Lcleanup_load_fpu_regs:
1207	larl	%r9,load_fpu_regs
1208	br	%r14
1209
1210/*
1211 * Integer constants
1212 */
1213	.align	8
1214.Lcritical_start:
1215	.quad	.L__critical_start
1216.Lcritical_length:
1217	.quad	.L__critical_end - .L__critical_start
1218#if IS_ENABLED(CONFIG_KVM)
1219.Lsie_critical_start:
1220	.quad	.Lsie_gmap
1221.Lsie_critical_length:
1222	.quad	.Lsie_done - .Lsie_gmap
1223#endif
1224
1225	.section .rodata, "a"
1226#define SYSCALL(esame,emu)	.long esame
1227	.globl	sys_call_table
1228sys_call_table:
1229#include "syscalls.S"
1230#undef SYSCALL
1231
1232#ifdef CONFIG_COMPAT
1233
1234#define SYSCALL(esame,emu)	.long emu
1235	.globl	sys_call_table_emu
1236sys_call_table_emu:
1237#include "syscalls.S"
1238#undef SYSCALL
1239#endif