Loading...
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * S390 low-level entry points.
4 *
5 * Copyright IBM Corp. 1999, 2012
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Hartmut Penner (hp@de.ibm.com),
8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9 */
10
11#include <linux/init.h>
12#include <linux/linkage.h>
13#include <asm/asm-extable.h>
14#include <asm/alternative-asm.h>
15#include <asm/processor.h>
16#include <asm/cache.h>
17#include <asm/dwarf.h>
18#include <asm/errno.h>
19#include <asm/ptrace.h>
20#include <asm/thread_info.h>
21#include <asm/asm-offsets.h>
22#include <asm/unistd.h>
23#include <asm/page.h>
24#include <asm/sigp.h>
25#include <asm/irq.h>
26#include <asm/vx-insn.h>
27#include <asm/setup.h>
28#include <asm/nmi.h>
29#include <asm/export.h>
30#include <asm/nospec-insn.h>
31
32STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
33STACK_SIZE = 1 << STACK_SHIFT
34STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
35
36_LPP_OFFSET = __LC_LPP
37
38 .macro STBEAR address
39 ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193
40 .endm
41
42 .macro LBEAR address
43 ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193
44 .endm
45
46 .macro LPSWEY address,lpswe
47 ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193
48 .endm
49
50 .macro MBEAR reg
51 ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
52 .endm
53
54 .macro CHECK_STACK savearea
55#ifdef CONFIG_CHECK_STACK
56 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
57 lghi %r14,\savearea
58 jz stack_overflow
59#endif
60 .endm
61
62 .macro CHECK_VMAP_STACK savearea,oklabel
63#ifdef CONFIG_VMAP_STACK
64 lgr %r14,%r15
65 nill %r14,0x10000 - STACK_SIZE
66 oill %r14,STACK_INIT
67 clg %r14,__LC_KERNEL_STACK
68 je \oklabel
69 clg %r14,__LC_ASYNC_STACK
70 je \oklabel
71 clg %r14,__LC_MCCK_STACK
72 je \oklabel
73 clg %r14,__LC_NODAT_STACK
74 je \oklabel
75 clg %r14,__LC_RESTART_STACK
76 je \oklabel
77 lghi %r14,\savearea
78 j stack_overflow
79#else
80 j \oklabel
81#endif
82 .endm
83
84 /*
85 * The TSTMSK macro generates a test-under-mask instruction by
86 * calculating the memory offset for the specified mask value.
87 * Mask value can be any constant. The macro shifts the mask
88 * value to calculate the memory offset for the test-under-mask
89 * instruction.
90 */
91 .macro TSTMSK addr, mask, size=8, bytepos=0
92 .if (\bytepos < \size) && (\mask >> 8)
93 .if (\mask & 0xff)
94 .error "Mask exceeds byte boundary"
95 .endif
96 TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
97 .exitm
98 .endif
99 .ifeq \mask
100 .error "Mask must not be zero"
101 .endif
102 off = \size - \bytepos - 1
103 tm off+\addr, \mask
104 .endm
105
106 .macro BPOFF
107 ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82
108 .endm
109
110 .macro BPON
111 ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
112 .endm
113
114 .macro BPENTER tif_ptr,tif_mask
115 ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
116 "j .+12; nop; nop", 82
117 .endm
118
119 .macro BPEXIT tif_ptr,tif_mask
120 TSTMSK \tif_ptr,\tif_mask
121 ALTERNATIVE "jz .+8; .insn rrf,0xb2e80000,0,0,12,0", \
122 "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
123 .endm
124
125#if IS_ENABLED(CONFIG_KVM)
126 /*
127 * The OUTSIDE macro jumps to the provided label in case the value
128 * in the provided register is outside of the provided range. The
129 * macro is useful for checking whether a PSW stored in a register
130 * pair points inside or outside of a block of instructions.
131 * @reg: register to check
132 * @start: start of the range
133 * @end: end of the range
134 * @outside_label: jump here if @reg is outside of [@start..@end)
135 */
136 .macro OUTSIDE reg,start,end,outside_label
137 lgr %r14,\reg
138 larl %r13,\start
139 slgr %r14,%r13
140#ifdef CONFIG_AS_IS_LLVM
141 clgfrl %r14,.Lrange_size\@
142#else
143 clgfi %r14,\end - \start
144#endif
145 jhe \outside_label
146#ifdef CONFIG_AS_IS_LLVM
147 .section .rodata, "a"
148 .align 4
149.Lrange_size\@:
150 .long \end - \start
151 .previous
152#endif
153 .endm
154
155 .macro SIEEXIT
156 lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer
157 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
158 lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce
159 larl %r9,sie_exit # skip forward to sie_exit
160 .endm
161#endif
162
163 GEN_BR_THUNK %r14
164
165 .section .kprobes.text, "ax"
166.Ldummy:
167 /*
168 * This nop exists only in order to avoid that __bpon starts at
169 * the beginning of the kprobes text section. In that case we would
170 * have several symbols at the same address. E.g. objdump would take
171 * an arbitrary symbol name when disassembling this code.
172 * With the added nop in between the __bpon symbol is unique
173 * again.
174 */
175 nop 0
176
177ENTRY(__bpon)
178 .globl __bpon
179 BPON
180 BR_EX %r14
181ENDPROC(__bpon)
182
183/*
184 * Scheduler resume function, called by switch_to
185 * gpr2 = (task_struct *) prev
186 * gpr3 = (task_struct *) next
187 * Returns:
188 * gpr2 = prev
189 */
190ENTRY(__switch_to)
191 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
192 lghi %r4,__TASK_stack
193 lghi %r1,__TASK_thread
194 llill %r5,STACK_INIT
195 stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev
196 lg %r15,0(%r4,%r3) # start of kernel stack of next
197 agr %r15,%r5 # end of kernel stack of next
198 stg %r3,__LC_CURRENT # store task struct of next
199 stg %r15,__LC_KERNEL_STACK # store end of kernel stack
200 lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next
201 aghi %r3,__TASK_pid
202 mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next
203 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
204 ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
205 BR_EX %r14
206ENDPROC(__switch_to)
207
208#if IS_ENABLED(CONFIG_KVM)
209/*
210 * __sie64a calling convention:
211 * %r2 pointer to sie control block phys
212 * %r3 pointer to sie control block virt
213 * %r4 guest register save area
214 */
215ENTRY(__sie64a)
216 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
217 lg %r12,__LC_CURRENT
218 stg %r2,__SF_SIE_CONTROL_PHYS(%r15) # save sie block physical..
219 stg %r3,__SF_SIE_CONTROL(%r15) # ...and virtual addresses
220 stg %r4,__SF_SIE_SAVEAREA(%r15) # save guest register save area
221 xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
222 mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
223 lmg %r0,%r13,0(%r4) # load guest gprs 0-13
224 lg %r14,__LC_GMAP # get gmap pointer
225 ltgr %r14,%r14
226 jz .Lsie_gmap
227 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
228.Lsie_gmap:
229 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
230 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
231 tm __SIE_PROG20+3(%r14),3 # last exit...
232 jnz .Lsie_skip
233 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
234 jo .Lsie_skip # exit if fp/vx regs changed
235 lg %r14,__SF_SIE_CONTROL_PHYS(%r15) # get sie block phys addr
236 BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
237.Lsie_entry:
238 sie 0(%r14)
239# Let the next instruction be NOP to avoid triggering a machine check
240# and handling it in a guest as result of the instruction execution.
241 nopr 7
242.Lsie_leave:
243 BPOFF
244 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
245.Lsie_skip:
246 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
247 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
248 lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce
249.Lsie_done:
250# some program checks are suppressing. C code (e.g. do_protection_exception)
251# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
252# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
253# Other instructions between __sie64a and .Lsie_done should not cause program
254# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
255.Lrewind_pad6:
256 nopr 7
257.Lrewind_pad4:
258 nopr 7
259.Lrewind_pad2:
260 nopr 7
261 .globl sie_exit
262sie_exit:
263 lg %r14,__SF_SIE_SAVEAREA(%r15) # load guest register save area
264 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
265 xgr %r0,%r0 # clear guest registers to
266 xgr %r1,%r1 # prevent speculative use
267 xgr %r3,%r3
268 xgr %r4,%r4
269 xgr %r5,%r5
270 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
271 lg %r2,__SF_SIE_REASON(%r15) # return exit reason code
272 BR_EX %r14
273.Lsie_fault:
274 lghi %r14,-EFAULT
275 stg %r14,__SF_SIE_REASON(%r15) # set exit reason code
276 j sie_exit
277
278 EX_TABLE(.Lrewind_pad6,.Lsie_fault)
279 EX_TABLE(.Lrewind_pad4,.Lsie_fault)
280 EX_TABLE(.Lrewind_pad2,.Lsie_fault)
281 EX_TABLE(sie_exit,.Lsie_fault)
282ENDPROC(__sie64a)
283EXPORT_SYMBOL(__sie64a)
284EXPORT_SYMBOL(sie_exit)
285#endif
286
287/*
288 * SVC interrupt handler routine. System calls are synchronous events and
289 * are entered with interrupts disabled.
290 */
291
292ENTRY(system_call)
293 stpt __LC_SYS_ENTER_TIMER
294 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
295 BPOFF
296 lghi %r14,0
297.Lsysc_per:
298 STBEAR __LC_LAST_BREAK
299 lctlg %c1,%c1,__LC_KERNEL_ASCE
300 lg %r12,__LC_CURRENT
301 lg %r15,__LC_KERNEL_STACK
302 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
303 stmg %r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
304 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
305 # clear user controlled register to prevent speculative use
306 xgr %r0,%r0
307 xgr %r1,%r1
308 xgr %r4,%r4
309 xgr %r5,%r5
310 xgr %r6,%r6
311 xgr %r7,%r7
312 xgr %r8,%r8
313 xgr %r9,%r9
314 xgr %r10,%r10
315 xgr %r11,%r11
316 la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
317 mvc __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
318 MBEAR %r2
319 lgr %r3,%r14
320 brasl %r14,__do_syscall
321 lctlg %c1,%c1,__LC_USER_ASCE
322 mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
323 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
324 LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
325 lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
326 stpt __LC_EXIT_TIMER
327 LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
328ENDPROC(system_call)
329
330#
331# a new process exits the kernel with ret_from_fork
332#
333ENTRY(ret_from_fork)
334 lgr %r3,%r11
335 brasl %r14,__ret_from_fork
336 lctlg %c1,%c1,__LC_USER_ASCE
337 mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
338 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
339 LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
340 lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
341 stpt __LC_EXIT_TIMER
342 LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
343ENDPROC(ret_from_fork)
344
345/*
346 * Program check handler routine
347 */
348
349ENTRY(pgm_check_handler)
350 stpt __LC_SYS_ENTER_TIMER
351 BPOFF
352 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
353 lg %r12,__LC_CURRENT
354 lghi %r10,0
355 lmg %r8,%r9,__LC_PGM_OLD_PSW
356 tmhh %r8,0x0001 # coming from user space?
357 jno .Lpgm_skip_asce
358 lctlg %c1,%c1,__LC_KERNEL_ASCE
359 j 3f # -> fault in user space
360.Lpgm_skip_asce:
361#if IS_ENABLED(CONFIG_KVM)
362 # cleanup critical section for program checks in __sie64a
363 OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f
364 SIEEXIT
365 lghi %r10,_PIF_GUEST_FAULT
366#endif
3671: tmhh %r8,0x4000 # PER bit set in old PSW ?
368 jnz 2f # -> enabled, can't be a double fault
369 tm __LC_PGM_ILC+3,0x80 # check for per exception
370 jnz .Lpgm_svcper # -> single stepped svc
3712: CHECK_STACK __LC_SAVE_AREA_SYNC
372 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
373 # CHECK_VMAP_STACK branches to stack_overflow or 4f
374 CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3753: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
376 lg %r15,__LC_KERNEL_STACK
3774: la %r11,STACK_FRAME_OVERHEAD(%r15)
378 stg %r10,__PT_FLAGS(%r11)
379 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
380 stmg %r0,%r7,__PT_R0(%r11)
381 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
382 mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
383 stmg %r8,%r9,__PT_PSW(%r11)
384
385 # clear user controlled registers to prevent speculative use
386 xgr %r0,%r0
387 xgr %r1,%r1
388 xgr %r3,%r3
389 xgr %r4,%r4
390 xgr %r5,%r5
391 xgr %r6,%r6
392 xgr %r7,%r7
393 lgr %r2,%r11
394 brasl %r14,__do_pgm_check
395 tmhh %r8,0x0001 # returning to user space?
396 jno .Lpgm_exit_kernel
397 lctlg %c1,%c1,__LC_USER_ASCE
398 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
399 stpt __LC_EXIT_TIMER
400.Lpgm_exit_kernel:
401 mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
402 LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
403 lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
404 LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
405
406#
407# single stepped system call
408#
409.Lpgm_svcper:
410 mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
411 larl %r14,.Lsysc_per
412 stg %r14,__LC_RETURN_PSW+8
413 lghi %r14,1
414 LBEAR __LC_PGM_LAST_BREAK
415 LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
416ENDPROC(pgm_check_handler)
417
418/*
419 * Interrupt handler macro used for external and IO interrupts.
420 */
421.macro INT_HANDLER name,lc_old_psw,handler
422ENTRY(\name)
423 stckf __LC_INT_CLOCK
424 stpt __LC_SYS_ENTER_TIMER
425 STBEAR __LC_LAST_BREAK
426 BPOFF
427 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
428 lg %r12,__LC_CURRENT
429 lmg %r8,%r9,\lc_old_psw
430 tmhh %r8,0x0001 # interrupting from user ?
431 jnz 1f
432#if IS_ENABLED(CONFIG_KVM)
433 OUTSIDE %r9,.Lsie_gmap,.Lsie_done,0f
434 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
435 SIEEXIT
436#endif
4370: CHECK_STACK __LC_SAVE_AREA_ASYNC
438 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
439 j 2f
4401: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
441 lctlg %c1,%c1,__LC_KERNEL_ASCE
442 lg %r15,__LC_KERNEL_STACK
4432: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
444 la %r11,STACK_FRAME_OVERHEAD(%r15)
445 stmg %r0,%r7,__PT_R0(%r11)
446 # clear user controlled registers to prevent speculative use
447 xgr %r0,%r0
448 xgr %r1,%r1
449 xgr %r3,%r3
450 xgr %r4,%r4
451 xgr %r5,%r5
452 xgr %r6,%r6
453 xgr %r7,%r7
454 xgr %r10,%r10
455 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
456 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
457 MBEAR %r11
458 stmg %r8,%r9,__PT_PSW(%r11)
459 lgr %r2,%r11 # pass pointer to pt_regs
460 brasl %r14,\handler
461 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
462 tmhh %r8,0x0001 # returning to user ?
463 jno 2f
464 lctlg %c1,%c1,__LC_USER_ASCE
465 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
466 stpt __LC_EXIT_TIMER
4672: LBEAR __PT_LAST_BREAK(%r11)
468 lmg %r0,%r15,__PT_R0(%r11)
469 LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
470ENDPROC(\name)
471.endm
472
473INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
474INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
475
476/*
477 * Load idle PSW.
478 */
479ENTRY(psw_idle)
480 stg %r14,(__SF_GPRS+8*8)(%r15)
481 stg %r3,__SF_EMPTY(%r15)
482 larl %r1,psw_idle_exit
483 stg %r1,__SF_EMPTY+8(%r15)
484 larl %r1,smp_cpu_mtid
485 llgf %r1,0(%r1)
486 ltgr %r1,%r1
487 jz .Lpsw_idle_stcctm
488 .insn rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
489.Lpsw_idle_stcctm:
490 oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
491 BPON
492 stckf __CLOCK_IDLE_ENTER(%r2)
493 stpt __TIMER_IDLE_ENTER(%r2)
494 lpswe __SF_EMPTY(%r15)
495.globl psw_idle_exit
496psw_idle_exit:
497 BR_EX %r14
498ENDPROC(psw_idle)
499
500/*
501 * Machine check handler routines
502 */
503ENTRY(mcck_int_handler)
504 stckf __LC_MCCK_CLOCK
505 BPOFF
506 la %r1,4095 # validate r1
507 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
508 LBEAR __LC_LAST_BREAK_SAVE_AREA-4095(%r1) # validate bear
509 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
510 lg %r12,__LC_CURRENT
511 lmg %r8,%r9,__LC_MCK_OLD_PSW
512 TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
513 jo .Lmcck_panic # yes -> rest of mcck code invalid
514 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CR_VALID
515 jno .Lmcck_panic # control registers invalid -> panic
516 la %r14,4095
517 lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
518 ptlb
519 lghi %r14,__LC_CPU_TIMER_SAVE_AREA
520 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
521 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
522 jo 3f
523 la %r14,__LC_SYS_ENTER_TIMER
524 clc 0(8,%r14),__LC_EXIT_TIMER
525 jl 1f
526 la %r14,__LC_EXIT_TIMER
5271: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
528 jl 2f
529 la %r14,__LC_LAST_UPDATE_TIMER
5302: spt 0(%r14)
531 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
5323: TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
533 jno .Lmcck_panic
534 tmhh %r8,0x0001 # interrupting from user ?
535 jnz .Lmcck_user
536 TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
537 jno .Lmcck_panic
538#if IS_ENABLED(CONFIG_KVM)
539 OUTSIDE %r9,.Lsie_gmap,.Lsie_done,.Lmcck_stack
540 OUTSIDE %r9,.Lsie_entry,.Lsie_leave,4f
541 oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
5424: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
543 SIEEXIT
544 j .Lmcck_stack
545#endif
546.Lmcck_user:
547 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
548.Lmcck_stack:
549 lg %r15,__LC_MCCK_STACK
550 la %r11,STACK_FRAME_OVERHEAD(%r15)
551 stctg %c1,%c1,__PT_CR1(%r11)
552 lctlg %c1,%c1,__LC_KERNEL_ASCE
553 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
554 lghi %r14,__LC_GPREGS_SAVE_AREA+64
555 stmg %r0,%r7,__PT_R0(%r11)
556 # clear user controlled registers to prevent speculative use
557 xgr %r0,%r0
558 xgr %r1,%r1
559 xgr %r3,%r3
560 xgr %r4,%r4
561 xgr %r5,%r5
562 xgr %r6,%r6
563 xgr %r7,%r7
564 xgr %r10,%r10
565 mvc __PT_R8(64,%r11),0(%r14)
566 stmg %r8,%r9,__PT_PSW(%r11)
567 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
568 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
569 lgr %r2,%r11 # pass pointer to pt_regs
570 brasl %r14,s390_do_machine_check
571 cghi %r2,0
572 je .Lmcck_return
573 lg %r1,__LC_KERNEL_STACK # switch to kernel stack
574 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
575 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
576 la %r11,STACK_FRAME_OVERHEAD(%r1)
577 lgr %r2,%r11
578 lgr %r15,%r1
579 brasl %r14,s390_handle_mcck
580.Lmcck_return:
581 lctlg %c1,%c1,__PT_CR1(%r11)
582 lmg %r0,%r10,__PT_R0(%r11)
583 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
584 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
585 jno 0f
586 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
587 stpt __LC_EXIT_TIMER
5880: ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
589 LBEAR 0(%r12)
590 lmg %r11,%r15,__PT_R11(%r11)
591 LPSWEY __LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE
592
593.Lmcck_panic:
594 /*
595 * Iterate over all possible CPU addresses in the range 0..0xffff
596 * and stop each CPU using signal processor. Use compare and swap
597 * to allow just one CPU-stopper and prevent concurrent CPUs from
598 * stopping each other while leaving the others running.
599 */
600 lhi %r5,0
601 lhi %r6,1
602 larl %r7,.Lstop_lock
603 cs %r5,%r6,0(%r7) # single CPU-stopper only
604 jnz 4f
605 larl %r7,.Lthis_cpu
606 stap 0(%r7) # this CPU address
607 lh %r4,0(%r7)
608 nilh %r4,0
609 lhi %r0,1
610 sll %r0,16 # CPU counter
611 lhi %r3,0 # next CPU address
6120: cr %r3,%r4
613 je 2f
6141: sigp %r1,%r3,SIGP_STOP # stop next CPU
615 brc SIGP_CC_BUSY,1b
6162: ahi %r3,1
617 brct %r0,0b
6183: sigp %r1,%r4,SIGP_STOP # stop this CPU
619 brc SIGP_CC_BUSY,3b
6204: j 4b
621ENDPROC(mcck_int_handler)
622
623ENTRY(restart_int_handler)
624 ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
625 stg %r15,__LC_SAVE_AREA_RESTART
626 TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
627 jz 0f
628 la %r15,4095
629 lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
6300: larl %r15,.Lstosm_tmp
631 stosm 0(%r15),0x04 # turn dat on, keep irqs off
632 lg %r15,__LC_RESTART_STACK
633 xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
634 stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
635 mvc STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
636 mvc STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
637 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
638 lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
639 lg %r2,__LC_RESTART_DATA
640 lgf %r3,__LC_RESTART_SOURCE
641 ltgr %r3,%r3 # test source cpu address
642 jm 1f # negative -> skip source stop
6430: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
644 brc 10,0b # wait for status stored
6451: basr %r14,%r1 # call function
646 stap __SF_EMPTY(%r15) # store cpu address
647 llgh %r3,__SF_EMPTY(%r15)
6482: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
649 brc 2,2b
6503: j 3b
651ENDPROC(restart_int_handler)
652
653 .section .kprobes.text, "ax"
654
655#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
656/*
657 * The synchronous or the asynchronous stack overflowed. We are dead.
658 * No need to properly save the registers, we are going to panic anyway.
659 * Setup a pt_regs so that show_trace can provide a good call trace.
660 */
661ENTRY(stack_overflow)
662 lg %r15,__LC_NODAT_STACK # change to panic stack
663 la %r11,STACK_FRAME_OVERHEAD(%r15)
664 stmg %r0,%r7,__PT_R0(%r11)
665 stmg %r8,%r9,__PT_PSW(%r11)
666 mvc __PT_R8(64,%r11),0(%r14)
667 stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
668 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
669 lgr %r2,%r11 # pass pointer to pt_regs
670 jg kernel_stack_overflow
671ENDPROC(stack_overflow)
672#endif
673
674 .section .data, "aw"
675 .align 4
676.Lstop_lock: .long 0
677.Lthis_cpu: .short 0
678.Lstosm_tmp: .byte 0
679 .section .rodata, "a"
680#define SYSCALL(esame,emu) .quad __s390x_ ## esame
681 .globl sys_call_table
682sys_call_table:
683#include "asm/syscall_table.h"
684#undef SYSCALL
685
686#ifdef CONFIG_COMPAT
687
688#define SYSCALL(esame,emu) .quad __s390_ ## emu
689 .globl sys_call_table_emu
690sys_call_table_emu:
691#include "asm/syscall_table.h"
692#undef SYSCALL
693#endif
1/*
2 * S390 low-level entry points.
3 *
4 * Copyright IBM Corp. 1999, 2012
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
6 * Hartmut Penner (hp@de.ibm.com),
7 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
8 * Heiko Carstens <heiko.carstens@de.ibm.com>
9 */
10
11#include <linux/init.h>
12#include <linux/linkage.h>
13#include <asm/processor.h>
14#include <asm/cache.h>
15#include <asm/errno.h>
16#include <asm/ptrace.h>
17#include <asm/thread_info.h>
18#include <asm/asm-offsets.h>
19#include <asm/unistd.h>
20#include <asm/page.h>
21#include <asm/sigp.h>
22#include <asm/irq.h>
23#include <asm/vx-insn.h>
24#include <asm/setup.h>
25#include <asm/nmi.h>
26
27__PT_R0 = __PT_GPRS
28__PT_R1 = __PT_GPRS + 8
29__PT_R2 = __PT_GPRS + 16
30__PT_R3 = __PT_GPRS + 24
31__PT_R4 = __PT_GPRS + 32
32__PT_R5 = __PT_GPRS + 40
33__PT_R6 = __PT_GPRS + 48
34__PT_R7 = __PT_GPRS + 56
35__PT_R8 = __PT_GPRS + 64
36__PT_R9 = __PT_GPRS + 72
37__PT_R10 = __PT_GPRS + 80
38__PT_R11 = __PT_GPRS + 88
39__PT_R12 = __PT_GPRS + 96
40__PT_R13 = __PT_GPRS + 104
41__PT_R14 = __PT_GPRS + 112
42__PT_R15 = __PT_GPRS + 120
43
44STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
45STACK_SIZE = 1 << STACK_SHIFT
46STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
47
48_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
49 _TIF_UPROBE)
50_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
51 _TIF_SYSCALL_TRACEPOINT)
52_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE | _CIF_FPU)
53_PIF_WORK = (_PIF_PER_TRAP)
54
55#define BASED(name) name-cleanup_critical(%r13)
56
57 .macro TRACE_IRQS_ON
58#ifdef CONFIG_TRACE_IRQFLAGS
59 basr %r2,%r0
60 brasl %r14,trace_hardirqs_on_caller
61#endif
62 .endm
63
64 .macro TRACE_IRQS_OFF
65#ifdef CONFIG_TRACE_IRQFLAGS
66 basr %r2,%r0
67 brasl %r14,trace_hardirqs_off_caller
68#endif
69 .endm
70
71 .macro LOCKDEP_SYS_EXIT
72#ifdef CONFIG_LOCKDEP
73 tm __PT_PSW+1(%r11),0x01 # returning to user ?
74 jz .+10
75 brasl %r14,lockdep_sys_exit
76#endif
77 .endm
78
79 .macro CHECK_STACK stacksize,savearea
80#ifdef CONFIG_CHECK_STACK
81 tml %r15,\stacksize - CONFIG_STACK_GUARD
82 lghi %r14,\savearea
83 jz stack_overflow
84#endif
85 .endm
86
87 .macro SWITCH_ASYNC savearea,timer
88 tmhh %r8,0x0001 # interrupting from user ?
89 jnz 1f
90 lgr %r14,%r9
91 slg %r14,BASED(.Lcritical_start)
92 clg %r14,BASED(.Lcritical_length)
93 jhe 0f
94 lghi %r11,\savearea # inside critical section, do cleanup
95 brasl %r14,cleanup_critical
96 tmhh %r8,0x0001 # retest problem state after cleanup
97 jnz 1f
980: lg %r14,__LC_ASYNC_STACK # are we already on the async stack?
99 slgr %r14,%r15
100 srag %r14,%r14,STACK_SHIFT
101 jnz 2f
102 CHECK_STACK 1<<STACK_SHIFT,\savearea
103 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
104 j 3f
1051: LAST_BREAK %r14
106 UPDATE_VTIME %r14,%r15,\timer
1072: lg %r15,__LC_ASYNC_STACK # load async stack
1083: la %r11,STACK_FRAME_OVERHEAD(%r15)
109 .endm
110
111 .macro UPDATE_VTIME w1,w2,enter_timer
112 lg \w1,__LC_EXIT_TIMER
113 lg \w2,__LC_LAST_UPDATE_TIMER
114 slg \w1,\enter_timer
115 slg \w2,__LC_EXIT_TIMER
116 alg \w1,__LC_USER_TIMER
117 alg \w2,__LC_SYSTEM_TIMER
118 stg \w1,__LC_USER_TIMER
119 stg \w2,__LC_SYSTEM_TIMER
120 mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
121 .endm
122
123 .macro LAST_BREAK scratch
124 srag \scratch,%r10,23
125 jz .+10
126 stg %r10,__TI_last_break(%r12)
127 .endm
128
129 .macro REENABLE_IRQS
130 stg %r8,__LC_RETURN_PSW
131 ni __LC_RETURN_PSW,0xbf
132 ssm __LC_RETURN_PSW
133 .endm
134
135 .macro STCK savearea
136#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
137 .insn s,0xb27c0000,\savearea # store clock fast
138#else
139 .insn s,0xb2050000,\savearea # store clock
140#endif
141 .endm
142
143 /*
144 * The TSTMSK macro generates a test-under-mask instruction by
145 * calculating the memory offset for the specified mask value.
146 * Mask value can be any constant. The macro shifts the mask
147 * value to calculate the memory offset for the test-under-mask
148 * instruction.
149 */
150 .macro TSTMSK addr, mask, size=8, bytepos=0
151 .if (\bytepos < \size) && (\mask >> 8)
152 .if (\mask & 0xff)
153 .error "Mask exceeds byte boundary"
154 .endif
155 TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
156 .exitm
157 .endif
158 .ifeq \mask
159 .error "Mask must not be zero"
160 .endif
161 off = \size - \bytepos - 1
162 tm off+\addr, \mask
163 .endm
164
165 .section .kprobes.text, "ax"
166
167/*
168 * Scheduler resume function, called by switch_to
169 * gpr2 = (task_struct *) prev
170 * gpr3 = (task_struct *) next
171 * Returns:
172 * gpr2 = prev
173 */
174ENTRY(__switch_to)
175 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
176 lgr %r1,%r2
177 aghi %r1,__TASK_thread # thread_struct of prev task
178 lg %r4,__TASK_thread_info(%r2) # get thread_info of prev
179 lg %r5,__TASK_thread_info(%r3) # get thread_info of next
180 stg %r15,__THREAD_ksp(%r1) # store kernel stack of prev
181 lgr %r1,%r3
182 aghi %r1,__TASK_thread # thread_struct of next task
183 lgr %r15,%r5
184 aghi %r15,STACK_INIT # end of kernel stack of next
185 stg %r3,__LC_CURRENT # store task struct of next
186 stg %r5,__LC_THREAD_INFO # store thread info of next
187 stg %r15,__LC_KERNEL_STACK # store end of kernel stack
188 lg %r15,__THREAD_ksp(%r1) # load kernel stack of next
189 /* c4 is used in guest detection: arch/s390/kernel/perf_cpum_sf.c */
190 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
191 mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
192 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
193 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
194 bzr %r14
195 .insn s,0xb2800000,__LC_LPP # set program parameter
196 br %r14
197
198.L__critical_start:
199
200#if IS_ENABLED(CONFIG_KVM)
201/*
202 * sie64a calling convention:
203 * %r2 pointer to sie control block
204 * %r3 guest register save area
205 */
206ENTRY(sie64a)
207 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
208 stg %r2,__SF_EMPTY(%r15) # save control block pointer
209 stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
210 xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
211 TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ?
212 jno .Lsie_load_guest_gprs
213 brasl %r14,load_fpu_regs # load guest fp/vx regs
214.Lsie_load_guest_gprs:
215 lmg %r0,%r13,0(%r3) # load guest gprs 0-13
216 lg %r14,__LC_GMAP # get gmap pointer
217 ltgr %r14,%r14
218 jz .Lsie_gmap
219 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
220.Lsie_gmap:
221 lg %r14,__SF_EMPTY(%r15) # get control block pointer
222 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
223 tm __SIE_PROG20+3(%r14),3 # last exit...
224 jnz .Lsie_skip
225 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
226 jo .Lsie_skip # exit if fp/vx regs changed
227 sie 0(%r14)
228.Lsie_skip:
229 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
230 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
231.Lsie_done:
232# some program checks are suppressing. C code (e.g. do_protection_exception)
233# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
234# instructions between sie64a and .Lsie_done should not cause program
235# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
236# See also .Lcleanup_sie
237.Lrewind_pad:
238 nop 0
239 .globl sie_exit
240sie_exit:
241 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
242 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
243 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
244 lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
245 br %r14
246.Lsie_fault:
247 lghi %r14,-EFAULT
248 stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
249 j sie_exit
250
251 EX_TABLE(.Lrewind_pad,.Lsie_fault)
252 EX_TABLE(sie_exit,.Lsie_fault)
253#endif
254
255/*
256 * SVC interrupt handler routine. System calls are synchronous events and
257 * are executed with interrupts enabled.
258 */
259
260ENTRY(system_call)
261 stpt __LC_SYNC_ENTER_TIMER
262.Lsysc_stmg:
263 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
264 lg %r10,__LC_LAST_BREAK
265 lg %r12,__LC_THREAD_INFO
266 lghi %r14,_PIF_SYSCALL
267.Lsysc_per:
268 lg %r15,__LC_KERNEL_STACK
269 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
270 LAST_BREAK %r13
271.Lsysc_vtime:
272 UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
273 stmg %r0,%r7,__PT_R0(%r11)
274 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
275 mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
276 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
277 stg %r14,__PT_FLAGS(%r11)
278.Lsysc_do_svc:
279 lg %r10,__TI_sysc_table(%r12) # address of system call table
280 llgh %r8,__PT_INT_CODE+2(%r11)
281 slag %r8,%r8,2 # shift and test for svc 0
282 jnz .Lsysc_nr_ok
283 # svc 0: system call number in %r1
284 llgfr %r1,%r1 # clear high word in r1
285 cghi %r1,NR_syscalls
286 jnl .Lsysc_nr_ok
287 sth %r1,__PT_INT_CODE+2(%r11)
288 slag %r8,%r1,2
289.Lsysc_nr_ok:
290 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
291 stg %r2,__PT_ORIG_GPR2(%r11)
292 stg %r7,STACK_FRAME_OVERHEAD(%r15)
293 lgf %r9,0(%r8,%r10) # get system call add.
294 TSTMSK __TI_flags(%r12),_TIF_TRACE
295 jnz .Lsysc_tracesys
296 basr %r14,%r9 # call sys_xxxx
297 stg %r2,__PT_R2(%r11) # store return value
298
299.Lsysc_return:
300 LOCKDEP_SYS_EXIT
301.Lsysc_tif:
302 TSTMSK __PT_FLAGS(%r11),_PIF_WORK
303 jnz .Lsysc_work
304 TSTMSK __TI_flags(%r12),_TIF_WORK
305 jnz .Lsysc_work # check for work
306 TSTMSK __LC_CPU_FLAGS,_CIF_WORK
307 jnz .Lsysc_work
308.Lsysc_restore:
309 lg %r14,__LC_VDSO_PER_CPU
310 lmg %r0,%r10,__PT_R0(%r11)
311 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
312 stpt __LC_EXIT_TIMER
313 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
314 lmg %r11,%r15,__PT_R11(%r11)
315 lpswe __LC_RETURN_PSW
316.Lsysc_done:
317
318#
319# One of the work bits is on. Find out which one.
320#
321.Lsysc_work:
322 TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
323 jo .Lsysc_mcck_pending
324 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
325 jo .Lsysc_reschedule
326#ifdef CONFIG_UPROBES
327 TSTMSK __TI_flags(%r12),_TIF_UPROBE
328 jo .Lsysc_uprobe_notify
329#endif
330 TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP
331 jo .Lsysc_singlestep
332 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING
333 jo .Lsysc_sigpending
334 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
335 jo .Lsysc_notify_resume
336 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
337 jo .Lsysc_vxrs
338 TSTMSK __LC_CPU_FLAGS,_CIF_ASCE
339 jo .Lsysc_uaccess
340 j .Lsysc_return # beware of critical section cleanup
341
342#
343# _TIF_NEED_RESCHED is set, call schedule
344#
345.Lsysc_reschedule:
346 larl %r14,.Lsysc_return
347 jg schedule
348
349#
350# _CIF_MCCK_PENDING is set, call handler
351#
352.Lsysc_mcck_pending:
353 larl %r14,.Lsysc_return
354 jg s390_handle_mcck # TIF bit will be cleared by handler
355
356#
357# _CIF_ASCE is set, load user space asce
358#
359.Lsysc_uaccess:
360 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
361 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
362 j .Lsysc_return
363
364#
365# CIF_FPU is set, restore floating-point controls and floating-point registers.
366#
367.Lsysc_vxrs:
368 larl %r14,.Lsysc_return
369 jg load_fpu_regs
370
371#
372# _TIF_SIGPENDING is set, call do_signal
373#
374.Lsysc_sigpending:
375 lgr %r2,%r11 # pass pointer to pt_regs
376 brasl %r14,do_signal
377 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
378 jno .Lsysc_return
379 lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
380 lg %r10,__TI_sysc_table(%r12) # address of system call table
381 lghi %r8,0 # svc 0 returns -ENOSYS
382 llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
383 cghi %r1,NR_syscalls
384 jnl .Lsysc_nr_ok # invalid svc number -> do svc 0
385 slag %r8,%r1,2
386 j .Lsysc_nr_ok # restart svc
387
388#
389# _TIF_NOTIFY_RESUME is set, call do_notify_resume
390#
391.Lsysc_notify_resume:
392 lgr %r2,%r11 # pass pointer to pt_regs
393 larl %r14,.Lsysc_return
394 jg do_notify_resume
395
396#
397# _TIF_UPROBE is set, call uprobe_notify_resume
398#
399#ifdef CONFIG_UPROBES
400.Lsysc_uprobe_notify:
401 lgr %r2,%r11 # pass pointer to pt_regs
402 larl %r14,.Lsysc_return
403 jg uprobe_notify_resume
404#endif
405
406#
407# _PIF_PER_TRAP is set, call do_per_trap
408#
409.Lsysc_singlestep:
410 ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
411 lgr %r2,%r11 # pass pointer to pt_regs
412 larl %r14,.Lsysc_return
413 jg do_per_trap
414
415#
416# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
417# and after the system call
418#
419.Lsysc_tracesys:
420 lgr %r2,%r11 # pass pointer to pt_regs
421 la %r3,0
422 llgh %r0,__PT_INT_CODE+2(%r11)
423 stg %r0,__PT_R2(%r11)
424 brasl %r14,do_syscall_trace_enter
425 lghi %r0,NR_syscalls
426 clgr %r0,%r2
427 jnh .Lsysc_tracenogo
428 sllg %r8,%r2,2
429 lgf %r9,0(%r8,%r10)
430.Lsysc_tracego:
431 lmg %r3,%r7,__PT_R3(%r11)
432 stg %r7,STACK_FRAME_OVERHEAD(%r15)
433 lg %r2,__PT_ORIG_GPR2(%r11)
434 basr %r14,%r9 # call sys_xxx
435 stg %r2,__PT_R2(%r11) # store return value
436.Lsysc_tracenogo:
437 TSTMSK __TI_flags(%r12),_TIF_TRACE
438 jz .Lsysc_return
439 lgr %r2,%r11 # pass pointer to pt_regs
440 larl %r14,.Lsysc_return
441 jg do_syscall_trace_exit
442
443#
444# a new process exits the kernel with ret_from_fork
445#
446ENTRY(ret_from_fork)
447 la %r11,STACK_FRAME_OVERHEAD(%r15)
448 lg %r12,__LC_THREAD_INFO
449 brasl %r14,schedule_tail
450 TRACE_IRQS_ON
451 ssm __LC_SVC_NEW_PSW # reenable interrupts
452 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
453 jne .Lsysc_tracenogo
454 # it's a kernel thread
455 lmg %r9,%r10,__PT_R9(%r11) # load gprs
456ENTRY(kernel_thread_starter)
457 la %r2,0(%r10)
458 basr %r14,%r9
459 j .Lsysc_tracenogo
460
461/*
462 * Program check handler routine
463 */
464
465ENTRY(pgm_check_handler)
466 stpt __LC_SYNC_ENTER_TIMER
467 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
468 lg %r10,__LC_LAST_BREAK
469 lg %r12,__LC_THREAD_INFO
470 larl %r13,cleanup_critical
471 lmg %r8,%r9,__LC_PGM_OLD_PSW
472 tmhh %r8,0x0001 # test problem state bit
473 jnz 2f # -> fault in user space
474#if IS_ENABLED(CONFIG_KVM)
475 # cleanup critical section for sie64a
476 lgr %r14,%r9
477 slg %r14,BASED(.Lsie_critical_start)
478 clg %r14,BASED(.Lsie_critical_length)
479 jhe 0f
480 brasl %r14,.Lcleanup_sie
481#endif
4820: tmhh %r8,0x4000 # PER bit set in old PSW ?
483 jnz 1f # -> enabled, can't be a double fault
484 tm __LC_PGM_ILC+3,0x80 # check for per exception
485 jnz .Lpgm_svcper # -> single stepped svc
4861: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
487 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
488 j 3f
4892: LAST_BREAK %r14
490 UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
491 lg %r15,__LC_KERNEL_STACK
492 lg %r14,__TI_task(%r12)
493 aghi %r14,__TASK_thread # pointer to thread_struct
494 lghi %r13,__LC_PGM_TDB
495 tm __LC_PGM_ILC+2,0x02 # check for transaction abort
496 jz 3f
497 mvc __THREAD_trap_tdb(256,%r14),0(%r13)
4983: la %r11,STACK_FRAME_OVERHEAD(%r15)
499 stmg %r0,%r7,__PT_R0(%r11)
500 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
501 stmg %r8,%r9,__PT_PSW(%r11)
502 mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
503 mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
504 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
505 stg %r10,__PT_ARGS(%r11)
506 tm __LC_PGM_ILC+3,0x80 # check for per exception
507 jz 4f
508 tmhh %r8,0x0001 # kernel per event ?
509 jz .Lpgm_kprobe
510 oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
511 mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
512 mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
513 mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
5144: REENABLE_IRQS
515 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
516 larl %r1,pgm_check_table
517 llgh %r10,__PT_INT_CODE+2(%r11)
518 nill %r10,0x007f
519 sll %r10,2
520 je .Lpgm_return
521 lgf %r1,0(%r10,%r1) # load address of handler routine
522 lgr %r2,%r11 # pass pointer to pt_regs
523 basr %r14,%r1 # branch to interrupt-handler
524.Lpgm_return:
525 LOCKDEP_SYS_EXIT
526 tm __PT_PSW+1(%r11),0x01 # returning to user ?
527 jno .Lsysc_restore
528 j .Lsysc_tif
529
530#
531# PER event in supervisor state, must be kprobes
532#
533.Lpgm_kprobe:
534 REENABLE_IRQS
535 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
536 lgr %r2,%r11 # pass pointer to pt_regs
537 brasl %r14,do_per_trap
538 j .Lpgm_return
539
540#
541# single stepped system call
542#
543.Lpgm_svcper:
544 mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
545 larl %r14,.Lsysc_per
546 stg %r14,__LC_RETURN_PSW+8
547 lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
548 lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
549
550/*
551 * IO interrupt handler routine
552 */
553ENTRY(io_int_handler)
554 STCK __LC_INT_CLOCK
555 stpt __LC_ASYNC_ENTER_TIMER
556 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
557 lg %r10,__LC_LAST_BREAK
558 lg %r12,__LC_THREAD_INFO
559 larl %r13,cleanup_critical
560 lmg %r8,%r9,__LC_IO_OLD_PSW
561 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
562 stmg %r0,%r7,__PT_R0(%r11)
563 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
564 stmg %r8,%r9,__PT_PSW(%r11)
565 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
566 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
567 TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
568 jo .Lio_restore
569 TRACE_IRQS_OFF
570 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
571.Lio_loop:
572 lgr %r2,%r11 # pass pointer to pt_regs
573 lghi %r3,IO_INTERRUPT
574 tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
575 jz .Lio_call
576 lghi %r3,THIN_INTERRUPT
577.Lio_call:
578 brasl %r14,do_IRQ
579 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
580 jz .Lio_return
581 tpi 0
582 jz .Lio_return
583 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
584 j .Lio_loop
585.Lio_return:
586 LOCKDEP_SYS_EXIT
587 TRACE_IRQS_ON
588.Lio_tif:
589 TSTMSK __TI_flags(%r12),_TIF_WORK
590 jnz .Lio_work # there is work to do (signals etc.)
591 TSTMSK __LC_CPU_FLAGS,_CIF_WORK
592 jnz .Lio_work
593.Lio_restore:
594 lg %r14,__LC_VDSO_PER_CPU
595 lmg %r0,%r10,__PT_R0(%r11)
596 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
597 stpt __LC_EXIT_TIMER
598 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
599 lmg %r11,%r15,__PT_R11(%r11)
600 lpswe __LC_RETURN_PSW
601.Lio_done:
602
603#
604# There is work todo, find out in which context we have been interrupted:
605# 1) if we return to user space we can do all _TIF_WORK work
606# 2) if we return to kernel code and kvm is enabled check if we need to
607# modify the psw to leave SIE
608# 3) if we return to kernel code and preemptive scheduling is enabled check
609# the preemption counter and if it is zero call preempt_schedule_irq
610# Before any work can be done, a switch to the kernel stack is required.
611#
612.Lio_work:
613 tm __PT_PSW+1(%r11),0x01 # returning to user ?
614 jo .Lio_work_user # yes -> do resched & signal
615#ifdef CONFIG_PREEMPT
616 # check for preemptive scheduling
617 icm %r0,15,__TI_precount(%r12)
618 jnz .Lio_restore # preemption is disabled
619 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
620 jno .Lio_restore
621 # switch to kernel stack
622 lg %r1,__PT_R15(%r11)
623 aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
624 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
625 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
626 la %r11,STACK_FRAME_OVERHEAD(%r1)
627 lgr %r15,%r1
628 # TRACE_IRQS_ON already done at .Lio_return, call
629 # TRACE_IRQS_OFF to keep things symmetrical
630 TRACE_IRQS_OFF
631 brasl %r14,preempt_schedule_irq
632 j .Lio_return
633#else
634 j .Lio_restore
635#endif
636
637#
638# Need to do work before returning to userspace, switch to kernel stack
639#
640.Lio_work_user:
641 lg %r1,__LC_KERNEL_STACK
642 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
643 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
644 la %r11,STACK_FRAME_OVERHEAD(%r1)
645 lgr %r15,%r1
646
647#
648# One of the work bits is on. Find out which one.
649#
650.Lio_work_tif:
651 TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
652 jo .Lio_mcck_pending
653 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
654 jo .Lio_reschedule
655 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING
656 jo .Lio_sigpending
657 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
658 jo .Lio_notify_resume
659 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
660 jo .Lio_vxrs
661 TSTMSK __LC_CPU_FLAGS,_CIF_ASCE
662 jo .Lio_uaccess
663 j .Lio_return # beware of critical section cleanup
664
665#
666# _CIF_MCCK_PENDING is set, call handler
667#
668.Lio_mcck_pending:
669 # TRACE_IRQS_ON already done at .Lio_return
670 brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
671 TRACE_IRQS_OFF
672 j .Lio_return
673
674#
675# _CIF_ASCE is set, load user space asce
676#
677.Lio_uaccess:
678 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
679 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
680 j .Lio_return
681
682#
683# CIF_FPU is set, restore floating-point controls and floating-point registers.
684#
685.Lio_vxrs:
686 larl %r14,.Lio_return
687 jg load_fpu_regs
688
689#
690# _TIF_NEED_RESCHED is set, call schedule
691#
692.Lio_reschedule:
693 # TRACE_IRQS_ON already done at .Lio_return
694 ssm __LC_SVC_NEW_PSW # reenable interrupts
695 brasl %r14,schedule # call scheduler
696 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
697 TRACE_IRQS_OFF
698 j .Lio_return
699
700#
701# _TIF_SIGPENDING or is set, call do_signal
702#
703.Lio_sigpending:
704 # TRACE_IRQS_ON already done at .Lio_return
705 ssm __LC_SVC_NEW_PSW # reenable interrupts
706 lgr %r2,%r11 # pass pointer to pt_regs
707 brasl %r14,do_signal
708 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
709 TRACE_IRQS_OFF
710 j .Lio_return
711
712#
713# _TIF_NOTIFY_RESUME or is set, call do_notify_resume
714#
715.Lio_notify_resume:
716 # TRACE_IRQS_ON already done at .Lio_return
717 ssm __LC_SVC_NEW_PSW # reenable interrupts
718 lgr %r2,%r11 # pass pointer to pt_regs
719 brasl %r14,do_notify_resume
720 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
721 TRACE_IRQS_OFF
722 j .Lio_return
723
724/*
725 * External interrupt handler routine
726 */
727ENTRY(ext_int_handler)
728 STCK __LC_INT_CLOCK
729 stpt __LC_ASYNC_ENTER_TIMER
730 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
731 lg %r10,__LC_LAST_BREAK
732 lg %r12,__LC_THREAD_INFO
733 larl %r13,cleanup_critical
734 lmg %r8,%r9,__LC_EXT_OLD_PSW
735 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
736 stmg %r0,%r7,__PT_R0(%r11)
737 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
738 stmg %r8,%r9,__PT_PSW(%r11)
739 lghi %r1,__LC_EXT_PARAMS2
740 mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
741 mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
742 mvc __PT_INT_PARM_LONG(8,%r11),0(%r1)
743 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
744 TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
745 jo .Lio_restore
746 TRACE_IRQS_OFF
747 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
748 lgr %r2,%r11 # pass pointer to pt_regs
749 lghi %r3,EXT_INTERRUPT
750 brasl %r14,do_IRQ
751 j .Lio_return
752
753/*
754 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
755 */
756ENTRY(psw_idle)
757 stg %r3,__SF_EMPTY(%r15)
758 larl %r1,.Lpsw_idle_lpsw+4
759 stg %r1,__SF_EMPTY+8(%r15)
760#ifdef CONFIG_SMP
761 larl %r1,smp_cpu_mtid
762 llgf %r1,0(%r1)
763 ltgr %r1,%r1
764 jz .Lpsw_idle_stcctm
765 .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
766.Lpsw_idle_stcctm:
767#endif
768 oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
769 STCK __CLOCK_IDLE_ENTER(%r2)
770 stpt __TIMER_IDLE_ENTER(%r2)
771.Lpsw_idle_lpsw:
772 lpswe __SF_EMPTY(%r15)
773 br %r14
774.Lpsw_idle_end:
775
776/*
777 * Store floating-point controls and floating-point or vector register
778 * depending whether the vector facility is available. A critical section
779 * cleanup assures that the registers are stored even if interrupted for
780 * some other work. The CIF_FPU flag is set to trigger a lazy restore
781 * of the register contents at return from io or a system call.
782 */
783ENTRY(save_fpu_regs)
784 lg %r2,__LC_CURRENT
785 aghi %r2,__TASK_thread
786 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
787 bor %r14
788 stfpc __THREAD_FPU_fpc(%r2)
789.Lsave_fpu_regs_fpc_end:
790 lg %r3,__THREAD_FPU_regs(%r2)
791 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
792 jz .Lsave_fpu_regs_fp # no -> store FP regs
793.Lsave_fpu_regs_vx_low:
794 VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
795.Lsave_fpu_regs_vx_high:
796 VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3)
797 j .Lsave_fpu_regs_done # -> set CIF_FPU flag
798.Lsave_fpu_regs_fp:
799 std 0,0(%r3)
800 std 1,8(%r3)
801 std 2,16(%r3)
802 std 3,24(%r3)
803 std 4,32(%r3)
804 std 5,40(%r3)
805 std 6,48(%r3)
806 std 7,56(%r3)
807 std 8,64(%r3)
808 std 9,72(%r3)
809 std 10,80(%r3)
810 std 11,88(%r3)
811 std 12,96(%r3)
812 std 13,104(%r3)
813 std 14,112(%r3)
814 std 15,120(%r3)
815.Lsave_fpu_regs_done:
816 oi __LC_CPU_FLAGS+7,_CIF_FPU
817 br %r14
818.Lsave_fpu_regs_end:
819
820/*
821 * Load floating-point controls and floating-point or vector registers.
822 * A critical section cleanup assures that the register contents are
823 * loaded even if interrupted for some other work.
824 *
825 * There are special calling conventions to fit into sysc and io return work:
826 * %r15: <kernel stack>
827 * The function requires:
828 * %r4
829 */
830load_fpu_regs:
831 lg %r4,__LC_CURRENT
832 aghi %r4,__TASK_thread
833 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
834 bnor %r14
835 lfpc __THREAD_FPU_fpc(%r4)
836 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
837 lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
838 jz .Lload_fpu_regs_fp # -> no VX, load FP regs
839.Lload_fpu_regs_vx:
840 VLM %v0,%v15,0,%r4
841.Lload_fpu_regs_vx_high:
842 VLM %v16,%v31,256,%r4
843 j .Lload_fpu_regs_done
844.Lload_fpu_regs_fp:
845 ld 0,0(%r4)
846 ld 1,8(%r4)
847 ld 2,16(%r4)
848 ld 3,24(%r4)
849 ld 4,32(%r4)
850 ld 5,40(%r4)
851 ld 6,48(%r4)
852 ld 7,56(%r4)
853 ld 8,64(%r4)
854 ld 9,72(%r4)
855 ld 10,80(%r4)
856 ld 11,88(%r4)
857 ld 12,96(%r4)
858 ld 13,104(%r4)
859 ld 14,112(%r4)
860 ld 15,120(%r4)
861.Lload_fpu_regs_done:
862 ni __LC_CPU_FLAGS+7,255-_CIF_FPU
863 br %r14
864.Lload_fpu_regs_end:
865
866.L__critical_end:
867
868/*
869 * Machine check handler routines
870 */
871ENTRY(mcck_int_handler)
872 STCK __LC_MCCK_CLOCK
873 la %r1,4095 # revalidate r1
874 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
875 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
876 lg %r10,__LC_LAST_BREAK
877 lg %r12,__LC_THREAD_INFO
878 larl %r13,cleanup_critical
879 lmg %r8,%r9,__LC_MCK_OLD_PSW
880 TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
881 jo .Lmcck_panic # yes -> rest of mcck code invalid
882 lghi %r14,__LC_CPU_TIMER_SAVE_AREA
883 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
884 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
885 jo 3f
886 la %r14,__LC_SYNC_ENTER_TIMER
887 clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
888 jl 0f
889 la %r14,__LC_ASYNC_ENTER_TIMER
8900: clc 0(8,%r14),__LC_EXIT_TIMER
891 jl 1f
892 la %r14,__LC_EXIT_TIMER
8931: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
894 jl 2f
895 la %r14,__LC_LAST_UPDATE_TIMER
8962: spt 0(%r14)
897 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
8983: TSTMSK __LC_MCCK_CODE,(MCCK_CODE_PSW_MWP_VALID|MCCK_CODE_PSW_IA_VALID)
899 jno .Lmcck_panic # no -> skip cleanup critical
900 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
901.Lmcck_skip:
902 lghi %r14,__LC_GPREGS_SAVE_AREA+64
903 stmg %r0,%r7,__PT_R0(%r11)
904 mvc __PT_R8(64,%r11),0(%r14)
905 stmg %r8,%r9,__PT_PSW(%r11)
906 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
907 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
908 lgr %r2,%r11 # pass pointer to pt_regs
909 brasl %r14,s390_do_machine_check
910 tm __PT_PSW+1(%r11),0x01 # returning to user ?
911 jno .Lmcck_return
912 lg %r1,__LC_KERNEL_STACK # switch to kernel stack
913 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
914 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
915 la %r11,STACK_FRAME_OVERHEAD(%r1)
916 lgr %r15,%r1
917 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
918 TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
919 jno .Lmcck_return
920 TRACE_IRQS_OFF
921 brasl %r14,s390_handle_mcck
922 TRACE_IRQS_ON
923.Lmcck_return:
924 lg %r14,__LC_VDSO_PER_CPU
925 lmg %r0,%r10,__PT_R0(%r11)
926 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
927 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
928 jno 0f
929 stpt __LC_EXIT_TIMER
930 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
9310: lmg %r11,%r15,__PT_R11(%r11)
932 lpswe __LC_RETURN_MCCK_PSW
933
934.Lmcck_panic:
935 lg %r15,__LC_PANIC_STACK
936 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
937 j .Lmcck_skip
938
939#
940# PSW restart interrupt handler
941#
942ENTRY(restart_int_handler)
943 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
944 jz 0f
945 .insn s,0xb2800000,__LC_LPP
9460: stg %r15,__LC_SAVE_AREA_RESTART
947 lg %r15,__LC_RESTART_STACK
948 aghi %r15,-__PT_SIZE # create pt_regs on stack
949 xc 0(__PT_SIZE,%r15),0(%r15)
950 stmg %r0,%r14,__PT_R0(%r15)
951 mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
952 mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
953 aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
954 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
955 lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
956 lg %r2,__LC_RESTART_DATA
957 lg %r3,__LC_RESTART_SOURCE
958 ltgr %r3,%r3 # test source cpu address
959 jm 1f # negative -> skip source stop
9600: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
961 brc 10,0b # wait for status stored
9621: basr %r14,%r1 # call function
963 stap __SF_EMPTY(%r15) # store cpu address
964 llgh %r3,__SF_EMPTY(%r15)
9652: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
966 brc 2,2b
9673: j 3b
968
969 .section .kprobes.text, "ax"
970
971#ifdef CONFIG_CHECK_STACK
972/*
973 * The synchronous or the asynchronous stack overflowed. We are dead.
974 * No need to properly save the registers, we are going to panic anyway.
975 * Setup a pt_regs so that show_trace can provide a good call trace.
976 */
977stack_overflow:
978 lg %r15,__LC_PANIC_STACK # change to panic stack
979 la %r11,STACK_FRAME_OVERHEAD(%r15)
980 stmg %r0,%r7,__PT_R0(%r11)
981 stmg %r8,%r9,__PT_PSW(%r11)
982 mvc __PT_R8(64,%r11),0(%r14)
983 stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
984 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
985 lgr %r2,%r11 # pass pointer to pt_regs
986 jg kernel_stack_overflow
987#endif
988
989cleanup_critical:
990#if IS_ENABLED(CONFIG_KVM)
991 clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap
992 jl 0f
993 clg %r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
994 jl .Lcleanup_sie
995#endif
996 clg %r9,BASED(.Lcleanup_table) # system_call
997 jl 0f
998 clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc
999 jl .Lcleanup_system_call
1000 clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif
1001 jl 0f
1002 clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore
1003 jl .Lcleanup_sysc_tif
1004 clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done
1005 jl .Lcleanup_sysc_restore
1006 clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif
1007 jl 0f
1008 clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore
1009 jl .Lcleanup_io_tif
1010 clg %r9,BASED(.Lcleanup_table+56) # .Lio_done
1011 jl .Lcleanup_io_restore
1012 clg %r9,BASED(.Lcleanup_table+64) # psw_idle
1013 jl 0f
1014 clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end
1015 jl .Lcleanup_idle
1016 clg %r9,BASED(.Lcleanup_table+80) # save_fpu_regs
1017 jl 0f
1018 clg %r9,BASED(.Lcleanup_table+88) # .Lsave_fpu_regs_end
1019 jl .Lcleanup_save_fpu_regs
1020 clg %r9,BASED(.Lcleanup_table+96) # load_fpu_regs
1021 jl 0f
1022 clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
1023 jl .Lcleanup_load_fpu_regs
10240: br %r14
1025
1026 .align 8
1027.Lcleanup_table:
1028 .quad system_call
1029 .quad .Lsysc_do_svc
1030 .quad .Lsysc_tif
1031 .quad .Lsysc_restore
1032 .quad .Lsysc_done
1033 .quad .Lio_tif
1034 .quad .Lio_restore
1035 .quad .Lio_done
1036 .quad psw_idle
1037 .quad .Lpsw_idle_end
1038 .quad save_fpu_regs
1039 .quad .Lsave_fpu_regs_end
1040 .quad load_fpu_regs
1041 .quad .Lload_fpu_regs_end
1042
1043#if IS_ENABLED(CONFIG_KVM)
1044.Lcleanup_table_sie:
1045 .quad .Lsie_gmap
1046 .quad .Lsie_done
1047
1048.Lcleanup_sie:
1049 lg %r9,__SF_EMPTY(%r15) # get control block pointer
1050 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
1051 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
1052 larl %r9,sie_exit # skip forward to sie_exit
1053 br %r14
1054#endif
1055
1056.Lcleanup_system_call:
1057 # check if stpt has been executed
1058 clg %r9,BASED(.Lcleanup_system_call_insn)
1059 jh 0f
1060 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
1061 cghi %r11,__LC_SAVE_AREA_ASYNC
1062 je 0f
1063 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
10640: # check if stmg has been executed
1065 clg %r9,BASED(.Lcleanup_system_call_insn+8)
1066 jh 0f
1067 mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
10680: # check if base register setup + TIF bit load has been done
1069 clg %r9,BASED(.Lcleanup_system_call_insn+16)
1070 jhe 0f
1071 # set up saved registers r10 and r12
1072 stg %r10,16(%r11) # r10 last break
1073 stg %r12,32(%r11) # r12 thread-info pointer
10740: # check if the user time update has been done
1075 clg %r9,BASED(.Lcleanup_system_call_insn+24)
1076 jh 0f
1077 lg %r15,__LC_EXIT_TIMER
1078 slg %r15,__LC_SYNC_ENTER_TIMER
1079 alg %r15,__LC_USER_TIMER
1080 stg %r15,__LC_USER_TIMER
10810: # check if the system time update has been done
1082 clg %r9,BASED(.Lcleanup_system_call_insn+32)
1083 jh 0f
1084 lg %r15,__LC_LAST_UPDATE_TIMER
1085 slg %r15,__LC_EXIT_TIMER
1086 alg %r15,__LC_SYSTEM_TIMER
1087 stg %r15,__LC_SYSTEM_TIMER
10880: # update accounting time stamp
1089 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
1090 # do LAST_BREAK
1091 lg %r9,16(%r11)
1092 srag %r9,%r9,23
1093 jz 0f
1094 mvc __TI_last_break(8,%r12),16(%r11)
10950: # set up saved register r11
1096 lg %r15,__LC_KERNEL_STACK
1097 la %r9,STACK_FRAME_OVERHEAD(%r15)
1098 stg %r9,24(%r11) # r11 pt_regs pointer
1099 # fill pt_regs
1100 mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
1101 stmg %r0,%r7,__PT_R0(%r9)
1102 mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
1103 mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
1104 xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
1105 mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL
1106 # setup saved register r15
1107 stg %r15,56(%r11) # r15 stack pointer
1108 # set new psw address and exit
1109 larl %r9,.Lsysc_do_svc
1110 br %r14
1111.Lcleanup_system_call_insn:
1112 .quad system_call
1113 .quad .Lsysc_stmg
1114 .quad .Lsysc_per
1115 .quad .Lsysc_vtime+36
1116 .quad .Lsysc_vtime+42
1117
1118.Lcleanup_sysc_tif:
1119 larl %r9,.Lsysc_tif
1120 br %r14
1121
1122.Lcleanup_sysc_restore:
1123 clg %r9,BASED(.Lcleanup_sysc_restore_insn)
1124 je 0f
1125 lg %r9,24(%r11) # get saved pointer to pt_regs
1126 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
1127 mvc 0(64,%r11),__PT_R8(%r9)
1128 lmg %r0,%r7,__PT_R0(%r9)
11290: lmg %r8,%r9,__LC_RETURN_PSW
1130 br %r14
1131.Lcleanup_sysc_restore_insn:
1132 .quad .Lsysc_done - 4
1133
1134.Lcleanup_io_tif:
1135 larl %r9,.Lio_tif
1136 br %r14
1137
1138.Lcleanup_io_restore:
1139 clg %r9,BASED(.Lcleanup_io_restore_insn)
1140 je 0f
1141 lg %r9,24(%r11) # get saved r11 pointer to pt_regs
1142 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
1143 mvc 0(64,%r11),__PT_R8(%r9)
1144 lmg %r0,%r7,__PT_R0(%r9)
11450: lmg %r8,%r9,__LC_RETURN_PSW
1146 br %r14
1147.Lcleanup_io_restore_insn:
1148 .quad .Lio_done - 4
1149
1150.Lcleanup_idle:
1151 ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
1152 # copy interrupt clock & cpu timer
1153 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
1154 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
1155 cghi %r11,__LC_SAVE_AREA_ASYNC
1156 je 0f
1157 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
1158 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
11590: # check if stck & stpt have been executed
1160 clg %r9,BASED(.Lcleanup_idle_insn)
1161 jhe 1f
1162 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
1163 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
11641: # calculate idle cycles
1165#ifdef CONFIG_SMP
1166 clg %r9,BASED(.Lcleanup_idle_insn)
1167 jl 3f
1168 larl %r1,smp_cpu_mtid
1169 llgf %r1,0(%r1)
1170 ltgr %r1,%r1
1171 jz 3f
1172 .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
1173 larl %r3,mt_cycles
1174 ag %r3,__LC_PERCPU_OFFSET
1175 la %r4,__SF_EMPTY+16(%r15)
11762: lg %r0,0(%r3)
1177 slg %r0,0(%r4)
1178 alg %r0,64(%r4)
1179 stg %r0,0(%r3)
1180 la %r3,8(%r3)
1181 la %r4,8(%r4)
1182 brct %r1,2b
1183#endif
11843: # account system time going idle
1185 lg %r9,__LC_STEAL_TIMER
1186 alg %r9,__CLOCK_IDLE_ENTER(%r2)
1187 slg %r9,__LC_LAST_UPDATE_CLOCK
1188 stg %r9,__LC_STEAL_TIMER
1189 mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
1190 lg %r9,__LC_SYSTEM_TIMER
1191 alg %r9,__LC_LAST_UPDATE_TIMER
1192 slg %r9,__TIMER_IDLE_ENTER(%r2)
1193 stg %r9,__LC_SYSTEM_TIMER
1194 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
1195 # prepare return psw
1196 nihh %r8,0xfcfd # clear irq & wait state bits
1197 lg %r9,48(%r11) # return from psw_idle
1198 br %r14
1199.Lcleanup_idle_insn:
1200 .quad .Lpsw_idle_lpsw
1201
1202.Lcleanup_save_fpu_regs:
1203 larl %r9,save_fpu_regs
1204 br %r14
1205
1206.Lcleanup_load_fpu_regs:
1207 larl %r9,load_fpu_regs
1208 br %r14
1209
1210/*
1211 * Integer constants
1212 */
1213 .align 8
1214.Lcritical_start:
1215 .quad .L__critical_start
1216.Lcritical_length:
1217 .quad .L__critical_end - .L__critical_start
1218#if IS_ENABLED(CONFIG_KVM)
1219.Lsie_critical_start:
1220 .quad .Lsie_gmap
1221.Lsie_critical_length:
1222 .quad .Lsie_done - .Lsie_gmap
1223#endif
1224
1225 .section .rodata, "a"
1226#define SYSCALL(esame,emu) .long esame
1227 .globl sys_call_table
1228sys_call_table:
1229#include "syscalls.S"
1230#undef SYSCALL
1231
1232#ifdef CONFIG_COMPAT
1233
1234#define SYSCALL(esame,emu) .long emu
1235 .globl sys_call_table_emu
1236sys_call_table_emu:
1237#include "syscalls.S"
1238#undef SYSCALL
1239#endif