/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 low-level entry points.
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Hartmut Penner (hp@de.ibm.com),
 *	      Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 */
11
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ctl_reg.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>
32
/* Byte offsets of the individual saved GPRs inside struct pt_regs. */
__PT_R0	     =	__PT_GPRS
__PT_R1	     =	__PT_GPRS + 8
__PT_R2	     =	__PT_GPRS + 16
__PT_R3	     =	__PT_GPRS + 24
__PT_R4	     =	__PT_GPRS + 32
__PT_R5	     =	__PT_GPRS + 40
__PT_R6	     =	__PT_GPRS + 48
__PT_R7	     =	__PT_GPRS + 56
__PT_R8	     =	__PT_GPRS + 64
__PT_R9	     =	__PT_GPRS + 72
__PT_R10     =	__PT_GPRS + 80
__PT_R11     =	__PT_GPRS + 88
__PT_R12     =	__PT_GPRS + 96
__PT_R13     =	__PT_GPRS + 104
__PT_R14     =	__PT_GPRS + 112
__PT_R15     =	__PT_GPRS + 120

/* Kernel stack geometry; STACK_INIT is the topmost usable frame offset. */
STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

/* Aggregated work bits tested on the return-to-user paths. */
_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		   _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		   _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
		   _CIF_ASCE_SECONDARY | _CIF_FPU)
_PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)

_LPP_OFFSET	= __LC_LPP

/*
 * %r13 holds the address of cleanup_critical on the entry paths;
 * BASED() turns a label into a %r13-relative operand.
 */
#define BASED(name) name-cleanup_critical(%r13)
65
66 .macro TRACE_IRQS_ON
67#ifdef CONFIG_TRACE_IRQFLAGS
68 basr %r2,%r0
69 brasl %r14,trace_hardirqs_on_caller
70#endif
71 .endm
72
73 .macro TRACE_IRQS_OFF
74#ifdef CONFIG_TRACE_IRQFLAGS
75 basr %r2,%r0
76 brasl %r14,trace_hardirqs_off_caller
77#endif
78 .endm
79
80 .macro LOCKDEP_SYS_EXIT
81#ifdef CONFIG_LOCKDEP
82 tm __PT_PSW+1(%r11),0x01 # returning to user ?
83 jz .+10
84 brasl %r14,lockdep_sys_exit
85#endif
86 .endm
87
88 .macro CHECK_STACK savearea
89#ifdef CONFIG_CHECK_STACK
90 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
91 lghi %r14,\savearea
92 jz stack_overflow
93#endif
94 .endm
95
96 .macro CHECK_VMAP_STACK savearea,oklabel
97#ifdef CONFIG_VMAP_STACK
98 lgr %r14,%r15
99 nill %r14,0x10000 - STACK_SIZE
100 oill %r14,STACK_INIT
101 clg %r14,__LC_KERNEL_STACK
102 je \oklabel
103 clg %r14,__LC_ASYNC_STACK
104 je \oklabel
105 clg %r14,__LC_NODAT_STACK
106 je \oklabel
107 clg %r14,__LC_RESTART_STACK
108 je \oklabel
109 lghi %r14,\savearea
110 j stack_overflow
111#else
112 j \oklabel
113#endif
114 .endm
115
116 .macro SWITCH_ASYNC savearea,timer
117 tmhh %r8,0x0001 # interrupting from user ?
118 jnz 1f
119 lgr %r14,%r9
120 slg %r14,BASED(.Lcritical_start)
121 clg %r14,BASED(.Lcritical_length)
122 jhe 0f
123 lghi %r11,\savearea # inside critical section, do cleanup
124 brasl %r14,cleanup_critical
125 tmhh %r8,0x0001 # retest problem state after cleanup
126 jnz 1f
1270: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
128 slgr %r14,%r15
129 srag %r14,%r14,STACK_SHIFT
130 jnz 2f
131 CHECK_STACK \savearea
132 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
133 j 3f
1341: UPDATE_VTIME %r14,%r15,\timer
135 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
1362: lg %r15,__LC_ASYNC_STACK # load async stack
1373: la %r11,STACK_FRAME_OVERHEAD(%r15)
138 .endm
139
140 .macro UPDATE_VTIME w1,w2,enter_timer
141 lg \w1,__LC_EXIT_TIMER
142 lg \w2,__LC_LAST_UPDATE_TIMER
143 slg \w1,\enter_timer
144 slg \w2,__LC_EXIT_TIMER
145 alg \w1,__LC_USER_TIMER
146 alg \w2,__LC_SYSTEM_TIMER
147 stg \w1,__LC_USER_TIMER
148 stg \w2,__LC_SYSTEM_TIMER
149 mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
150 .endm
151
152 .macro REENABLE_IRQS
153 stg %r8,__LC_RETURN_PSW
154 ni __LC_RETURN_PSW,0xbf
155 ssm __LC_RETURN_PSW
156 .endm
157
158 .macro STCK savearea
159#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
160 .insn s,0xb27c0000,\savearea # store clock fast
161#else
162 .insn s,0xb2050000,\savearea # store clock
163#endif
164 .endm
165
166 /*
167 * The TSTMSK macro generates a test-under-mask instruction by
168 * calculating the memory offset for the specified mask value.
169 * Mask value can be any constant. The macro shifts the mask
170 * value to calculate the memory offset for the test-under-mask
171 * instruction.
172 */
173 .macro TSTMSK addr, mask, size=8, bytepos=0
174 .if (\bytepos < \size) && (\mask >> 8)
175 .if (\mask & 0xff)
176 .error "Mask exceeds byte boundary"
177 .endif
178 TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
179 .exitm
180 .endif
181 .ifeq \mask
182 .error "Mask must not be zero"
183 .endif
184 off = \size - \bytepos - 1
185 tm off+\addr, \mask
186 .endm
187
188 .macro BPOFF
189 ALTERNATIVE "", ".long 0xb2e8c000", 82
190 .endm
191
192 .macro BPON
193 ALTERNATIVE "", ".long 0xb2e8d000", 82
194 .endm
195
196 .macro BPENTER tif_ptr,tif_mask
197 ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
198 "", 82
199 .endm
200
201 .macro BPEXIT tif_ptr,tif_mask
202 TSTMSK \tif_ptr,\tif_mask
203 ALTERNATIVE "jz .+8; .long 0xb2e8c000", \
204 "jnz .+8; .long 0xb2e8d000", 82
205 .endm
206
207 GEN_BR_THUNK %r9
208 GEN_BR_THUNK %r14
209 GEN_BR_THUNK %r14,%r11
210
211 .section .kprobes.text, "ax"
212.Ldummy:
213 /*
214 * This nop exists only in order to avoid that __switch_to starts at
215 * the beginning of the kprobes text section. In that case we would
216 * have several symbols at the same address. E.g. objdump would take
217 * an arbitrary symbol name when disassembling this code.
218 * With the added nop in between the __switch_to symbol is unique
219 * again.
220 */
221 nop 0
222
/* Re-enable branch prediction for the current CPU and return. */
ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14
ENDPROC(__bpon)
228
/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	BR_EX	%r14
ENDPROC(__switch_to)
253
.L__critical_start:

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 * Returns the SIE exit reason code in %r2 (or -EFAULT on a fault
 * while in/around SIE).
 */
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
	jno	.Lsie_load_guest_gprs
	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
.Lsie_load_guest_gprs:
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
.Lsie_exit:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif
333
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 *
 * Register use on this path:
 *  %r11 - pt_regs on the kernel stack
 *  %r12 - current task_struct
 *  %r13 - __TASK_thread offset
 *  %r10 - system call table, %r8 - table offset, %r9 - handler address
 */

ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lg	%r12,__LC_CURRENT
	lghi	%r13,__TASK_thread
	lghi	%r14,_PIF_SYSCALL
.Lsysc_per:
	lg	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
.Lsysc_vtime:
	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
	stg	%r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
	# clear user controlled register to prevent speculative use
	xgr	%r0,%r0
	# load address of system call table
	lg	%r10,__THREAD_sysc_table(%r13,%r12)
	llgh	%r8,__PT_INT_CODE+2(%r11)
	slag	%r8,%r8,3			# shift and test for svc 0
	jnz	.Lsysc_nr_ok
	# svc 0: system call number in %r1
	llgfr	%r1,%r1				# clear high word in r1
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok
	sth	%r1,__PT_INT_CODE+2(%r11)
	slag	%r8,%r1,3
.Lsysc_nr_ok:
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stg	%r2,__PT_ORIG_GPR2(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r9,0(%r8,%r10)			# get system call add.
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jnz	.Lsysc_tracesys
	BASR_EX	%r14,%r9			# call sys_xxxx
	stg	%r2,__PT_R2(%r11)		# store return value

.Lsysc_return:
#ifdef CONFIG_DEBUG_RSEQ
	lgr	%r2,%r11
	brasl	%r14,rseq_syscall
#endif
	LOCKDEP_SYS_EXIT
.Lsysc_tif:
	TSTMSK	__PT_FLAGS(%r11),_PIF_WORK
	jnz	.Lsysc_work
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lsysc_work			# check for work
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lsysc_work
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
.Lsysc_exit_timer:
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lsysc_done:

#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jo	.Lsysc_mcck_pending
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lsysc_reschedule
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
#ifdef CONFIG_UPROBES
	TSTMSK	__TI_flags(%r12),_TIF_UPROBE
	jo	.Lsysc_uprobe_notify
#endif
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lsysc_guarded_storage
	TSTMSK	__PT_FLAGS(%r11),_PIF_PER_TRAP
	jo	.Lsysc_singlestep
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lsysc_patch_pending	# handle live patching just before
					# signals and possible syscall restart
#endif
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	jo	.Lsysc_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lsysc_notify_resume
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsysc_vxrs
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
	jnz	.Lsysc_asce
	j	.Lsysc_return		# beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
	larl	%r14,.Lsysc_return
	jg	schedule

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lsysc_mcck_pending:
	larl	%r14,.Lsysc_return
	jg	s390_handle_mcck	# TIF bit will be cleared by handler

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lsysc_asce:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
	lctlg	%c7,%c7,__LC_VDSO_ASCE		# load secondary asce
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
	jz	.Lsysc_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
	tm	__LC_STFLE_FAC_LIST+3,0x10	# has MVCOS ?
	jnz	.Lsysc_set_fs_fixup
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lsysc_return
.Lsysc_set_fs_fixup:
#endif
	larl	%r14,.Lsysc_return
	jg	set_fs_fixup

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lsysc_vxrs:
	larl	%r14,.Lsysc_return
	jg	load_fpu_regs

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jno	.Lsysc_return
.Lsysc_do_syscall:
	lghi	%r13,__TASK_thread
	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
	lghi	%r1,0			# svc 0 returns -ENOSYS
	j	.Lsysc_do_svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_notify_resume

#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
.Lsysc_uprobe_notify:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	uprobe_notify_resume
#endif

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lsysc_guarded_storage:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	gs_load_bc_cb
#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lsysc_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lsysc_return
	jg	klp_update_patch_state
#endif

#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_per_trap

#
# _PIF_SYSCALL_RESTART is set, repeat the current system call
#
.Lsysc_syscall_restart:
	ni	__PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART
	lmg	%r1,%r7,__PT_R1(%r11)	# load svc arguments
	lg	%r2,__PT_ORIG_GPR2(%r11)
	j	.Lsysc_do_svc

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
	lgr	%r2,%r11		# pass pointer to pt_regs
	la	%r3,0
	llgh	%r0,__PT_INT_CODE+2(%r11)
	stg	%r0,__PT_R2(%r11)
	brasl	%r14,do_syscall_trace_enter
	lghi	%r0,NR_syscalls
	clgr	%r0,%r2
	jnh	.Lsysc_tracenogo
	sllg	%r8,%r2,3
	lg	%r9,0(%r8,%r10)
.Lsysc_tracego:
	lmg	%r3,%r7,__PT_R3(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r2,__PT_ORIG_GPR2(%r11)
	BASR_EX	%r14,%r9		# call sys_xxx
	stg	%r2,__PT_R2(%r11)	# store return value
.Lsysc_tracenogo:
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jz	.Lsysc_return
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_syscall_trace_exit
ENDPROC(system_call)
577
#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	lg	%r12,__LC_CURRENT
	brasl	%r14,schedule_tail
	TRACE_IRQS_ON
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	jne	.Lsysc_tracenogo
	# it's a kernel thread
	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs: %r9 = fn, %r10 = arg
	la	%r2,0(%r10)
	BASR_EX	%r14,%r9
	j	.Lsysc_tracenogo
ENDPROC(ret_from_fork)
595
/*
 * Entry point for kernel threads: call the thread function in %r9
 * with the argument from %r10, then leave via the syscall exit path.
 */
ENTRY(kernel_thread_starter)
	la	%r2,0(%r10)
	BASR_EX	%r14,%r9
	j	.Lsysc_tracenogo
ENDPROC(kernel_thread_starter)
601
/*
 * Program check handler routine
 *
 * %r8/%r9 hold the old program-check PSW, %r10 the last-breaking-event
 * address, %r12 the current task. %r11 carries the initial pt_regs
 * flags (0, or _PIF_GUEST_FAULT for faults taken inside SIE).
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_CURRENT
	lghi	%r11,0
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# test problem state bit
	jnz	2f			# -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in sie64a
	lgr	%r14,%r9
	slg	%r14,BASED(.Lsie_critical_start)
	clg	%r14,BASED(.Lsie_critical_length)
	jhe	0f
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	lghi	%r11,_PIF_GUEST_FAULT
#endif
0:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	1f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
1:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
2:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
	lgr	%r14,%r12
	aghi	%r14,__TASK_thread	# pointer to thread_struct
	lghi	%r13,__LC_PGM_TDB
	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
	jz	3f
	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
3:	stg	%r10,__THREAD_last_break(%r14)
4:	lgr	%r13,%r11
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
	stg	%r13,__PT_FLAGS(%r11)
	stg	%r10,__PT_ARGS(%r11)
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	5f
	tmhh	%r8,0x0001		# kernel per event ?
	jz	.Lpgm_kprobe
	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
5:	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	larl	%r1,pgm_check_table
	llgh	%r10,__PT_INT_CODE+2(%r11)
	nill	%r10,0x007f
	sll	%r10,3
	je	.Lpgm_return
	lg	%r9,0(%r10,%r1)		# load address of handler routine
	lgr	%r2,%r11		# pass pointer to pt_regs
	BASR_EX	%r14,%r9		# branch to interrupt-handler
.Lpgm_return:
	LOCKDEP_SYS_EXIT
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lsysc_restore
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jo	.Lsysc_do_syscall
	j	.Lsysc_tif

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_per_trap
	j	.Lpgm_return

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	lghi	%r13,__TASK_thread
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs
ENDPROC(pgm_check_handler)
712
/*
 * IO interrupt handler routine
 *
 * %r11 points to pt_regs, %r12 to the current task. The return path
 * (.Lio_return/.Lio_restore) is shared with ext_int_handler.
 */
ENTRY(io_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_IO_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,IO_INTERRUPT
	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
	jz	.Lio_call
	lghi	%r3,THIN_INTERRUPT
.Lio_call:
	brasl	%r14,do_IRQ
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
	jz	.Lio_return
	tpi	0
	jz	.Lio_return
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	j	.Lio_loop
.Lio_return:
	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON
.Lio_tif:
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lio_work		# there is work to do (signals etc.)
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lio_work
.Lio_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lio_exit_kernel
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lio_exit_timer:
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
.Lio_exit_kernel:
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lio_done:

#
# There is work todo, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	.Lio_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPT
	# check for preemptive scheduling
	icm	%r0,15,__LC_PREEMPT_COUNT
	jnz	.Lio_restore		# preemption is disabled
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jno	.Lio_restore
	# switch to kernel stack
	lg	%r1,__PT_R15(%r11)
	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	# TRACE_IRQS_ON already done at .Lio_return, call
	# TRACE_IRQS_OFF to keep things symmetrical
	TRACE_IRQS_OFF
	brasl	%r14,preempt_schedule_irq
	j	.Lio_return
#else
	j	.Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
	lg	%r1,__LC_KERNEL_STACK
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1

#
# One of the work bits is on. Find out which one.
#
.Lio_work_tif:
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jo	.Lio_mcck_pending
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lio_reschedule
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lio_patch_pending
#endif
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	jo	.Lio_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lio_notify_resume
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lio_guarded_storage
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lio_vxrs
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
	jnz	.Lio_asce
	j	.Lio_return		# beware of critical section cleanup

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lio_mcck_pending:
	# TRACE_IRQS_ON already done at .Lio_return
	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce
#
.Lio_asce:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
	lctlg	%c7,%c7,__LC_VDSO_ASCE		# load secondary asce
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
	jz	.Lio_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
	tm	__LC_STFLE_FAC_LIST+3,0x10	# has MVCOS ?
	jnz	.Lio_set_fs_fixup
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lio_return
.Lio_set_fs_fixup:
#endif
	larl	%r14,.Lio_return
	jg	set_fs_fixup

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lio_vxrs:
	larl	%r14,.Lio_return
	jg	load_fpu_regs

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lio_guarded_storage:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,gs_load_bc_cb
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	brasl	%r14,schedule		# call scheduler
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lio_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lio_return
	jg	klp_update_patch_state
#endif

#
# _TIF_SIGPENDING or is set, call do_signal
#
.Lio_sigpending:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NOTIFY_RESUME or is set, call do_notify_resume
#
.Lio_notify_resume:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_notify_resume
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return
ENDPROC(io_int_handler)
941
/*
 * External interrupt handler routine
 *
 * Builds pt_regs like io_int_handler and shares its return path
 * (.Lio_return/.Lio_restore).
 */
ENTRY(ext_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_EXT_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	lghi	%r1,__LC_EXT_PARAMS2
	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT
	brasl	%r14,do_IRQ
	j	.Lio_return
ENDPROC(ext_int_handler)
981
/*
 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 * %r2 points to the idle data area, %r3 holds the idle PSW to load.
 */
ENTRY(psw_idle)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,.Lpsw_idle_lpsw+4
	stg	%r1,__SF_EMPTY+8(%r15)
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
	lpswe	__SF_EMPTY(%r15)
	BR_EX	%r14
.Lpsw_idle_end:
ENDPROC(psw_idle)
1004
/*
 * Store floating-point controls and floating-point or vector register
 * depending whether the vector facility is available. A critical section
 * cleanup assures that the registers are stored even if interrupted for
 * some other work. The CIF_FPU flag is set to trigger a lazy restore
 * of the register contents at return from io or a system call.
 */
ENTRY(save_fpu_regs)
	lg	%r2,__LC_CURRENT
	aghi	%r2,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsave_fpu_regs_exit	# already saved, CIF_FPU set
	stfpc	__THREAD_FPU_fpc(%r2)
	lg	%r3,__THREAD_FPU_regs(%r2)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jz	.Lsave_fpu_regs_fp	# no -> store FP regs
	VSTM	%v0,%v15,0,%r3		# vstm 0,15,0(3)
	VSTM	%v16,%v31,256,%r3	# vstm 16,31,256(3)
	j	.Lsave_fpu_regs_done	# -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
	std	0,0(%r3)
	std	1,8(%r3)
	std	2,16(%r3)
	std	3,24(%r3)
	std	4,32(%r3)
	std	5,40(%r3)
	std	6,48(%r3)
	std	7,56(%r3)
	std	8,64(%r3)
	std	9,72(%r3)
	std	10,80(%r3)
	std	11,88(%r3)
	std	12,96(%r3)
	std	13,104(%r3)
	std	14,112(%r3)
	std	15,120(%r3)
.Lsave_fpu_regs_done:
	oi	__LC_CPU_FLAGS+7,_CIF_FPU
.Lsave_fpu_regs_exit:
	BR_EX	%r14
.Lsave_fpu_regs_end:
ENDPROC(save_fpu_regs)
EXPORT_SYMBOL(save_fpu_regs)
1048
/*
 * Load floating-point controls and floating-point or vector registers.
 * A critical section cleanup assures that the register contents are
 * loaded even if interrupted for some other work.
 *
 * There are special calling conventions to fit into sysc and io return work:
 *	%r15:	<kernel stack>
 * The function requires:
 *	%r4
 */
load_fpu_regs:
	lg	%r4,__LC_CURRENT
	aghi	%r4,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jno	.Lload_fpu_regs_exit	# nothing pending, CIF_FPU clear
	lfpc	__THREAD_FPU_fpc(%r4)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
	VLM	%v0,%v15,0,%r4
	VLM	%v16,%v31,256,%r4
	j	.Lload_fpu_regs_done
.Lload_fpu_regs_fp:
	ld	0,0(%r4)
	ld	1,8(%r4)
	ld	2,16(%r4)
	ld	3,24(%r4)
	ld	4,32(%r4)
	ld	5,40(%r4)
	ld	6,48(%r4)
	ld	7,56(%r4)
	ld	8,64(%r4)
	ld	9,72(%r4)
	ld	10,80(%r4)
	ld	11,88(%r4)
	ld	12,96(%r4)
	ld	13,104(%r4)
	ld	14,112(%r4)
	ld	15,120(%r4)
.Lload_fpu_regs_done:
	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
.Lload_fpu_regs_exit:
	BR_EX	%r14
.Lload_fpu_regs_end:
ENDPROC(load_fpu_regs)

.L__critical_end:
1096
1097/*
1098 * Machine check handler routines
1099 */
1100ENTRY(mcck_int_handler)
1101 STCK __LC_MCCK_CLOCK
1102 BPOFF
1103 la %r1,4095 # validate r1
1104 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
1105 sckc __LC_CLOCK_COMPARATOR # validate comparator
1106 lam %a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
1107 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
1108 lg %r12,__LC_CURRENT
1109 larl %r13,cleanup_critical
1110 lmg %r8,%r9,__LC_MCK_OLD_PSW
1111 TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
1112 jo .Lmcck_panic # yes -> rest of mcck code invalid
1113 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CR_VALID
1114 jno .Lmcck_panic # control registers invalid -> panic
1115 la %r14,4095
1116 lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
1117 ptlb
1118 lg %r11,__LC_MCESAD-4095(%r14) # extended machine check save area
1119 nill %r11,0xfc00 # MCESA_ORIGIN_MASK
1120 TSTMSK __LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
1121 jno 0f
1122 TSTMSK __LC_MCCK_CODE,MCCK_CODE_GS_VALID
1123 jno 0f
1124 .insn rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
11250: l %r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
1126 TSTMSK __LC_MCCK_CODE,MCCK_CODE_FC_VALID
1127 jo 0f
1128 sr %r14,%r14
11290: sfpc %r14
1130 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
1131 jo 0f
1132 lghi %r14,__LC_FPREGS_SAVE_AREA
1133 ld %f0,0(%r14)
1134 ld %f1,8(%r14)
1135 ld %f2,16(%r14)
1136 ld %f3,24(%r14)
1137 ld %f4,32(%r14)
1138 ld %f5,40(%r14)
1139 ld %f6,48(%r14)
1140 ld %f7,56(%r14)
1141 ld %f8,64(%r14)
1142 ld %f9,72(%r14)
1143 ld %f10,80(%r14)
1144 ld %f11,88(%r14)
1145 ld %f12,96(%r14)
1146 ld %f13,104(%r14)
1147 ld %f14,112(%r14)
1148 ld %f15,120(%r14)
1149 j 1f
11500: VLM %v0,%v15,0,%r11
1151 VLM %v16,%v31,256,%r11
11521: lghi %r14,__LC_CPU_TIMER_SAVE_AREA
1153 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
1154 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
1155 jo 3f
1156 la %r14,__LC_SYNC_ENTER_TIMER
1157 clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
1158 jl 0f
1159 la %r14,__LC_ASYNC_ENTER_TIMER
11600: clc 0(8,%r14),__LC_EXIT_TIMER
1161 jl 1f
1162 la %r14,__LC_EXIT_TIMER
11631: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
1164 jl 2f
1165 la %r14,__LC_LAST_UPDATE_TIMER
11662: spt 0(%r14)
1167 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
11683: TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
1169 jno .Lmcck_panic
1170 tmhh %r8,0x0001 # interrupting from user ?
1171 jnz 4f
1172 TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
1173 jno .Lmcck_panic
11744: ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
1175 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
1176.Lmcck_skip:
1177 lghi %r14,__LC_GPREGS_SAVE_AREA+64
1178 stmg %r0,%r7,__PT_R0(%r11)
1179 # clear user controlled registers to prevent speculative use
1180 xgr %r0,%r0
1181 xgr %r1,%r1
1182 xgr %r2,%r2
1183 xgr %r3,%r3
1184 xgr %r4,%r4
1185 xgr %r5,%r5
1186 xgr %r6,%r6
1187 xgr %r7,%r7
1188 xgr %r10,%r10
1189 mvc __PT_R8(64,%r11),0(%r14)
1190 stmg %r8,%r9,__PT_PSW(%r11)
1191 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
1192 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
1193 lgr %r2,%r11 # pass pointer to pt_regs
1194 brasl %r14,s390_do_machine_check
1195 tm __PT_PSW+1(%r11),0x01 # returning to user ?
1196 jno .Lmcck_return
1197 lg %r1,__LC_KERNEL_STACK # switch to kernel stack
1198 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
1199 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
1200 la %r11,STACK_FRAME_OVERHEAD(%r1)
1201 lgr %r15,%r1
1202 TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
1203 jno .Lmcck_return
1204 TRACE_IRQS_OFF
1205 brasl %r14,s390_handle_mcck
1206 TRACE_IRQS_ON
1207.Lmcck_return:
1208 lg %r14,__LC_VDSO_PER_CPU
1209 lmg %r0,%r10,__PT_R0(%r11)
1210 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
1211 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
1212 jno 0f
1213 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
1214 stpt __LC_EXIT_TIMER
1215 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
12160: lmg %r11,%r15,__PT_R11(%r11)
1217 lpswe __LC_RETURN_MCCK_PSW
1218
1219.Lmcck_panic:
1220 lg %r15,__LC_NODAT_STACK
1221 la %r11,STACK_FRAME_OVERHEAD(%r15)
1222 j .Lmcck_skip
1223ENDPROC(mcck_int_handler)
1224
1225#
1226# PSW restart interrupt handler
1227#
1228ENTRY(restart_int_handler)
1229 ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
1230 stg %r15,__LC_SAVE_AREA_RESTART
1231 lg %r15,__LC_RESTART_STACK
1232 xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
1233 stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
1234 mvc STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
1235 mvc STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
1236 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
1237 lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
1238 lg %r2,__LC_RESTART_DATA
1239 lg %r3,__LC_RESTART_SOURCE
1240 ltgr %r3,%r3 # test source cpu address
1241 jm 1f # negative -> skip source stop
12420: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
1243 brc 10,0b # wait for status stored
12441: basr %r14,%r1 # call function
1245 stap __SF_EMPTY(%r15) # store cpu address
1246 llgh %r3,__SF_EMPTY(%r15)
12472: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
1248 brc 2,2b
12493: j 3b
1250ENDPROC(restart_int_handler)
1251
	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
ENTRY(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	# %r14 holds the interrupt save area address (set up by the caller,
	# see CHECK_STACK); copy r8-r15 from there
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11	# pass pointer to pt_regs
	jg	kernel_stack_overflow
ENDPROC(stack_overflow)
#endif
1272
#
# Critical section cleanup. Called with %r9 = address at which the
# critical section was interrupted. Compares %r9 against the table of
# critical-section boundaries and dispatches to the matching cleanup
# routine, which rolls the interrupted code forward (or back) to a
# consistent restart point and returns the new address in %r9.
#
ENTRY(cleanup_critical)
#if IS_ENABLED(CONFIG_KVM)
	clg	%r9,BASED(.Lcleanup_table_sie)	# .Lsie_gmap
	jl	0f
	clg	%r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
	jl	.Lcleanup_sie
#endif
	clg	%r9,BASED(.Lcleanup_table)	# system_call
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
	jl	.Lcleanup_system_call
	clg	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+24)	# .Lsysc_restore
	jl	.Lcleanup_sysc_tif
	clg	%r9,BASED(.Lcleanup_table+32)	# .Lsysc_done
	jl	.Lcleanup_sysc_restore
	clg	%r9,BASED(.Lcleanup_table+40)	# .Lio_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+48)	# .Lio_restore
	jl	.Lcleanup_io_tif
	clg	%r9,BASED(.Lcleanup_table+56)	# .Lio_done
	jl	.Lcleanup_io_restore
	clg	%r9,BASED(.Lcleanup_table+64)	# psw_idle
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
	jl	.Lcleanup_idle
	clg	%r9,BASED(.Lcleanup_table+80)	# save_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+88)	# .Lsave_fpu_regs_end
	jl	.Lcleanup_save_fpu_regs
	clg	%r9,BASED(.Lcleanup_table+96)	# load_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
	jl	.Lcleanup_load_fpu_regs
0:	BR_EX	%r14,%r11			# not in a critical section
ENDPROC(cleanup_critical)
1310
	# table of critical-section boundary addresses, consumed in
	# pairs (start, end) by cleanup_critical above
	.align	8
.Lcleanup_table:
	.quad	system_call
	.quad	.Lsysc_do_svc
	.quad	.Lsysc_tif
	.quad	.Lsysc_restore
	.quad	.Lsysc_done
	.quad	.Lio_tif
	.quad	.Lio_restore
	.quad	.Lio_done
	.quad	psw_idle
	.quad	.Lpsw_idle_end
	.quad	save_fpu_regs
	.quad	.Lsave_fpu_regs_end
	.quad	load_fpu_regs
	.quad	.Lload_fpu_regs_end

#if IS_ENABLED(CONFIG_KVM)
.Lcleanup_table_sie:
	.quad	.Lsie_gmap
	.quad	.Lsie_done
1332
# Interrupted inside the SIE critical section: leave SIE cleanly and
# resume at sie_exit. If the interrupt was a machine check that hit
# between .Lsie_entry and .Lsie_skip, flag it as a guest mcck.
.Lcleanup_sie:
	cghi	%r11,__LC_SAVE_AREA_ASYNC	#Is this in normal interrupt?
	je	1f
	slg	%r9,BASED(.Lsie_crit_mcck_start)
	clg	%r9,BASED(.Lsie_crit_mcck_length)
	jh	1f
	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
1:	BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	BR_EX	%r14,%r11
#endif
1347
# Interrupted in the system_call entry path before .Lsysc_do_svc:
# redo any entry work that had not completed yet (timer update, register
# save, base register setup, accounting) and restart at .Lsysc_do_svc.
.Lcleanup_system_call:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn)
	jh	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	# check if stmg has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn+8)
	jh	0f
	mvc	__LC_SAVE_AREA_SYNC(64),0(%r11)
0:	# check if base register setup + TIF bit load has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+16)
	jhe	0f
	# set up saved register r12 task struct pointer
	stg	%r12,32(%r11)
	# set up saved register r13 __TASK_thread offset
	mvc	40(8,%r11),BASED(.Lcleanup_system_call_const)
0:	# check if the user time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
	jh	0f
	lg	%r15,__LC_EXIT_TIMER		# user time = exit - entry
	slg	%r15,__LC_SYNC_ENTER_TIMER
	alg	%r15,__LC_USER_TIMER
	stg	%r15,__LC_USER_TIMER
0:	# check if the system time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+32)
	jh	0f
	lg	%r15,__LC_LAST_UPDATE_TIMER	# system time = last - exit
	slg	%r15,__LC_EXIT_TIMER
	alg	%r15,__LC_SYSTEM_TIMER
	stg	%r15,__LC_SYSTEM_TIMER
0:	# update accounting time stamp
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	# set up saved register r11
	lg	%r15,__LC_KERNEL_STACK
	la	%r9,STACK_FRAME_OVERHEAD(%r15)
	stg	%r9,24(%r11)		# r11 pt_regs pointer
	# fill pt_regs
	mvc	__PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
	stmg	%r0,%r7,__PT_R0(%r9)
	mvc	__PT_PSW(16,%r9),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
	xc	__PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
	mvi	__PT_FLAGS+7(%r9),_PIF_SYSCALL
	# setup saved register r15
	stg	%r15,56(%r11)		# r15 stack pointer
	# set new psw address and exit
	larl	%r9,.Lsysc_do_svc
	BR_EX	%r14,%r11
.Lcleanup_system_call_insn:
	# checkpoints of the system_call entry path, in execution order
	.quad	system_call
	.quad	.Lsysc_stmg
	.quad	.Lsysc_per
	.quad	.Lsysc_vtime+36
	.quad	.Lsysc_vtime+42
.Lcleanup_system_call_const:
	.quad	__TASK_thread
1408
# Interrupted between .Lsysc_tif and .Lsysc_restore: simply restart the
# TIF work check.
.Lcleanup_sysc_tif:
	larl	%r9,.Lsysc_tif
	BR_EX	%r14,%r11

# Interrupted between .Lsysc_restore and .Lsysc_done: the exit sequence
# was underway; redo the exit timer update and reload the registers that
# had already been restored from pt_regs.
.Lcleanup_sysc_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
	jh	0f
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_sysc_restore_insn+8)
	je	1f			# at the final lpswe already
	lg	%r9,24(%r11)		# get saved pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
	BR_EX	%r14,%r11
.Lcleanup_sysc_restore_insn:
	.quad	.Lsysc_exit_timer
	.quad	.Lsysc_done - 4

# Interrupted between .Lio_tif and .Lio_restore: restart the io TIF check.
.Lcleanup_io_tif:
	larl	%r9,.Lio_tif
	BR_EX	%r14,%r11

# Interrupted between .Lio_restore and .Lio_done: analogous to
# .Lcleanup_sysc_restore for the io exit path.
.Lcleanup_io_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_io_restore_insn)
	jh	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_io_restore_insn+8)
	je	1f			# at the final lpswe already
	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
	BR_EX	%r14,%r11
.Lcleanup_io_restore_insn:
	.quad	.Lio_exit_timer
	.quad	.Lio_done - 4
1453
# Interrupted inside psw_idle: finish the idle bookkeeping (idle clock,
# cpu timer, mt cycles, steal/system time) and resume after psw_idle.
# %r2 still holds the idle data pointer passed to psw_idle.
.Lcleanup_idle:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
	# copy interrupt clock & cpu timer
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0:	# check if stck & stpt have been executed
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jhe	1f
	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1:	# calculate idle cycles
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jl	3f
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	3f			# no multithreading -> skip
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)	# STCCTM
	larl	%r3,mt_cycles
	ag	%r3,__LC_PERCPU_OFFSET
	la	%r4,__SF_EMPTY+16(%r15)
2:	lg	%r0,0(%r3)		# accumulate per-thread cycle deltas
	slg	%r0,0(%r4)
	alg	%r0,64(%r4)
	stg	%r0,0(%r3)
	la	%r3,8(%r3)
	la	%r4,8(%r4)
	brct	%r1,2b
3:	# account system time going idle
	lg	%r9,__LC_STEAL_TIMER
	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
	slg	%r9,__LC_LAST_UPDATE_CLOCK
	stg	%r9,__LC_STEAL_TIMER
	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
	lg	%r9,__LC_SYSTEM_TIMER
	alg	%r9,__LC_LAST_UPDATE_TIMER
	slg	%r9,__TIMER_IDLE_ENTER(%r2)
	stg	%r9,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
	# prepare return psw
	nihh	%r8,0xfcfd		# clear irq & wait state bits
	lg	%r9,48(%r11)		# return from psw_idle
	BR_EX	%r14,%r11
.Lcleanup_idle_insn:
	.quad	.Lpsw_idle_lpsw

# Interrupted inside save_fpu_regs: restart it from the beginning.
.Lcleanup_save_fpu_regs:
	larl	%r9,save_fpu_regs
	BR_EX	%r14,%r11

# Interrupted inside load_fpu_regs: restart it from the beginning.
.Lcleanup_load_fpu_regs:
	larl	%r9,load_fpu_regs
	BR_EX	%r14,%r11
1511
1512/*
1513 * Integer constants
1514 */
1515 .align 8
1516.Lcritical_start:
1517 .quad .L__critical_start
1518.Lcritical_length:
1519 .quad .L__critical_end - .L__critical_start
1520#if IS_ENABLED(CONFIG_KVM)
1521.Lsie_critical_start:
1522 .quad .Lsie_gmap
1523.Lsie_critical_length:
1524 .quad .Lsie_done - .Lsie_gmap
1525.Lsie_crit_mcck_start:
1526 .quad .Lsie_entry
1527.Lsie_crit_mcck_length:
1528 .quad .Lsie_skip - .Lsie_entry
1529#endif
	.section .rodata, "a"
	# 64-bit system call table: one .quad per entry, generated from
	# the shared syscall_table.h list
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT

	# 31-bit compat table: same list, compat entry points
#define SYSCALL(esame,emu)	.quad __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif
1/*
2 * S390 low-level entry points.
3 *
4 * Copyright IBM Corp. 1999, 2012
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
6 * Hartmut Penner (hp@de.ibm.com),
7 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
8 * Heiko Carstens <heiko.carstens@de.ibm.com>
9 */
10
11#include <linux/init.h>
12#include <linux/linkage.h>
13#include <asm/cache.h>
14#include <asm/errno.h>
15#include <asm/ptrace.h>
16#include <asm/thread_info.h>
17#include <asm/asm-offsets.h>
18#include <asm/unistd.h>
19#include <asm/page.h>
20#include <asm/sigp.h>
21#include <asm/irq.h>
22
# Offsets of the individual gprs within the pt_regs gpr array.
# This is the 31-bit variant: each register slot is 4 bytes.
# BUG FIX: __PT_R13 was "__PT_GPRS + 524" - register slots are 4 bytes
# apart, so r13 lives at offset 52 (13 * 4); 524 would address far
# outside the pt_regs structure.
__PT_R0	     =	__PT_GPRS
__PT_R1	     =	__PT_GPRS + 4
__PT_R2	     =	__PT_GPRS + 8
__PT_R3	     =	__PT_GPRS + 12
__PT_R4	     =	__PT_GPRS + 16
__PT_R5	     =	__PT_GPRS + 20
__PT_R6	     =	__PT_GPRS + 24
__PT_R7	     =	__PT_GPRS + 28
__PT_R8	     =	__PT_GPRS + 32
__PT_R9	     =	__PT_GPRS + 36
__PT_R10     =	__PT_GPRS + 40
__PT_R11     =	__PT_GPRS + 44
__PT_R12     =	__PT_GPRS + 48
__PT_R13     =	__PT_GPRS + 52
__PT_R14     =	__PT_GPRS + 56
__PT_R15     =	__PT_GPRS + 60

# TIF work masks checked on the syscall and interrupt return paths
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		 _TIF_MCCK_PENDING | _TIF_PER_TRAP | _TIF_ASCE)
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		 _TIF_MCCK_PENDING | _TIF_ASCE)
_TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		 _TIF_SYSCALL_TRACEPOINT)
# TIF bits transferred from prev to next task in __switch_to
_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT  = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

# %r13 holds the address of system_call on the entry paths
#define BASED(name) name-system_call(%r13)
53
	# call trace_hardirqs_on_caller with %r2 = caller address
	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	l	%r1,BASED(.Lhardirqs_on)
	basr	%r14,%r1		# call trace_hardirqs_on_caller
#endif
	.endm

	# call trace_hardirqs_off_caller with %r2 = caller address
	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	l	%r1,BASED(.Lhardirqs_off)
	basr	%r14,%r1		# call trace_hardirqs_off_caller
#endif
	.endm

	# notify lockdep when returning to user space
	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10			# no -> skip the call
	l	%r1,BASED(.Llockdep_sys_exit)
	basr	%r14,%r1		# call lockdep_sys_exit
#endif
	.endm

	# jump to stack_overflow (with %r14 = \savearea) if %r15 has
	# run into the stack guard area
	.macro	CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,\stacksize - CONFIG_STACK_GUARD
	la	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

	# switch to the async stack \stack unless already on it; if the
	# interrupt hit inside the critical section, run cleanup_critical
	# first (%r9 = interrupted address, may be updated by the cleanup)
	.macro	SWITCH_ASYNC savearea,stack,shift
	tmh	%r8,0x0001		# interrupting from user ?
	jnz	1f
	lr	%r14,%r9
	sl	%r14,BASED(.Lcritical_start)
	cl	%r14,BASED(.Lcritical_length)
	jhe	0f
	la	%r11,\savearea		# inside critical section, do cleanup
	bras	%r14,cleanup_critical
	tmh	%r8,0x0001		# retest problem state after cleanup
	jnz	1f
0:	l	%r14,\stack		# are we already on the target stack?
	slr	%r14,%r15
	sra	%r14,\shift
	jnz	1f
	CHECK_STACK 1<<\shift,\savearea
	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	l	%r15,\stack		# load target stack
2:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm

	# 64-bit add of \timer to the register pair \high:\low,
	# propagating the carry by hand (31-bit mode)
	.macro	ADD64 high,low,timer
	al	\high,\timer
	al	\low,4+\timer
	brc	12,.+8			# no carry -> skip
	ahi	\high,1
	.endm

	# 64-bit subtract of \timer from \high:\low with manual borrow
	.macro	SUB64 high,low,timer
	sl	\high,\timer
	sl	\low,4+\timer
	brc	3,.+8			# no borrow -> skip
	ahi	\high,-1
	.endm

	# account elapsed user and system cpu time since \enter_timer
	.macro	UPDATE_VTIME high,low,enter_timer
	lm	\high,\low,__LC_EXIT_TIMER
	SUB64	\high,\low,\enter_timer
	ADD64	\high,\low,__LC_USER_TIMER
	stm	\high,\low,__LC_USER_TIMER
	lm	\high,\low,__LC_LAST_UPDATE_TIMER
	SUB64	\high,\low,__LC_EXIT_TIMER
	ADD64	\high,\low,__LC_SYSTEM_TIMER
	stm	\high,\low,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm

	# re-enable interrupts using the interrupted context's PSW mask
	# (%r8), with the PER mask bit (0x40) cleared
	.macro REENABLE_IRQS
	st	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf
	ssm	__LC_RETURN_PSW
	.endm
140
	.section .kprobes.text, "ax"

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stm	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	st	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
	l	%r5,__THREAD_info(%r3)		# get thread_info of next
	lr	%r15,%r5
	ahi	%r15,STACK_INIT			# end of kernel stack of next
	st	%r3,__LC_CURRENT		# store task struct of next
	st	%r5,__LC_THREAD_INFO		# store thread info of next
	st	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3)	# store pid of next
	l	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
	lhi	%r6,_TIF_TRANSFER		# transfer TIF bits
	n	%r6,__TI_flags(%r4)		# isolate TIF bits
	jz	0f				# none set -> nothing to move
	o	%r6,__TI_flags(%r5)		# set TIF bits of next
	st	%r6,__TI_flags(%r5)
	ni	__TI_flags+3(%r4),255-_TIF_TRANSFER # clear TIF bits of prev
0:	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	br	%r14
171
__critical_start:
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
sysc_stm:
	stm	%r8,%r15,__LC_SAVE_AREA_SYNC
	l	%r12,__LC_THREAD_INFO		# %r12 = thread_info
	l	%r13,__LC_SVC_NEW_PSW+4		# %r13 = base for BASED()
sysc_per:
	l	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
sysc_vtime:
	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
	stm	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(8,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
sysc_do_svc:
	oi	__TI_flags+3(%r12),_TIF_SYSCALL
	l	%r10,__TI_sysc_table(%r12)	# 31 bit system call table
	lh	%r8,__PT_INT_CODE+2(%r11)
	sla	%r8,2				# shift and test for svc0
	jnz	sysc_nr_ok
	# svc 0: system call number in %r1
	cl	%r1,BASED(.Lnr_syscalls)
	jnl	sysc_nr_ok			# out of range -> leave svc 0
	sth	%r1,__PT_INT_CODE+2(%r11)
	lr	%r8,%r1
	sla	%r8,2
sysc_nr_ok:
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	st	%r2,__PT_ORIG_GPR2(%r11)	# save for restart handling
	st	%r7,STACK_FRAME_OVERHEAD(%r15)	# 7th argument on stack
	l	%r9,0(%r8,%r10)			# get system call addr.
	tm	__TI_flags+2(%r12),_TIF_TRACE >> 8
	jnz	sysc_tracesys
	basr	%r14,%r9			# call sys_xxxx
	st	%r2,__PT_R2(%r11)		# store return value

sysc_return:
	LOCKDEP_SYS_EXIT
sysc_tif:
	tm	__PT_PSW+1(%r11),0x01		# returning to user ?
	jno	sysc_restore
	tm	__TI_flags+3(%r12),_TIF_WORK_SVC
	jnz	sysc_work			# check for work
	ni	__TI_flags+3(%r12),255-_TIF_SYSCALL
sysc_restore:
	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	lm	%r0,%r15,__PT_R0(%r11)
	lpsw	__LC_RETURN_PSW
sysc_done:
229
230#
231# One of the work bits is on. Find out which one.
232#
233sysc_work:
234 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
235 jo sysc_mcck_pending
236 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
237 jo sysc_reschedule
238 tm __TI_flags+3(%r12),_TIF_PER_TRAP
239 jo sysc_singlestep
240 tm __TI_flags+3(%r12),_TIF_SIGPENDING
241 jo sysc_sigpending
242 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
243 jo sysc_notify_resume
244 tm __TI_flags+3(%r12),_TIF_ASCE
245 jo sysc_uaccess
246 j sysc_return # beware of critical section cleanup
247
248#
249# _TIF_NEED_RESCHED is set, call schedule
250#
251sysc_reschedule:
252 l %r1,BASED(.Lschedule)
253 la %r14,BASED(sysc_return)
254 br %r1 # call schedule
255
256#
257# _TIF_MCCK_PENDING is set, call handler
258#
259sysc_mcck_pending:
260 l %r1,BASED(.Lhandle_mcck)
261 la %r14,BASED(sysc_return)
262 br %r1 # TIF bit will be cleared by handler
263
264#
265# _TIF_ASCE is set, load user space asce
266#
267sysc_uaccess:
268 ni __TI_flags+3(%r12),255-_TIF_ASCE
269 lctl %c1,%c1,__LC_USER_ASCE # load primary asce
270 j sysc_return
271
272#
273# _TIF_SIGPENDING is set, call do_signal
274#
275sysc_sigpending:
276 lr %r2,%r11 # pass pointer to pt_regs
277 l %r1,BASED(.Ldo_signal)
278 basr %r14,%r1 # call do_signal
279 tm __TI_flags+3(%r12),_TIF_SYSCALL
280 jno sysc_return
281 lm %r2,%r7,__PT_R2(%r11) # load svc arguments
282 l %r10,__TI_sysc_table(%r12) # 31 bit system call table
283 xr %r8,%r8 # svc 0 returns -ENOSYS
284 clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
285 jnl sysc_nr_ok # invalid svc number -> do svc 0
286 lh %r8,__PT_INT_CODE+2(%r11) # load new svc number
287 sla %r8,2
288 j sysc_nr_ok # restart svc
289
290#
291# _TIF_NOTIFY_RESUME is set, call do_notify_resume
292#
293sysc_notify_resume:
294 lr %r2,%r11 # pass pointer to pt_regs
295 l %r1,BASED(.Ldo_notify_resume)
296 la %r14,BASED(sysc_return)
297 br %r1 # call do_notify_resume
298
299#
300# _TIF_PER_TRAP is set, call do_per_trap
301#
302sysc_singlestep:
303 ni __TI_flags+3(%r12),255-_TIF_PER_TRAP
304 lr %r2,%r11 # pass pointer to pt_regs
305 l %r1,BASED(.Ldo_per_trap)
306 la %r14,BASED(sysc_return)
307 br %r1 # call do_per_trap
308
309#
310# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
311# and after the system call
312#
313sysc_tracesys:
314 l %r1,BASED(.Ltrace_enter)
315 lr %r2,%r11 # pass pointer to pt_regs
316 la %r3,0
317 xr %r0,%r0
318 icm %r0,3,__PT_INT_CODE+2(%r11)
319 st %r0,__PT_R2(%r11)
320 basr %r14,%r1 # call do_syscall_trace_enter
321 cl %r2,BASED(.Lnr_syscalls)
322 jnl sysc_tracenogo
323 lr %r8,%r2
324 sll %r8,2
325 l %r9,0(%r8,%r10)
326sysc_tracego:
327 lm %r3,%r7,__PT_R3(%r11)
328 st %r7,STACK_FRAME_OVERHEAD(%r15)
329 l %r2,__PT_ORIG_GPR2(%r11)
330 basr %r14,%r9 # call sys_xxx
331 st %r2,__PT_R2(%r11) # store return value
332sysc_tracenogo:
333 tm __TI_flags+2(%r12),_TIF_TRACE >> 8
334 jz sysc_return
335 l %r1,BASED(.Ltrace_exit)
336 lr %r2,%r11 # pass pointer to pt_regs
337 la %r14,BASED(sysc_return)
338 br %r1 # call do_syscall_trace_exit
339
340#
341# a new process exits the kernel with ret_from_fork
342#
343ENTRY(ret_from_fork)
344 la %r11,STACK_FRAME_OVERHEAD(%r15)
345 l %r12,__LC_THREAD_INFO
346 l %r13,__LC_SVC_NEW_PSW+4
347 l %r1,BASED(.Lschedule_tail)
348 basr %r14,%r1 # call schedule_tail
349 TRACE_IRQS_ON
350 ssm __LC_SVC_NEW_PSW # reenable interrupts
351 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
352 jne sysc_tracenogo
353 # it's a kernel thread
354 lm %r9,%r10,__PT_R9(%r11) # load gprs
355ENTRY(kernel_thread_starter)
356 la %r2,0(%r10)
357 basr %r14,%r9
358 j sysc_tracenogo
359
360/*
361 * Program check handler routine
362 */
363
364ENTRY(pgm_check_handler)
365 stpt __LC_SYNC_ENTER_TIMER
366 stm %r8,%r15,__LC_SAVE_AREA_SYNC
367 l %r12,__LC_THREAD_INFO
368 l %r13,__LC_SVC_NEW_PSW+4
369 lm %r8,%r9,__LC_PGM_OLD_PSW
370 tmh %r8,0x0001 # test problem state bit
371 jnz 1f # -> fault in user space
372 tmh %r8,0x4000 # PER bit set in old PSW ?
373 jnz 0f # -> enabled, can't be a double fault
374 tm __LC_PGM_ILC+3,0x80 # check for per exception
375 jnz pgm_svcper # -> single stepped svc
3760: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
377 ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
378 j 2f
3791: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
380 l %r15,__LC_KERNEL_STACK
3812: la %r11,STACK_FRAME_OVERHEAD(%r15)
382 stm %r0,%r7,__PT_R0(%r11)
383 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
384 stm %r8,%r9,__PT_PSW(%r11)
385 mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
386 mvc __PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE
387 tm __LC_PGM_ILC+3,0x80 # check for per exception
388 jz 0f
389 l %r1,__TI_task(%r12)
390 tmh %r8,0x0001 # kernel per event ?
391 jz pgm_kprobe
392 oi __TI_flags+3(%r12),_TIF_PER_TRAP
393 mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
394 mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
395 mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
3960: REENABLE_IRQS
397 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
398 l %r1,BASED(.Ljump_table)
399 la %r10,0x7f
400 n %r10,__PT_INT_CODE(%r11)
401 je sysc_return
402 sll %r10,2
403 l %r1,0(%r10,%r1) # load address of handler routine
404 lr %r2,%r11 # pass pointer to pt_regs
405 basr %r14,%r1 # branch to interrupt-handler
406 j sysc_return
407
408#
409# PER event in supervisor state, must be kprobes
410#
411pgm_kprobe:
412 REENABLE_IRQS
413 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
414 l %r1,BASED(.Ldo_per_trap)
415 lr %r2,%r11 # pass pointer to pt_regs
416 basr %r14,%r1 # call do_per_trap
417 j sysc_return
418
419#
420# single stepped system call
421#
422pgm_svcper:
423 oi __TI_flags+3(%r12),_TIF_PER_TRAP
424 mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
425 mvc __LC_RETURN_PSW+4(4),BASED(.Lsysc_per)
426 lpsw __LC_RETURN_PSW # branch to sysc_per and enable irqs
427
428/*
429 * IO interrupt handler routine
430 */
431
432ENTRY(io_int_handler)
433 stck __LC_INT_CLOCK
434 stpt __LC_ASYNC_ENTER_TIMER
435 stm %r8,%r15,__LC_SAVE_AREA_ASYNC
436 l %r12,__LC_THREAD_INFO
437 l %r13,__LC_SVC_NEW_PSW+4
438 lm %r8,%r9,__LC_IO_OLD_PSW
439 tmh %r8,0x0001 # interrupting from user ?
440 jz io_skip
441 UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
442io_skip:
443 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
444 stm %r0,%r7,__PT_R0(%r11)
445 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
446 stm %r8,%r9,__PT_PSW(%r11)
447 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
448 TRACE_IRQS_OFF
449 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
450io_loop:
451 l %r1,BASED(.Ldo_IRQ)
452 lr %r2,%r11 # pass pointer to pt_regs
453 lhi %r3,IO_INTERRUPT
454 tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
455 jz io_call
456 lhi %r3,THIN_INTERRUPT
457io_call:
458 basr %r14,%r1 # call do_IRQ
459 tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR
460 jz io_return
461 tpi 0
462 jz io_return
463 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
464 j io_loop
465io_return:
466 LOCKDEP_SYS_EXIT
467 TRACE_IRQS_ON
468io_tif:
469 tm __TI_flags+3(%r12),_TIF_WORK_INT
470 jnz io_work # there is work to do (signals etc.)
471io_restore:
472 mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
473 stpt __LC_EXIT_TIMER
474 lm %r0,%r15,__PT_R0(%r11)
475 lpsw __LC_RETURN_PSW
476io_done:
477
478#
479# There is work todo, find out in which context we have been interrupted:
480# 1) if we return to user space we can do all _TIF_WORK_INT work
481# 2) if we return to kernel code and preemptive scheduling is enabled check
482# the preemption counter and if it is zero call preempt_schedule_irq
483# Before any work can be done, a switch to the kernel stack is required.
484#
485io_work:
486 tm __PT_PSW+1(%r11),0x01 # returning to user ?
487 jo io_work_user # yes -> do resched & signal
488#ifdef CONFIG_PREEMPT
489 # check for preemptive scheduling
490 icm %r0,15,__TI_precount(%r12)
491 jnz io_restore # preemption disabled
492 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
493 jno io_restore
494 # switch to kernel stack
495 l %r1,__PT_R15(%r11)
496 ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
497 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
498 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
499 la %r11,STACK_FRAME_OVERHEAD(%r1)
500 lr %r15,%r1
501 # TRACE_IRQS_ON already done at io_return, call
502 # TRACE_IRQS_OFF to keep things symmetrical
503 TRACE_IRQS_OFF
504 l %r1,BASED(.Lpreempt_irq)
505 basr %r14,%r1 # call preempt_schedule_irq
506 j io_return
507#else
508 j io_restore
509#endif
510
511#
512# Need to do work before returning to userspace, switch to kernel stack
513#
514io_work_user:
515 l %r1,__LC_KERNEL_STACK
516 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
517 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
518 la %r11,STACK_FRAME_OVERHEAD(%r1)
519 lr %r15,%r1
520
521#
522# One of the work bits is on. Find out which one.
523# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
524# and _TIF_MCCK_PENDING
525#
526io_work_tif:
527 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
528 jo io_mcck_pending
529 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
530 jo io_reschedule
531 tm __TI_flags+3(%r12),_TIF_SIGPENDING
532 jo io_sigpending
533 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
534 jo io_notify_resume
535 tm __TI_flags+3(%r12),_TIF_ASCE
536 jo io_uaccess
537 j io_return # beware of critical section cleanup
538
539#
540# _TIF_MCCK_PENDING is set, call handler
541#
542io_mcck_pending:
543 # TRACE_IRQS_ON already done at io_return
544 l %r1,BASED(.Lhandle_mcck)
545 basr %r14,%r1 # TIF bit will be cleared by handler
546 TRACE_IRQS_OFF
547 j io_return
548
549#
550# _TIF_ASCE is set, load user space asce
551#
552io_uaccess:
553 ni __TI_flags+3(%r12),255-_TIF_ASCE
554 lctl %c1,%c1,__LC_USER_ASCE # load primary asce
555 j io_return
556
557#
558# _TIF_NEED_RESCHED is set, call schedule
559#
560io_reschedule:
561 # TRACE_IRQS_ON already done at io_return
562 l %r1,BASED(.Lschedule)
563 ssm __LC_SVC_NEW_PSW # reenable interrupts
564 basr %r14,%r1 # call scheduler
565 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
566 TRACE_IRQS_OFF
567 j io_return
568
569#
570# _TIF_SIGPENDING is set, call do_signal
571#
572io_sigpending:
573 # TRACE_IRQS_ON already done at io_return
574 l %r1,BASED(.Ldo_signal)
575 ssm __LC_SVC_NEW_PSW # reenable interrupts
576 lr %r2,%r11 # pass pointer to pt_regs
577 basr %r14,%r1 # call do_signal
578 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
579 TRACE_IRQS_OFF
580 j io_return
581
582#
583# _TIF_SIGPENDING is set, call do_signal
584#
585io_notify_resume:
586 # TRACE_IRQS_ON already done at io_return
587 l %r1,BASED(.Ldo_notify_resume)
588 ssm __LC_SVC_NEW_PSW # reenable interrupts
589 lr %r2,%r11 # pass pointer to pt_regs
590 basr %r14,%r1 # call do_notify_resume
591 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
592 TRACE_IRQS_OFF
593 j io_return
594
595/*
596 * External interrupt handler routine
597 */
598
599ENTRY(ext_int_handler)
600 stck __LC_INT_CLOCK
601 stpt __LC_ASYNC_ENTER_TIMER
602 stm %r8,%r15,__LC_SAVE_AREA_ASYNC
603 l %r12,__LC_THREAD_INFO
604 l %r13,__LC_SVC_NEW_PSW+4
605 lm %r8,%r9,__LC_EXT_OLD_PSW
606 tmh %r8,0x0001 # interrupting from user ?
607 jz ext_skip
608 UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
609ext_skip:
610 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
611 stm %r0,%r7,__PT_R0(%r11)
612 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
613 stm %r8,%r9,__PT_PSW(%r11)
614 mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
615 mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
616 TRACE_IRQS_OFF
617 l %r1,BASED(.Ldo_IRQ)
618 lr %r2,%r11 # pass pointer to pt_regs
619 lhi %r3,EXT_INTERRUPT
620 basr %r14,%r1 # call do_IRQ
621 j io_return
622
623/*
624 * Load idle PSW. The second "half" of this function is in cleanup_idle.
625 */
# In: %r2 = pointer to the per-cpu idle data block, %r3 = psw mask word.
# Builds the wait PSW in __SF_EMPTY (mask word + resume address with the
# 31-bit addressing bit 0x80000000 set via the oi), timestamps the idle
# entry, then loads the enabled-wait PSW. An interrupt resumes after the
# lpsw; cleanup_critical/cleanup_idle repairs partial execution.
626ENTRY(psw_idle)
627 st %r3,__SF_EMPTY(%r15) # PSW mask word
628 basr %r1,0 # get current address
629 la %r1,psw_idle_lpsw+4-.(%r1) # resume address after lpsw
630 st %r1,__SF_EMPTY+4(%r15)
631 oi __SF_EMPTY+4(%r15),0x80 # set 31-bit addressing mode bit
632 stck __CLOCK_IDLE_ENTER(%r2)
633 stpt __TIMER_IDLE_ENTER(%r2)
634psw_idle_lpsw:
635 lpsw __SF_EMPTY(%r15) # enter enabled wait state
636 br %r14
637psw_idle_end:
638
# End of the kernel critical section covered by cleanup_critical.
639__critical_end:
640
640
641/*
642 * Machine check handler routines
643 */
644
# Entry with the machine-check new PSW. Registers and the cpu timer may
# have been corrupted by the check; they are revalidated from the lowcore
# save areas first. %r8/%r9 = machine-check old PSW words.
645ENTRY(mcck_int_handler)
646 stck __LC_MCCK_CLOCK
647 spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
648 lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
649 l %r12,__LC_THREAD_INFO
650 l %r13,__LC_SVC_NEW_PSW+4 # literal pool base for BASED()
651 lm %r8,%r9,__LC_MCK_OLD_PSW
652 tm __LC_MCCK_CODE,0x80 # system damage?
653 jo mcck_panic # yes -> rest of mcck code invalid
654 la %r14,__LC_CPU_TIMER_SAVE_AREA
655 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
656 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
657 jo 3f
# Stored timer invalid: pick the most recently saved plausible value
# (max of sync/async enter, exit and last-update timers) and restore it.
658 la %r14,__LC_SYNC_ENTER_TIMER
659 clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
660 jl 0f
661 la %r14,__LC_ASYNC_ENTER_TIMER
6620: clc 0(8,%r14),__LC_EXIT_TIMER
663 jl 1f
664 la %r14,__LC_EXIT_TIMER
6651: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
666 jl 2f
667 la %r14,__LC_LAST_UPDATE_TIMER
6682: spt 0(%r14)
669 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
6703: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
671 jno mcck_panic # no -> skip cleanup critical
# FIX: was "tm %r8,0x0001" — TM takes a storage operand, not a register;
# the register form testing the PSW problem-state bit is TMH, as used by
# the identical check in ext_int_handler above.
672 tmh %r8,0x0001 # interrupting from user ?
673 jz mcck_skip
674 UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
675mcck_skip:
676 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
677 stm %r0,%r7,__PT_R0(%r11) # %r11 = pt_regs after SWITCH_ASYNC
678 mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
679 stm %r8,%r9,__PT_PSW(%r11)
680 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
681 l %r1,BASED(.Ldo_machine_check)
682 lr %r2,%r11 # pass pointer to pt_regs
683 basr %r14,%r1 # call s390_do_machine_check
684 tm __PT_PSW+1(%r11),0x01 # returning to user ?
685 jno mcck_return
# Returning to user: move pt_regs onto the kernel stack so s390_handle_mcck
# can run there with DAT on.
686 l %r1,__LC_KERNEL_STACK # switch to kernel stack
687 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
688 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
# NOTE(review): %r11 is recomputed from the pre-switch %r15, i.e. it still
# addresses the pt_regs on the entry stack, not the copy made above at
# STACK_FRAME_OVERHEAD(%r1) — confirm (%r15) vs (%r1) against upstream.
689 la %r11,STACK_FRAME_OVERHEAD(%r15)
690 lr %r15,%r1
691 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
692 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
693 jno mcck_return
694 TRACE_IRQS_OFF
695 l %r1,BASED(.Lhandle_mcck)
696 basr %r14,%r1 # call s390_handle_mcck
697 TRACE_IRQS_ON
698mcck_return:
699 mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
700 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
701 jno 0f
702 lm %r0,%r15,__PT_R0(%r11)
703 stpt __LC_EXIT_TIMER # only account exit time for user return
704 lpsw __LC_RETURN_MCCK_PSW
7050: lm %r0,%r15,__PT_R0(%r11)
706 lpsw __LC_RETURN_MCCK_PSW
707
# Unrecoverable machine check: switch to the panic stack (unless we are
# already on it, in which case just carve out a new frame) and fall into
# the normal mcck path to get registers saved for the panic output.
708mcck_panic:
709 l %r14,__LC_PANIC_STACK
710 slr %r14,%r15
711 sra %r14,PAGE_SHIFT # already on the panic stack page?
712 jz 0f
713 l %r15,__LC_PANIC_STACK
714 j mcck_skip
7150: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
716 j mcck_skip
717
717
718#
719# PSW restart interrupt handler
720#
# Runs the function from __LC_RESTART_FN on the restart stack with a
# freshly built pt_regs; waits for the requesting (source) cpu to store
# status, then stops this cpu if the function returns.
721ENTRY(restart_int_handler)
722 st %r15,__LC_SAVE_AREA_RESTART
723 l %r15,__LC_RESTART_STACK
724 ahi %r15,-__PT_SIZE # create pt_regs on stack
725 xc 0(__PT_SIZE,%r15),0(%r15)
726 stm %r0,%r14,__PT_R0(%r15)
727 mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
728 mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
729 ahi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
730 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
731 l %r1,__LC_RESTART_FN # load fn, parm & source cpu
732 l %r2,__LC_RESTART_DATA
733 l %r3,__LC_RESTART_SOURCE
734 ltr %r3,%r3 # test source cpu address
735 jm 1f # negative -> skip source stop
7360: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
737 brc 10,0b # wait for status stored
7381: basr %r14,%r1 # call function
739 stap __SF_EMPTY(%r15) # store cpu address
740 lh %r3,__SF_EMPTY(%r15)
7412: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
742 brc 2,2b # retry while busy
7433: j 3b # should never get here
744
744
745 .section .kprobes.text, "ax"
746
747#ifdef CONFIG_CHECK_STACK
748/*
749 * The synchronous or the asynchronous stack overflowed. We are dead.
750 * No need to properly save the registers, we are going to panic anyway.
751 * Setup a pt_regs so that show_trace can provide a good call trace.
752 */
# In: %r14 = save area holding the original %r8-%r15 (set by the caller's
# stack-check macro); %r8/%r9 = interrupted PSW.
753stack_overflow:
754 l %r15,__LC_PANIC_STACK # change to panic stack
755 la %r11,STACK_FRAME_OVERHEAD(%r15)
756 stm %r0,%r7,__PT_R0(%r11)
757 stm %r8,%r9,__PT_PSW(%r11)
758 mvc __PT_R8(32,%r11),0(%r14) # copy saved %r8-%r15
759 l %r1,BASED(1f)
760 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
761 lr %r2,%r11 # pass pointer to pt_regs
762 br %r1 # branch to kernel_stack_overflow
7631: .long kernel_stack_overflow
764#endif
765
765
# Boundary addresses (with the 31-bit addressing bit set) that partition
# the critical section for cleanup_critical's range checks below. The
# order must match the comparison sequence in cleanup_critical.
766cleanup_table:
767 .long system_call + 0x80000000
768 .long sysc_do_svc + 0x80000000
769 .long sysc_tif + 0x80000000
770 .long sysc_restore + 0x80000000
771 .long sysc_done + 0x80000000
772 .long io_tif + 0x80000000
773 .long io_restore + 0x80000000
774 .long io_done + 0x80000000
775 .long psw_idle + 0x80000000
776 .long psw_idle_end + 0x80000000
777
777
# Called with %r9 = interrupted instruction address; classifies it against
# cleanup_table and dispatches to the matching fix-up routine, which
# completes or rewinds the interrupted critical section and returns the
# resume address in %r9. Returns via %r14.
778cleanup_critical:
779 cl %r9,BASED(cleanup_table) # system_call
780 jl 0f # below critical section: nothing to do
781 cl %r9,BASED(cleanup_table+4) # sysc_do_svc
782 jl cleanup_system_call
783 cl %r9,BASED(cleanup_table+8) # sysc_tif
784 jl 0f
785 cl %r9,BASED(cleanup_table+12) # sysc_restore
786 jl cleanup_sysc_tif
787 cl %r9,BASED(cleanup_table+16) # sysc_done
788 jl cleanup_sysc_restore
789 cl %r9,BASED(cleanup_table+20) # io_tif
790 jl 0f
791 cl %r9,BASED(cleanup_table+24) # io_restore
792 jl cleanup_io_tif
793 cl %r9,BASED(cleanup_table+28) # io_done
794 jl cleanup_io_restore
795 cl %r9,BASED(cleanup_table+32) # psw_idle
796 jl 0f
797 cl %r9,BASED(cleanup_table+36) # psw_idle_end
798 jl cleanup_idle
7990: br %r14
800
800
# Re-execute the interrupted system_call entry sequence up to sysc_do_svc.
# %r9 = interrupted address, %r11 = lowcore save area in use (sync/async/
# mcck). Each stage is skipped if %r9 shows it already completed.
801cleanup_system_call:
802 # check if stpt has been executed
803 cl %r9,BASED(cleanup_system_call_insn)
804 jh 0f
805 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
806 chi %r11,__LC_SAVE_AREA_ASYNC # interrupted by mcck instead?
807 je 0f
808 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
8090: # check if stm has been executed
810 cl %r9,BASED(cleanup_system_call_insn+4)
811 jh 0f
812 mvc __LC_SAVE_AREA_SYNC(32),0(%r11)
8130: # set up saved registers r12, and r13
814 st %r12,16(%r11) # r12 thread-info pointer
815 st %r13,20(%r11) # r13 literal-pool pointer
816 # check if the user time calculation has been done
817 cl %r9,BASED(cleanup_system_call_insn+8)
818 jh 0f
# user_timer += exit_timer - sync_enter_timer (64-bit in %r10:%r15)
819 l %r10,__LC_EXIT_TIMER
820 l %r15,__LC_EXIT_TIMER+4
821 SUB64 %r10,%r15,__LC_SYNC_ENTER_TIMER
822 ADD64 %r10,%r15,__LC_USER_TIMER
823 st %r10,__LC_USER_TIMER
824 st %r15,__LC_USER_TIMER+4
8250: # check if the system time calculation has been done
826 cl %r9,BASED(cleanup_system_call_insn+12)
827 jh 0f
# system_timer += last_update_timer - exit_timer
828 l %r10,__LC_LAST_UPDATE_TIMER
829 l %r15,__LC_LAST_UPDATE_TIMER+4
830 SUB64 %r10,%r15,__LC_EXIT_TIMER
831 ADD64 %r10,%r15,__LC_SYSTEM_TIMER
832 st %r10,__LC_SYSTEM_TIMER
833 st %r15,__LC_SYSTEM_TIMER+4
8340: # update accounting time stamp
835 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
836 # set up saved register 11
837 l %r15,__LC_KERNEL_STACK
838 la %r9,STACK_FRAME_OVERHEAD(%r15)
839 st %r9,12(%r11) # r11 pt_regs pointer
840 # fill pt_regs
841 mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC
842 stm %r0,%r7,__PT_R0(%r9)
843 mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW
844 mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
845 # setup saved register 15
846 st %r15,28(%r11) # r15 stack pointer
847 # set new psw address and exit
848 l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000
849 br %r14
# Per-stage boundary addresses used by the checks above.
850cleanup_system_call_insn:
851 .long system_call + 0x80000000
852 .long sysc_stm + 0x80000000
853 .long sysc_vtime + 0x80000000 + 36
854 .long sysc_vtime + 0x80000000 + 76
855
855
# Restart the interrupted work-bit check: resume at sysc_tif.
856cleanup_sysc_tif:
857 l %r9,BASED(cleanup_table+8) # sysc_tif + 0x80000000
858 br %r14
859
859
# Interrupted during the sysc register-restore sequence: if the return
# PSW/registers were not yet fully staged, redo the staging from pt_regs
# into the lowcore save area, then resume as if the restore completed.
860cleanup_sysc_restore:
861 cl %r9,BASED(cleanup_sysc_restore_insn)
862 jhe 0f # past the last mvc: staging done
863 l %r9,12(%r11) # get saved pointer to pt_regs
864 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
865 mvc 0(32,%r11),__PT_R8(%r9)
866 lm %r0,%r7,__PT_R0(%r9)
8670: lm %r8,%r9,__LC_RETURN_PSW
868 br %r14
869cleanup_sysc_restore_insn:
870 .long sysc_done - 4 + 0x80000000
871
871
# Restart the interrupted work-bit check: resume at io_tif.
872cleanup_io_tif:
873 l %r9,BASED(cleanup_table+20) # io_tif + 0x80000000
874 br %r14
875
875
# Interrupted during the io register-restore sequence; mirror image of
# cleanup_sysc_restore for the I/O exit path.
876cleanup_io_restore:
877 cl %r9,BASED(cleanup_io_restore_insn)
878 jhe 0f # past the last mvc: staging done
879 l %r9,12(%r11) # get saved r11 pointer to pt_regs
880 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
881 mvc 0(32,%r11),__PT_R8(%r9)
882 lm %r0,%r7,__PT_R0(%r9)
8830: lm %r8,%r9,__LC_RETURN_PSW
884 br %r14
885cleanup_io_restore_insn:
886 .long io_done - 4 + 0x80000000
887
887
# Second "half" of psw_idle: an interrupt ended the wait state somewhere
# inside psw_idle. %r2 = idle data block (still valid from psw_idle),
# %r9 = interrupted address, %r11 = lowcore save area in use. Record the
# idle exit timestamps, account the idle period, and fake a return from
# psw_idle by clearing the wait/irq bits in the old PSW.
888cleanup_idle:
889 # copy interrupt clock & cpu timer
890 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
891 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
892 chi %r11,__LC_SAVE_AREA_ASYNC # interrupted by mcck instead?
893 je 0f
894 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
895 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
8960: # check if stck has been executed
897 cl %r9,BASED(cleanup_idle_insn)
898 jhe 1f
# stck/stpt never ran: make the idle interval empty (enter := exit).
# FIX: the timer copy used base %r3; the idle data block is addressed
# via %r2 everywhere in this routine and in psw_idle.
899 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
900 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
9011: # account system time going idle
# steal_timer += clock_idle_enter - last_update_clock
902 lm %r9,%r10,__LC_STEAL_TIMER
903 ADD64 %r9,%r10,__CLOCK_IDLE_ENTER(%r2)
904 SUB64 %r9,%r10,__LC_LAST_UPDATE_CLOCK
905 stm %r9,%r10,__LC_STEAL_TIMER
906 mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
# system_timer += last_update_timer - timer_idle_enter
907 lm %r9,%r10,__LC_SYSTEM_TIMER
908 ADD64 %r9,%r10,__LC_LAST_UPDATE_TIMER
909 SUB64 %r9,%r10,__TIMER_IDLE_ENTER(%r2)
910 stm %r9,%r10,__LC_SYSTEM_TIMER
911 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
912 # prepare return psw
913 n %r8,BASED(cleanup_idle_wait) # clear irq & wait state bits
914 l %r9,24(%r11) # return from psw_idle
915 br %r14
916cleanup_idle_insn:
917 .long psw_idle_lpsw + 0x80000000
918cleanup_idle_wait:
919 .long 0xfcfdffff
920
920
921/*
922 * Integer constants
923 */
924 .align 4
925.Lnr_syscalls:
926 .long NR_syscalls
927.Lvtimer_max:
928 .quad 0x7fffffffffffffff
929
930/*
931 * Symbol constants
932 */
933.Ldo_machine_check: .long s390_do_machine_check
934.Lhandle_mcck: .long s390_handle_mcck
935.Ldo_IRQ: .long do_IRQ
936.Ldo_signal: .long do_signal
937.Ldo_notify_resume: .long do_notify_resume
938.Ldo_per_trap: .long do_per_trap
939.Ljump_table: .long pgm_check_table
940.Lschedule: .long schedule
941#ifdef CONFIG_PREEMPT
942.Lpreempt_irq: .long preempt_schedule_irq
943#endif
944.Ltrace_enter: .long do_syscall_trace_enter
945.Ltrace_exit: .long do_syscall_trace_exit
946.Lschedule_tail: .long schedule_tail
947.Lsysc_per: .long sysc_per + 0x80000000
948#ifdef CONFIG_TRACE_IRQFLAGS
949.Lhardirqs_on: .long trace_hardirqs_on_caller
950.Lhardirqs_off: .long trace_hardirqs_off_caller
951#endif
952#ifdef CONFIG_LOCKDEP
953.Llockdep_sys_exit: .long lockdep_sys_exit
954#endif
955.Lcritical_start: .long __critical_start + 0x80000000
956.Lcritical_length: .long __critical_end - __critical_start
957
958 .section .rodata, "a"
# Expand each SYSCALL(esa,esame,emu) entry in syscalls.S to its 31-bit
# (ESA) handler address only.
959#define SYSCALL(esa,esame,emu) .long esa
960 .globl sys_call_table
961sys_call_table:
962#include "syscalls.S"
963#undef SYSCALL