v3.1
   1/*
   2 *  arch/s390/kernel/entry.S
   3 *    S390 low-level entry points.
   4 *
   5 *    Copyright (C) IBM Corp. 1999,2006
   6 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
   7 *		 Hartmut Penner (hp@de.ibm.com),
   8 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
   9 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
  10 */
  11
  12#include <linux/init.h>
  13#include <linux/linkage.h>
  14#include <asm/cache.h>
  15#include <asm/errno.h>
  16#include <asm/ptrace.h>
  17#include <asm/thread_info.h>
  18#include <asm/asm-offsets.h>
  19#include <asm/unistd.h>
  20#include <asm/page.h>
  21
  22/*
  23 * Stack layout for the system_call stack entry.
  24 * The first few entries are identical to the user_regs_struct.
  25 */
  26SP_PTREGS    =	STACK_FRAME_OVERHEAD
  27SP_ARGS      =	STACK_FRAME_OVERHEAD + __PT_ARGS
  28SP_PSW	     =	STACK_FRAME_OVERHEAD + __PT_PSW
  29SP_R0	     =	STACK_FRAME_OVERHEAD + __PT_GPRS
  30SP_R1	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 4
  31SP_R2	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 8
  32SP_R3	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 12
  33SP_R4	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 16
  34SP_R5	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 20
  35SP_R6	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 24
  36SP_R7	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 28
  37SP_R8	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 32
  38SP_R9	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 36
  39SP_R10	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 40
  40SP_R11	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 44
  41SP_R12	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 48
  42SP_R13	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 52
  43SP_R14	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 56
  44SP_R15	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 60
  45SP_ORIG_R2   =	STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
  46SP_ILC	     =	STACK_FRAME_OVERHEAD + __PT_ILC
  47SP_SVCNR     =	STACK_FRAME_OVERHEAD + __PT_SVCNR
  48SP_SIZE      =	STACK_FRAME_OVERHEAD + __PT_SIZE
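#
# Put differently, each SP_* symbol is the offset of one pt_regs member
# within the frame, with the whole pt_regs image sitting STACK_FRAME_OVERHEAD
# bytes into it.  Roughly (layout inferred from the offsets above, shown as
# an illustrative C sketch only):
#
#	struct pt_regs {
#		unsigned long args[1];
#		psw_t psw;			/* 8 bytes */
#		unsigned long gprs[16];
#		unsigned long orig_gpr2;
#		unsigned short ilc;
#		unsigned short svcnr;
#	};
#
# SP_SIZE is the size of the complete frame including that image.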
  49
  50_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
  51		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
  52_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
  53		 _TIF_MCCK_PENDING)
  54_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
  55		_TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8)
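# _TIF_SYSCALL combines masks shifted right by 8 because they are tested
# with "tm __TI_flags+2(%r12),_TIF_SYSCALL", i.e. against the byte that
# holds bits 8-15 of the 32 bit thread_info flags word.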
  56
  57STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
  58STACK_SIZE  = 1 << STACK_SHIFT
  59
  60#define BASED(name) name-system_call(%r13)
  61
  62#ifdef CONFIG_TRACE_IRQFLAGS
  63	.macro	TRACE_IRQS_ON
  64	basr	%r2,%r0
  65	l	%r1,BASED(.Ltrace_irq_on_caller)
  66	basr	%r14,%r1
  67	.endm
  68
  69	.macro	TRACE_IRQS_OFF
  70	basr	%r2,%r0
  71	l	%r1,BASED(.Ltrace_irq_off_caller)
  72	basr	%r14,%r1
  73	.endm
  74#else
  75#define TRACE_IRQS_ON
  76#define TRACE_IRQS_OFF
  77#endif
  78
  79#ifdef CONFIG_LOCKDEP
  80	.macro	LOCKDEP_SYS_EXIT
  81	tm	SP_PSW+1(%r15),0x01	# returning to user ?
  82	jz	0f
  83	l	%r1,BASED(.Llockdep_sys_exit)
  84	basr	%r14,%r1
  850:
  86	.endm
  87#else
  88#define LOCKDEP_SYS_EXIT
  89#endif
  90
  91/*
  92 * Register usage in interrupt handlers:
  93 *    R9  - pointer to current task structure
  94 *    R13 - pointer to literal pool
  95 *    R14 - return register for function calls
  96 *    R15 - kernel stack pointer
  97 */
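# Note: %r13 is loaded from __LC_SVC_NEW_PSW+4, i.e. with the address of
# system_call, by SAVE_ALL_BASE/SAVE_ALL_SVC/SAVE_ALL_ASYNC below.  The
# BASED() macro therefore turns any local label or literal into the
# base+displacement operand label-system_call(%r13), which is how this
# 31 bit code reaches its literal pool.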
  98
  99	.macro	UPDATE_VTIME lc_from,lc_to,lc_sum
 100	lm	%r10,%r11,\lc_from
 101	sl	%r10,\lc_to
 102	sl	%r11,\lc_to+4
 103	bc	3,BASED(0f)
 104	sl	%r10,BASED(.Lc_1)
 1050:	al	%r10,\lc_sum
 106	al	%r11,\lc_sum+4
 107	bc	12,BASED(1f)
 108	al	%r10,BASED(.Lc_1)
 1091:	stm	%r10,%r11,\lc_sum
 110	.endm
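# UPDATE_VTIME performs a 64 bit update on a 32 bit register pair; in C it
# would read, roughly (illustrative only):
#
#	*(u64 *) lc_sum += *(u64 *) lc_from - *(u64 *) lc_to;
#
# The "bc 3"/"bc 12" branches propagate the borrow resp. carry of the low
# word into the high word via the .Lc_1 constant.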
 111
 112	.macro	SAVE_ALL_SVC psworg,savearea
 113	stm	%r12,%r15,\savearea
 114	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
 115	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
 116	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 117	.endm
 118
 119	.macro	SAVE_ALL_BASE savearea
 120	stm	%r12,%r15,\savearea
 121	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
 122	.endm
 123
 124	.macro	SAVE_ALL_PGM psworg,savearea
 125	tm	\psworg+1,0x01		# test problem state bit
 126#ifdef CONFIG_CHECK_STACK
 127	bnz	BASED(1f)
 128	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
 129	bnz	BASED(2f)
 130	la	%r12,\psworg
 131	b	BASED(stack_overflow)
 132#else
 133	bz	BASED(2f)
 134#endif
 1351:	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
 1362:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 137	.endm
 138
 139	.macro	SAVE_ALL_ASYNC psworg,savearea
 140	stm	%r12,%r15,\savearea
 141	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
 142	la	%r12,\psworg
 143	tm	\psworg+1,0x01		# test problem state bit
 144	bnz	BASED(1f)		# from user -> load async stack
 145	clc	\psworg+4(4),BASED(.Lcritical_end)
 146	bhe	BASED(0f)
 147	clc	\psworg+4(4),BASED(.Lcritical_start)
 148	bl	BASED(0f)
 149	l	%r14,BASED(.Lcleanup_critical)
 150	basr	%r14,%r14
 151	tm	1(%r12),0x01		# retest problem state after cleanup
 152	bnz	BASED(1f)
 1530:	l	%r14,__LC_ASYNC_STACK	# are we already on the async stack ?
 154	slr	%r14,%r15
 155	sra	%r14,STACK_SHIFT
 156#ifdef CONFIG_CHECK_STACK
 157	bnz	BASED(1f)
 158	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
 159	bnz	BASED(2f)
 160	b	BASED(stack_overflow)
 161#else
 162	bz	BASED(2f)
 163#endif
 1641:	l	%r15,__LC_ASYNC_STACK
 1652:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 166	.endm
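# In the macro above the slr/sra pair computes
# (__LC_ASYNC_STACK - %r15) >> STACK_SHIFT: a zero result means %r15
# already points into the async stack, so the stack is only switched when
# we are not on it yet; an interrupt taken in problem state always loads
# the async stack.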
 167
 168	.macro	CREATE_STACK_FRAME savearea
 169	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
 170	st	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
 171	mvc	SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
 172	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
 173	.endm
 174
 175	.macro	RESTORE_ALL psworg,sync
 176	mvc	\psworg(8),SP_PSW(%r15) # move user PSW to lowcore
 177	.if !\sync
 178	ni	\psworg+1,0xfd		# clear wait state bit
 179	.endif
 180	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15 of user
 181	stpt	__LC_EXIT_TIMER
 182	lpsw	\psworg			# back to caller
 183	.endm
 184
 185	.macro REENABLE_IRQS
 186	mvc	__SF_EMPTY(1,%r15),SP_PSW(%r15)
 187	ni	__SF_EMPTY(%r15),0xbf
 188	ssm	__SF_EMPTY(%r15)
 189	.endm
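# REENABLE_IRQS copies the system-mask byte of the saved PSW to the scratch
# byte __SF_EMPTY, clears the PER bit (0x40) in it and reloads it with ssm,
# restoring the interrupted context's DAT/I/O/external mask without
# enabling PER.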
 190
 191	.section .kprobes.text, "ax"
 192
 193/*
 194 * Scheduler resume function, called by switch_to
 195 *  gpr2 = (task_struct *) prev
 196 *  gpr3 = (task_struct *) next
 197 * Returns:
 198 *  gpr2 = prev
 199 */
 200ENTRY(__switch_to)
 201	basr	%r1,0
 2020:	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
 203	l	%r5,__THREAD_info(%r3)		# get thread_info of next
 204	tm	__TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
 205	bz	1f-0b(%r1)
 206	ni	__TI_flags+3(%r4),255-_TIF_MCCK_PENDING	# clear flag in prev
 207	oi	__TI_flags+3(%r5),_TIF_MCCK_PENDING	# set it in next
 2081:	stm	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
 209	st	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
 210	l	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
 211	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
 212	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 213	st	%r3,__LC_CURRENT		# store task struct of next
 214	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3)	# store pid of next
 215	st	%r5,__LC_THREAD_INFO		# store thread info of next
 216	ahi	%r5,STACK_SIZE			# end of kernel stack of next
 217	st	%r5,__LC_KERNEL_STACK		# store end of kernel stack
 218	br	%r14
 219
 220__critical_start:
 221/*
 222 * SVC interrupt handler routine. System calls are synchronous events and
 223 * are executed with interrupts enabled.
 224 */
 225
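# In outline: save the user registers and switch to the kernel stack
# (SAVE_ALL_SVC/CREATE_STACK_FRAME), account the time spent in user space
# (UPDATE_VTIME), index sys_call_table with the svc number times four,
# call the handler, and on the way out test the _TIF_WORK_SVC bits before
# restoring the user PSW and registers.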
 226ENTRY(system_call)
 227	stpt	__LC_SYNC_ENTER_TIMER
 228sysc_saveall:
 229	SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 230	CREATE_STACK_FRAME __LC_SAVE_AREA
 231	mvc	SP_PSW(8,%r15),__LC_SVC_OLD_PSW
 232	mvc	SP_ILC(4,%r15),__LC_SVC_ILC
 233	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 234sysc_vtime:
 235	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 236sysc_stime:
 237	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 238sysc_update:
 239	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 240sysc_do_svc:
 241	xr	%r7,%r7
 242	icm	%r7,3,SP_SVCNR(%r15)	# load svc number and test for svc 0
 243	bnz	BASED(sysc_nr_ok)	# svc number > 0
 244	# svc 0: system call number in %r1
 245	cl	%r1,BASED(.Lnr_syscalls)
 246	bnl	BASED(sysc_nr_ok)
 247	sth	%r1,SP_SVCNR(%r15)
 248	lr	%r7,%r1 	  # copy svc number to %r7
 249sysc_nr_ok:
 250	sll	%r7,2		  # svc number *4
 251	l	%r10,BASED(.Lsysc_table)
 252	tm	__TI_flags+2(%r12),_TIF_SYSCALL
 253	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
 254	l	%r8,0(%r7,%r10)	  # get system call addr.
 255	bnz	BASED(sysc_tracesys)
 256	basr	%r14,%r8	  # call sys_xxxx
 257	st	%r2,SP_R2(%r15)   # store return value (change R2 on stack)
 258
 259sysc_return:
 260	LOCKDEP_SYS_EXIT
 261sysc_tif:
 262	tm	__TI_flags+3(%r12),_TIF_WORK_SVC
 263	bnz	BASED(sysc_work)  # there is work to do (signals etc.)
 264sysc_restore:
 265	RESTORE_ALL __LC_RETURN_PSW,1
 266sysc_done:
 267
 268#
 269# There is work to do, but first we need to check if we return to userspace.
 270#
 271sysc_work:
 272	tm	SP_PSW+1(%r15),0x01	# returning to user ?
 273	bno	BASED(sysc_restore)
 274
 275#
 276# One of the work bits is on. Find out which one.
 277#
 278sysc_work_tif:
 279	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
 280	bo	BASED(sysc_mcck_pending)
 281	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
 282	bo	BASED(sysc_reschedule)
 283	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
 284	bo	BASED(sysc_sigpending)
 285	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
 286	bo	BASED(sysc_notify_resume)
 287	tm	__TI_flags+3(%r12),_TIF_RESTART_SVC
 288	bo	BASED(sysc_restart)
 289	tm	__TI_flags+3(%r12),_TIF_PER_TRAP
 290	bo	BASED(sysc_singlestep)
 291	b	BASED(sysc_return)	# beware of critical section cleanup
 292
 293#
 294# _TIF_NEED_RESCHED is set, call schedule
 295#
 296sysc_reschedule:
 297	l	%r1,BASED(.Lschedule)
 298	la	%r14,BASED(sysc_return)
 299	br	%r1			# call scheduler
 300
 301#
 302# _TIF_MCCK_PENDING is set, call handler
 303#
 304sysc_mcck_pending:
 305	l	%r1,BASED(.Ls390_handle_mcck)
 306	la	%r14,BASED(sysc_return)
 307	br	%r1			# TIF bit will be cleared by handler
 308
 309#
 310# _TIF_SIGPENDING is set, call do_signal
 311#
 312sysc_sigpending:
 313	ni	__TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
 314	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 315	l	%r1,BASED(.Ldo_signal)
 316	basr	%r14,%r1		# call do_signal
 317	tm	__TI_flags+3(%r12),_TIF_RESTART_SVC
 318	bo	BASED(sysc_restart)
 319	tm	__TI_flags+3(%r12),_TIF_PER_TRAP
 320	bo	BASED(sysc_singlestep)
 321	b	BASED(sysc_return)
 322
 323#
 324# _TIF_NOTIFY_RESUME is set, call do_notify_resume
 325#
 326sysc_notify_resume:
 327	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 328	l	%r1,BASED(.Ldo_notify_resume)
 329	la	%r14,BASED(sysc_return)
 330	br	%r1			# call do_notify_resume
 331
 332
 333#
 334# _TIF_RESTART_SVC is set, set up registers and restart svc
 335#
 336sysc_restart:
 337	ni	__TI_flags+3(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
 338	l	%r7,SP_R2(%r15) 	# load new svc number
 339	mvc	SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
 340	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
 341	sth	%r7,SP_SVCNR(%r15)
 342	b	BASED(sysc_nr_ok)	# restart svc
 343
 344#
 345# _TIF_PER_TRAP is set, call do_per_trap
 346#
 347sysc_singlestep:
 348	ni	__TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
 349	xc	SP_SVCNR(2,%r15),SP_SVCNR(%r15)		# clear svc number
 350	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 351	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
 352	la	%r14,BASED(sysc_return)	# load adr. of system return
 353	br	%r1			# branch to do_per_trap
 354
 355#
 356# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
 357# and after the system call
 358#
 359sysc_tracesys:
 360	l	%r1,BASED(.Ltrace_entry)
 361	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 362	la	%r3,0
 363	xr	%r0,%r0
 364	icm	%r0,3,SP_SVCNR(%r15)
 365	st	%r0,SP_R2(%r15)
 366	basr	%r14,%r1
 367	cl	%r2,BASED(.Lnr_syscalls)
 368	bnl	BASED(sysc_tracenogo)
 369	lr	%r7,%r2
 370	sll	%r7,2			# svc number *4
 371	l	%r8,0(%r7,%r10)
 372sysc_tracego:
 373	lm	%r3,%r6,SP_R3(%r15)
 374	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
 375	l	%r2,SP_ORIG_R2(%r15)
 376	basr	%r14,%r8		# call sys_xxx
 377	st	%r2,SP_R2(%r15)		# store return value
 378sysc_tracenogo:
 379	tm	__TI_flags+2(%r12),_TIF_SYSCALL
 380	bz	BASED(sysc_return)
 381	l	%r1,BASED(.Ltrace_exit)
 382	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 383	la	%r14,BASED(sysc_return)
 384	br	%r1
 385
 386#
 387# a new process exits the kernel with ret_from_fork
 388#
 389ENTRY(ret_from_fork)
 390	l	%r13,__LC_SVC_NEW_PSW+4
 391	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 392	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
 393	bo	BASED(0f)
 394	st	%r15,SP_R15(%r15)	# store stack pointer for new kthread
 3950:	l	%r1,BASED(.Lschedtail)
 396	basr	%r14,%r1
 397	TRACE_IRQS_ON
 398	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 399	b	BASED(sysc_tracenogo)
 400
 401#
 402# the kernel_execve function needs to deal with a pt_regs that is not
 403# at the usual place
 404#
 405ENTRY(kernel_execve)
 406	stm	%r12,%r15,48(%r15)
 407	lr	%r14,%r15
 408	l	%r13,__LC_SVC_NEW_PSW+4
 409	s	%r15,BASED(.Lc_spsize)
 410	st	%r14,__SF_BACKCHAIN(%r15)
 411	la	%r12,SP_PTREGS(%r15)
 412	xc	0(__PT_SIZE,%r12),0(%r12)
 413	l	%r1,BASED(.Ldo_execve)
 414	lr	%r5,%r12
 415	basr	%r14,%r1
 416	ltr	%r2,%r2
 417	be	BASED(0f)
 418	a	%r15,BASED(.Lc_spsize)
 419	lm	%r12,%r15,48(%r15)
 420	br	%r14
 421	# execve succeeded.
 4220:	stnsm	__SF_EMPTY(%r15),0xfc	# disable interrupts
 423	l	%r15,__LC_KERNEL_STACK	# load ksp
 424	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 425	mvc	SP_PTREGS(__PT_SIZE,%r15),0(%r12)	# copy pt_regs
 426	l	%r12,__LC_THREAD_INFO
 427	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
 428	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 429	l	%r1,BASED(.Lexecve_tail)
 430	basr	%r14,%r1
 431	b	BASED(sysc_return)
 432
 433/*
 434 * Program check handler routine
 435 */
 436
 437ENTRY(pgm_check_handler)
 438/*
 439 * First we need to check for a special case:
 440 * Single stepping an instruction that disables the PER event mask will
 441 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
 442 * For a single stepped SVC the program check handler gets control after
 443 * the SVC new PSW has been loaded. But we want to execute the SVC first and
 444 * then handle the PER event. Therefore we update the SVC old PSW to point
 445 * to the pgm_check_handler and branch to the SVC handler after we checked
 446 * if we have to load the kernel stack register.
 447 * For every other possible cause for PER event without the PER mask set
 448 * we just ignore the PER event (FIXME: is there anything we have to do
 449 * for LPSW?).
 450 */
 451	stpt	__LC_SYNC_ENTER_TIMER
 452	SAVE_ALL_BASE __LC_SAVE_AREA
 453	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
 454	bnz	BASED(pgm_per)		# got per exception -> special case
 455	SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 456	CREATE_STACK_FRAME __LC_SAVE_AREA
 457	xc	SP_ILC(4,%r15),SP_ILC(%r15)
 458	mvc	SP_PSW(8,%r15),__LC_PGM_OLD_PSW
 459	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 460	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 461	bz	BASED(pgm_no_vtime)
 462	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 463	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 464	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 465pgm_no_vtime:
 466	l	%r3,__LC_PGM_ILC	# load program interruption code
 467	l	%r4,__LC_TRANS_EXC_CODE
 468	REENABLE_IRQS
 469	la	%r8,0x7f
 470	nr	%r8,%r3
 471	sll	%r8,2
 472	l	%r1,BASED(.Ljump_table)
 473	l	%r1,0(%r8,%r1)		# load address of handler routine
 474	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 475	basr	%r14,%r1		# branch to interrupt-handler
 476pgm_exit:
 477	b	BASED(sysc_return)
 478
 479#
 480# handle per exception
 481#
 482pgm_per:
 483	tm	__LC_PGM_OLD_PSW,0x40	# test if per event recording is on
 484	bnz	BASED(pgm_per_std)	# ok, normal per event from user space
 485# ok, it's one of the special cases; now we need to find out which one
 486	clc	__LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
 487	be	BASED(pgm_svcper)
 488# no interesting special case, ignore PER event
 489	lm	%r12,%r15,__LC_SAVE_AREA
 490	lpsw	0x28
 491
 492#
 493# Normal per exception
 494#
 495pgm_per_std:
 496	SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 497	CREATE_STACK_FRAME __LC_SAVE_AREA
 498	mvc	SP_PSW(8,%r15),__LC_PGM_OLD_PSW
 499	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 500	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 501	bz	BASED(pgm_no_vtime2)
 502	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 503	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 504	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 505pgm_no_vtime2:
 506	l	%r1,__TI_task(%r12)
 507	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
 508	bz	BASED(kernel_per)
 509	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
 510	mvc	__THREAD_per_address(4,%r1),__LC_PER_ADDRESS
 511	mvc	__THREAD_per_paid(1,%r1),__LC_PER_PAID
 512	oi	__TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
 513	l	%r3,__LC_PGM_ILC	# load program interruption code
 514	l	%r4,__LC_TRANS_EXC_CODE
 515	REENABLE_IRQS
 516	la	%r8,0x7f
 517	nr	%r8,%r3 		# clear per-event-bit and ilc
 518	be	BASED(pgm_exit2)	# only per or per+check ?
 519	sll	%r8,2
 520	l	%r1,BASED(.Ljump_table)
 521	l	%r1,0(%r8,%r1)		# load address of handler routine
 522	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 523	basr	%r14,%r1		# branch to interrupt-handler
 524pgm_exit2:
 525	b	BASED(sysc_return)
 526
 527#
 528# it was a single stepped SVC that is causing all the trouble
 529#
 530pgm_svcper:
 531	SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
 532	CREATE_STACK_FRAME __LC_SAVE_AREA
 533	mvc	SP_PSW(8,%r15),__LC_SVC_OLD_PSW
 534	mvc	SP_ILC(4,%r15),__LC_SVC_ILC
 535	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 536	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 537	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 538	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 539	l	%r8,__TI_task(%r12)
 540	mvc	__THREAD_per_cause(2,%r8),__LC_PER_CAUSE
 541	mvc	__THREAD_per_address(4,%r8),__LC_PER_ADDRESS
 542	mvc	__THREAD_per_paid(1,%r8),__LC_PER_PAID
 543	oi	__TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
 544	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 545	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
 546	b	BASED(sysc_do_svc)
 547
 548#
 549# per was called from kernel, must be kprobes
 550#
 551kernel_per:
 552	REENABLE_IRQS
 553	xc	SP_SVCNR(2,%r15),SP_SVCNR(%r15)
 554	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 555	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
 556	basr	%r14,%r1		# branch to do_single_step
 557	b	BASED(pgm_exit)
 558
 559/*
 560 * IO interrupt handler routine
 561 */
 562
 563ENTRY(io_int_handler)
 564	stck	__LC_INT_CLOCK
 565	stpt	__LC_ASYNC_ENTER_TIMER
 566	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
 567	CREATE_STACK_FRAME __LC_SAVE_AREA+16
 568	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
 569	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 570	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 571	bz	BASED(io_no_vtime)
 572	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
 573	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 574	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
 575io_no_vtime:
 576	TRACE_IRQS_OFF
 577	l	%r1,BASED(.Ldo_IRQ)	# load address of do_IRQ
 578	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 579	basr	%r14,%r1		# branch to standard irq handler
 580io_return:
 581	LOCKDEP_SYS_EXIT
 582	TRACE_IRQS_ON
 583io_tif:
 584	tm	__TI_flags+3(%r12),_TIF_WORK_INT
 585	bnz	BASED(io_work)		# there is work to do (signals etc.)
 586io_restore:
 587	RESTORE_ALL __LC_RETURN_PSW,0
 588io_done:
 589
 590#
 591# There is work to do; find out in which context we have been interrupted:
 592# 1) if we return to user space we can do all _TIF_WORK_INT work
 593# 2) if we return to kernel code and preemptive scheduling is enabled check
 594#    the preemption counter and if it is zero call preempt_schedule_irq
 595# Before any work can be done, a switch to the kernel stack is required.
 596#
 597io_work:
 598	tm	SP_PSW+1(%r15),0x01	# returning to user ?
 599	bo	BASED(io_work_user)	# yes -> do resched & signal
 600#ifdef CONFIG_PREEMPT
 601	# check for preemptive scheduling
 602	icm	%r0,15,__TI_precount(%r12)
 603	bnz	BASED(io_restore)	# preemption disabled
 604	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
 605	bno	BASED(io_restore)
 606	# switch to kernel stack
 607	l	%r1,SP_R15(%r15)
 608	s	%r1,BASED(.Lc_spsize)
 609	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
 610	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
 611	lr	%r15,%r1
 612	# TRACE_IRQS_ON already done at io_return, call
 613	# TRACE_IRQS_OFF to keep things symmetrical
 614	TRACE_IRQS_OFF
 615	l	%r1,BASED(.Lpreempt_schedule_irq)
 616	basr	%r14,%r1		# call preempt_schedule_irq
 617	b	BASED(io_return)
 618#else
 619	b	BASED(io_restore)
 620#endif
 621
 622#
 623# Need to do work before returning to userspace, switch to kernel stack
 624#
 625io_work_user:
 626	l	%r1,__LC_KERNEL_STACK
 627	s	%r1,BASED(.Lc_spsize)
 628	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
 629	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
 630	lr	%r15,%r1
 631
 632#
 633# One of the work bits is on. Find out which one.
 634# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
 635#		and _TIF_MCCK_PENDING
 636#
 637io_work_tif:
 638	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
 639	bo	BASED(io_mcck_pending)
 640	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
 641	bo	BASED(io_reschedule)
 642	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
 643	bo	BASED(io_sigpending)
 644	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
 645	bo	BASED(io_notify_resume)
 646	b	BASED(io_return)	# beware of critical section cleanup
 647
 648#
 649# _TIF_MCCK_PENDING is set, call handler
 650#
 651io_mcck_pending:
 652	# TRACE_IRQS_ON already done at io_return
 653	l	%r1,BASED(.Ls390_handle_mcck)
 654	basr	%r14,%r1		# TIF bit will be cleared by handler
 655	TRACE_IRQS_OFF
 656	b	BASED(io_return)
 657
 658#
 659# _TIF_NEED_RESCHED is set, call schedule
 660#
 661io_reschedule:
 662	# TRACE_IRQS_ON already done at io_return
 663	l	%r1,BASED(.Lschedule)
 664	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 665	basr	%r14,%r1		# call scheduler
 666	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
 667	TRACE_IRQS_OFF
 668	b	BASED(io_return)
 669
 670#
 671# _TIF_SIGPENDING is set, call do_signal
 672#
 673io_sigpending:
 674	# TRACE_IRQS_ON already done at io_return
 675	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 676	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 677	l	%r1,BASED(.Ldo_signal)
 678	basr	%r14,%r1		# call do_signal
 679	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
 680	TRACE_IRQS_OFF
 681	b	BASED(io_return)
 682
 683#
 684# _TIF_NOTIFY_RESUME is set, call do_notify_resume
 685#
 686io_notify_resume:
 687	# TRACE_IRQS_ON already done at io_return
 688	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 689	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 690	l	%r1,BASED(.Ldo_notify_resume)
 691	basr	%r14,%r1		# call do_notify_resume
 692	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
 693	TRACE_IRQS_OFF
 694	b	BASED(io_return)
 695
 696/*
 697 * External interrupt handler routine
 698 */
 699
 700ENTRY(ext_int_handler)
 701	stck	__LC_INT_CLOCK
 702	stpt	__LC_ASYNC_ENTER_TIMER
 703	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
 704	CREATE_STACK_FRAME __LC_SAVE_AREA+16
 705	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
 706	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 707	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 708	bz	BASED(ext_no_vtime)
 709	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
 710	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 711	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
 712ext_no_vtime:
 713	TRACE_IRQS_OFF
 714	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 715	l	%r3,__LC_CPU_ADDRESS	# get cpu address + interruption code
 716	l	%r4,__LC_EXT_PARAMS	# get external parameters
 717	l	%r1,BASED(.Ldo_extint)
 718	basr	%r14,%r1
 719	b	BASED(io_return)
 720
 721__critical_end:
 722
 723/*
 724 * Machine check handler routines
 725 */
 726
 727ENTRY(mcck_int_handler)
 728	stck	__LC_MCCK_CLOCK
 729	spt	__LC_CPU_TIMER_SAVE_AREA	# revalidate cpu timer
 730	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA	# revalidate gprs
 731	SAVE_ALL_BASE __LC_SAVE_AREA+32
 732	la	%r12,__LC_MCK_OLD_PSW
 733	tm	__LC_MCCK_CODE,0x80	# system damage?
 734	bo	BASED(mcck_int_main)	# yes -> rest of mcck code invalid
 735	mvc	__LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
 736	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
 737	bo	BASED(1f)
 738	la	%r14,__LC_SYNC_ENTER_TIMER
 739	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
 740	bl	BASED(0f)
 741	la	%r14,__LC_ASYNC_ENTER_TIMER
 7420:	clc	0(8,%r14),__LC_EXIT_TIMER
 743	bl	BASED(0f)
 744	la	%r14,__LC_EXIT_TIMER
 7450:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
 746	bl	BASED(0f)
 747	la	%r14,__LC_LAST_UPDATE_TIMER
 7480:	spt	0(%r14)
 749	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
 7501:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
 751	bno	BASED(mcck_int_main)	# no -> skip cleanup critical
 752	tm	__LC_MCK_OLD_PSW+1,0x01	# test problem state bit
 753	bnz	BASED(mcck_int_main)	# from user -> load async stack
 754	clc	__LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end)
 755	bhe	BASED(mcck_int_main)
 756	clc	__LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start)
 757	bl	BASED(mcck_int_main)
 758	l	%r14,BASED(.Lcleanup_critical)
 759	basr	%r14,%r14
 760mcck_int_main:
 761	l	%r14,__LC_PANIC_STACK	# are we already on the panic stack?
 762	slr	%r14,%r15
 763	sra	%r14,PAGE_SHIFT
 764	be	BASED(0f)
 765	l	%r15,__LC_PANIC_STACK	# load panic stack
 7660:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 767	CREATE_STACK_FRAME __LC_SAVE_AREA+32
 768	mvc	SP_PSW(8,%r15),0(%r12)
 769	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 770	tm	__LC_MCCK_CODE+2,0x08	# mwp of old psw valid?
 771	bno	BASED(mcck_no_vtime)	# no -> skip vtime update
 772	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 773	bz	BASED(mcck_no_vtime)
 774	UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
 775	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 776	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
 777mcck_no_vtime:
 778	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 779	l	%r1,BASED(.Ls390_mcck)
 780	basr	%r14,%r1		# call machine check handler
 781	tm	SP_PSW+1(%r15),0x01	# returning to user ?
 782	bno	BASED(mcck_return)
 783	l	%r1,__LC_KERNEL_STACK	# switch to kernel stack
 784	s	%r1,BASED(.Lc_spsize)
 785	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
 786	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
 787	lr	%r15,%r1
 788	stosm	__SF_EMPTY(%r15),0x04	# turn dat on
 789	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
 790	bno	BASED(mcck_return)
 791	TRACE_IRQS_OFF
 792	l	%r1,BASED(.Ls390_handle_mcck)
 793	basr	%r14,%r1		# call machine check handler
 794	TRACE_IRQS_ON
 795mcck_return:
 796	mvc	__LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
 797	ni	__LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
 798	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
 799	bno	BASED(0f)
 800	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
 801	stpt	__LC_EXIT_TIMER
 802	lpsw	__LC_RETURN_MCCK_PSW	# back to caller
 8030:	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
 804	lpsw	__LC_RETURN_MCCK_PSW	# back to caller
 805
 806	RESTORE_ALL __LC_RETURN_MCCK_PSW,0
 807
 808/*
 809 * Restart interruption handler, kick starter for additional CPUs
 810 */
 811#ifdef CONFIG_SMP
 812	__CPUINIT
 813ENTRY(restart_int_handler)
 814	basr	%r1,0
 815restart_base:
 816	spt	restart_vtime-restart_base(%r1)
 817	stck	__LC_LAST_UPDATE_CLOCK
 818	mvc	__LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
 819	mvc	__LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
 820	l	%r15,__LC_SAVE_AREA+60	# load ksp
 821	lctl	%c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
 822	lam	%a0,%a15,__LC_AREGS_SAVE_AREA
 823	lm	%r6,%r15,__SF_GPRS(%r15) # load registers from clone
 824	l	%r1,__LC_THREAD_INFO
 825	mvc	__LC_USER_TIMER(8),__TI_user_timer(%r1)
 826	mvc	__LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
 827	xc	__LC_STEAL_TIMER(8),__LC_STEAL_TIMER
 828	stosm	__SF_EMPTY(%r15),0x04	# now we can turn dat on
 829	basr	%r14,0
 830	l	%r14,restart_addr-.(%r14)
 831	basr	%r14,%r14		# branch to start_secondary
 832restart_addr:
 833	.long	start_secondary
 834	.align	8
 835restart_vtime:
 836	.long	0x7fffffff,0xffffffff
 837	.previous
 838#else
 839/*
 840 * If we do not run with SMP enabled, let the new CPU crash ...
 841 */
 842ENTRY(restart_int_handler)
 843	basr	%r1,0
 844restart_base:
 845	lpsw	restart_crash-restart_base(%r1)
 846	.align	8
 847restart_crash:
 848	.long	0x000a0000,0x00000000
 849restart_go:
 850#endif
 851
 852#
 853# PSW restart interrupt handler
 854#
 855ENTRY(psw_restart_int_handler)
 856	st	%r15,__LC_SAVE_AREA_64(%r0)	# save r15
 857	basr	%r15,0
 8580:	l	%r15,.Lrestart_stack-0b(%r15)	# load restart stack
 859	l	%r15,0(%r15)
 860	ahi	%r15,-SP_SIZE			# make room for pt_regs
 861	stm	%r0,%r14,SP_R0(%r15)		# store gprs %r0-%r14 to stack
 862	mvc	SP_R15(4,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack
 863	mvc	SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw
 864	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
 865	basr	%r14,0
 8661:	l	%r14,.Ldo_restart-1b(%r14)
 867	basr	%r14,%r14
 868
 869	basr	%r14,0				# load disabled wait PSW if
 8702:	lpsw	restart_psw_crash-2b(%r14)	# do_restart returns
 871	.align 4
 872.Ldo_restart:
 873	.long	do_restart
 874.Lrestart_stack:
 875	.long	restart_stack
 876	.align 8
 877restart_psw_crash:
 878	.long	0x000a0000,0x00000000 + restart_psw_crash
 879
 880	.section .kprobes.text, "ax"
 881
 882#ifdef CONFIG_CHECK_STACK
 883/*
 884 * The synchronous or the asynchronous stack overflowed. We are dead.
 885 * No need to save the registers properly; we are going to panic anyway.
 886 * Set up a pt_regs so that show_trace can provide a good call trace.
 887 */
 888stack_overflow:
 889	l	%r15,__LC_PANIC_STACK	# change to panic stack
 890	sl	%r15,BASED(.Lc_spsize)
 891	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
 892	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
 893	la	%r1,__LC_SAVE_AREA
 894	ch	%r12,BASED(.L0x020)	# old psw addr == __LC_SVC_OLD_PSW ?
 895	be	BASED(0f)
 896	ch	%r12,BASED(.L0x028)	# old psw addr == __LC_PGM_OLD_PSW ?
 897	be	BASED(0f)
 898	la	%r1,__LC_SAVE_AREA+16
 8990:	mvc	SP_R12(16,%r15),0(%r1)	# move %r12-%r15 to stack
 900	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
 901	l	%r1,BASED(1f)		# branch to kernel_stack_overflow
 902	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 903	br	%r1
 9041:	.long	kernel_stack_overflow
 905#endif
 906
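#
# The cleanup_table_* pairs below bracket address ranges of the entry code
# (the added 0x80000000 matches the 31 bit addressing mode bit of a stored
# PSW address).  cleanup_critical compares the interrupted PSW address at
# 4(%r12) against each range and, if it hit inside one, completes or redoes
# the interrupted sequence so that an asynchronous interrupt or machine
# check sees a consistent state.
#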
 907cleanup_table_system_call:
 908	.long	system_call + 0x80000000, sysc_do_svc + 0x80000000
 909cleanup_table_sysc_tif:
 910	.long	sysc_tif + 0x80000000, sysc_restore + 0x80000000
 911cleanup_table_sysc_restore:
 912	.long	sysc_restore + 0x80000000, sysc_done + 0x80000000
 913cleanup_table_io_tif:
 914	.long	io_tif + 0x80000000, io_restore + 0x80000000
 915cleanup_table_io_restore:
 916	.long	io_restore + 0x80000000, io_done + 0x80000000
 917
 918cleanup_critical:
 919	clc	4(4,%r12),BASED(cleanup_table_system_call)
 920	bl	BASED(0f)
 921	clc	4(4,%r12),BASED(cleanup_table_system_call+4)
 922	bl	BASED(cleanup_system_call)
 9230:
 924	clc	4(4,%r12),BASED(cleanup_table_sysc_tif)
 925	bl	BASED(0f)
 926	clc	4(4,%r12),BASED(cleanup_table_sysc_tif+4)
 927	bl	BASED(cleanup_sysc_tif)
 9280:
 929	clc	4(4,%r12),BASED(cleanup_table_sysc_restore)
 930	bl	BASED(0f)
 931	clc	4(4,%r12),BASED(cleanup_table_sysc_restore+4)
 932	bl	BASED(cleanup_sysc_restore)
 9330:
 934	clc	4(4,%r12),BASED(cleanup_table_io_tif)
 935	bl	BASED(0f)
 936	clc	4(4,%r12),BASED(cleanup_table_io_tif+4)
 937	bl	BASED(cleanup_io_tif)
 9380:
 939	clc	4(4,%r12),BASED(cleanup_table_io_restore)
 940	bl	BASED(0f)
 941	clc	4(4,%r12),BASED(cleanup_table_io_restore+4)
 942	bl	BASED(cleanup_io_restore)
 9430:
 944	br	%r14
 945
 946cleanup_system_call:
 947	mvc	__LC_RETURN_PSW(8),0(%r12)
 948	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
 949	bh	BASED(0f)
 950	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
 951	c	%r12,BASED(.Lmck_old_psw)
 952	be	BASED(0f)
 953	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
 9540:	c	%r12,BASED(.Lmck_old_psw)
 955	la	%r12,__LC_SAVE_AREA+32
 956	be	BASED(0f)
 957	la	%r12,__LC_SAVE_AREA+16
 9580:	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
 959	bhe	BASED(cleanup_vtime)
 960	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
 961	bh	BASED(0f)
 962	mvc	__LC_SAVE_AREA(16),0(%r12)
 9630:	st	%r13,4(%r12)
 964	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
 965	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 966	st	%r15,12(%r12)
 967	CREATE_STACK_FRAME __LC_SAVE_AREA
 968	mvc	SP_PSW(8,%r15),__LC_SVC_OLD_PSW
 969	mvc	SP_ILC(4,%r15),__LC_SVC_ILC
 970	mvc	0(4,%r12),__LC_THREAD_INFO
 971cleanup_vtime:
 972	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
 973	bhe	BASED(cleanup_stime)
 974	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 975cleanup_stime:
 976	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
 977	bh	BASED(cleanup_update)
 978	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 979cleanup_update:
 980	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 981	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
 982	la	%r12,__LC_RETURN_PSW
 983	br	%r14
 984cleanup_system_call_insn:
 985	.long	sysc_saveall + 0x80000000
 986	.long	system_call + 0x80000000
 987	.long	sysc_vtime + 0x80000000
 988	.long	sysc_stime + 0x80000000
 989	.long	sysc_update + 0x80000000
 990
 991cleanup_sysc_tif:
 992	mvc	__LC_RETURN_PSW(4),0(%r12)
 993	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_tif)
 994	la	%r12,__LC_RETURN_PSW
 995	br	%r14
 996
 997cleanup_sysc_restore:
 998	clc	4(4,%r12),BASED(cleanup_sysc_restore_insn)
 999	be	BASED(2f)
1000	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
1001	c	%r12,BASED(.Lmck_old_psw)
1002	be	BASED(0f)
1003	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
10040:	clc	4(4,%r12),BASED(cleanup_sysc_restore_insn+4)
1005	be	BASED(2f)
1006	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15)
1007	c	%r12,BASED(.Lmck_old_psw)
1008	la	%r12,__LC_SAVE_AREA+32
1009	be	BASED(1f)
1010	la	%r12,__LC_SAVE_AREA+16
10111:	mvc	0(16,%r12),SP_R12(%r15)
1012	lm	%r0,%r11,SP_R0(%r15)
1013	l	%r15,SP_R15(%r15)
10142:	la	%r12,__LC_RETURN_PSW
1015	br	%r14
1016cleanup_sysc_restore_insn:
1017	.long	sysc_done - 4 + 0x80000000
1018	.long	sysc_done - 8 + 0x80000000
1019
1020cleanup_io_tif:
1021	mvc	__LC_RETURN_PSW(4),0(%r12)
1022	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_io_tif)
1023	la	%r12,__LC_RETURN_PSW
1024	br	%r14
1025
1026cleanup_io_restore:
1027	clc	4(4,%r12),BASED(cleanup_io_restore_insn)
1028	be	BASED(1f)
1029	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
1030	clc	4(4,%r12),BASED(cleanup_io_restore_insn+4)
1031	be	BASED(1f)
1032	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15)
1033	mvc	__LC_SAVE_AREA+32(16),SP_R12(%r15)
1034	lm	%r0,%r11,SP_R0(%r15)
1035	l	%r15,SP_R15(%r15)
10361:	la	%r12,__LC_RETURN_PSW
1037	br	%r14
1038cleanup_io_restore_insn:
1039	.long	io_done - 4 + 0x80000000
1040	.long	io_done - 8 + 0x80000000
1041
1042/*
1043 * Integer constants
1044 */
1045		.align	4
1046.Lc_spsize:	.long	SP_SIZE
1047.Lc_overhead:	.long	STACK_FRAME_OVERHEAD
1048.Lnr_syscalls:	.long	NR_syscalls
1049.L0x018:	.short	0x018
1050.L0x020:	.short	0x020
1051.L0x028:	.short	0x028
1052.L0x030:	.short	0x030
1053.L0x038:	.short	0x038
1054.Lc_1:		.long	1
1055
1056/*
1057 * Symbol constants
1058 */
1059.Ls390_mcck:	.long	s390_do_machine_check
1060.Ls390_handle_mcck:
1061		.long	s390_handle_mcck
1062.Lmck_old_psw:	.long	__LC_MCK_OLD_PSW
1063.Ldo_IRQ:	.long	do_IRQ
1064.Ldo_extint:	.long	do_extint
1065.Ldo_signal:	.long	do_signal
1066.Ldo_notify_resume:
1067		.long	do_notify_resume
1068.Lhandle_per:	.long	do_per_trap
1069.Ldo_execve:	.long	do_execve
1070.Lexecve_tail:	.long	execve_tail
1071.Ljump_table:	.long	pgm_check_table
1072.Lschedule:	.long	schedule
1073#ifdef CONFIG_PREEMPT
1074.Lpreempt_schedule_irq:
1075		.long	preempt_schedule_irq
1076#endif
1077.Ltrace_entry:	.long	do_syscall_trace_enter
1078.Ltrace_exit:	.long	do_syscall_trace_exit
1079.Lschedtail:	.long	schedule_tail
1080.Lsysc_table:	.long	sys_call_table
1081#ifdef CONFIG_TRACE_IRQFLAGS
1082.Ltrace_irq_on_caller:
1083		.long	trace_hardirqs_on_caller
1084.Ltrace_irq_off_caller:
1085		.long	trace_hardirqs_off_caller
1086#endif
1087#ifdef CONFIG_LOCKDEP
1088.Llockdep_sys_exit:
1089		.long	lockdep_sys_exit
1090#endif
1091.Lcritical_start:
1092		.long	__critical_start + 0x80000000
1093.Lcritical_end:
1094		.long	__critical_end + 0x80000000
1095.Lcleanup_critical:
1096		.long	cleanup_critical
1097
1098		.section .rodata, "a"
1099#define SYSCALL(esa,esame,emu)	.long esa
1100	.globl	sys_call_table
1101sys_call_table:
1102#include "syscalls.S"
1103#undef SYSCALL
v3.5.6
  1/*
  2 *  arch/s390/kernel/entry.S
  3 *    S390 low-level entry points.
  4 *
  5 *    Copyright (C) IBM Corp. 1999,2012
  6 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  7 *		 Hartmut Penner (hp@de.ibm.com),
  8 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  9 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 10 */
 11
 12#include <linux/init.h>
 13#include <linux/linkage.h>
 14#include <asm/cache.h>
 15#include <asm/errno.h>
 16#include <asm/ptrace.h>
 17#include <asm/thread_info.h>
 18#include <asm/asm-offsets.h>
 19#include <asm/unistd.h>
 20#include <asm/page.h>
 21
 22__PT_R0      =	__PT_GPRS
 23__PT_R1      =	__PT_GPRS + 4
 24__PT_R2      =	__PT_GPRS + 8
 25__PT_R3      =	__PT_GPRS + 12
 26__PT_R4      =	__PT_GPRS + 16
 27__PT_R5      =	__PT_GPRS + 20
 28__PT_R6      =	__PT_GPRS + 24
 29__PT_R7      =	__PT_GPRS + 28
 30__PT_R8      =	__PT_GPRS + 32
 31__PT_R9      =	__PT_GPRS + 36
 32__PT_R10     =	__PT_GPRS + 40
 33__PT_R11     =	__PT_GPRS + 44
 34__PT_R12     =	__PT_GPRS + 48
 35__PT_R13     =	__PT_GPRS + 52
 36__PT_R14     =	__PT_GPRS + 56
 37__PT_R15     =	__PT_GPRS + 60
 38
 39_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 40		 _TIF_MCCK_PENDING | _TIF_PER_TRAP )
 41_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 42		 _TIF_MCCK_PENDING)
 43_TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
 44		 _TIF_SYSCALL_TRACEPOINT)
 45
 46STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
 47STACK_SIZE  = 1 << STACK_SHIFT
 48
 49#define BASED(name) name-system_call(%r13)
 50
 51	.macro	TRACE_IRQS_ON
 52#ifdef CONFIG_TRACE_IRQFLAGS
 53	basr	%r2,%r0
 54	l	%r1,BASED(.Lhardirqs_on)
 55	basr	%r14,%r1		# call trace_hardirqs_on_caller
 56#endif
 57	.endm
 58
 59	.macro	TRACE_IRQS_OFF
 60#ifdef CONFIG_TRACE_IRQFLAGS
 61	basr	%r2,%r0
 62	l	%r1,BASED(.Lhardirqs_off)
 63	basr	%r14,%r1		# call trace_hardirqs_off_caller
 64#endif
 65	.endm
 66
 67	.macro	LOCKDEP_SYS_EXIT
 68#ifdef CONFIG_LOCKDEP
 69	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
 70	jz	.+10
 71	l	%r1,BASED(.Llockdep_sys_exit)
 72	basr	%r14,%r1		# call lockdep_sys_exit
 73#endif
 74	.endm
 75
 76	.macro	CHECK_STACK stacksize,savearea
 77#ifdef CONFIG_CHECK_STACK
 78	tml	%r15,\stacksize - CONFIG_STACK_GUARD
 79	la	%r14,\savearea
 80	jz	stack_overflow
 81#endif
 82	.endm
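# CHECK_STACK relies on \stacksize being a power of two: if the offset of
# %r15 within its stack is below CONFIG_STACK_GUARD, the tested bits are
# all zero and we jump to stack_overflow with %r14 pointing to the register
# save area.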
 83
 84	.macro	SWITCH_ASYNC savearea,stack,shift
 85	tmh	%r8,0x0001		# interrupting from user ?
 86	jnz	1f
 87	lr	%r14,%r9
 88	sl	%r14,BASED(.Lcritical_start)
 89	cl	%r14,BASED(.Lcritical_length)
 90	jhe	0f
 91	la	%r11,\savearea		# inside critical section, do cleanup
 92	bras	%r14,cleanup_critical
 93	tmh	%r8,0x0001		# retest problem state after cleanup
 94	jnz	1f
 950:	l	%r14,\stack		# are we already on the target stack?
 96	slr	%r14,%r15
 97	sra	%r14,\shift
 98	jnz	1f
 99	CHECK_STACK 1<<\shift,\savearea
100	j	2f
1011:	l	%r15,\stack		# load target stack
1022:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
103	la	%r11,STACK_FRAME_OVERHEAD(%r15)
104	.endm
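# SWITCH_ASYNC: for an interrupt taken in the kernel, first let
# cleanup_critical repair a half finished critical section, then keep the
# current stack if (\stack - %r15) >> \shift is already zero, otherwise
# switch to \stack; an interrupt from user space always switches.  Either
# way a stack frame plus pt_regs is reserved and %r11 is left pointing at
# the pt_regs area.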
105
106	.macro	ADD64 high,low,timer
107	al	\high,\timer
108	al	\low,4+\timer
109	brc	12,.+8
110	ahi	\high,1
111	.endm
112
113	.macro	SUB64 high,low,timer
114	sl	\high,\timer
115	sl	\low,4+\timer
116	brc	3,.+8
117	ahi	\high,-1
118	.endm
119
120	.macro	UPDATE_VTIME high,low,enter_timer
121	lm	\high,\low,__LC_EXIT_TIMER
122	SUB64	\high,\low,\enter_timer
123	ADD64	\high,\low,__LC_USER_TIMER
124	stm	\high,\low,__LC_USER_TIMER
125	lm	\high,\low,__LC_LAST_UPDATE_TIMER
126	SUB64	\high,\low,__LC_EXIT_TIMER
127	ADD64	\high,\low,__LC_SYSTEM_TIMER
128	stm	\high,\low,__LC_SYSTEM_TIMER
129	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
130	.endm
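# ADD64/SUB64 add/subtract a 64 bit lowcore value to/from the register pair
# \high,\low, propagating the carry resp. borrow with brc.  UPDATE_VTIME
# thus performs, roughly (illustrative C):
#
#	user_timer   += exit_timer - enter_timer;
#	system_timer += last_update_timer - exit_timer;
#	last_update_timer = enter_timer;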
131
132	.macro REENABLE_IRQS
133	st	%r8,__LC_RETURN_PSW
134	ni	__LC_RETURN_PSW,0xbf
135	ssm	__LC_RETURN_PSW
136	.endm
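# Here the system mask comes from %r8, which still holds the first word of
# the interrupted PSW; clearing 0x40 keeps PER off while ssm restores the
# DAT/I/O/external mask bits.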
137
138	.section .kprobes.text, "ax"
139
140/*
141 * Scheduler resume function, called by switch_to
142 *  gpr2 = (task_struct *) prev
143 *  gpr3 = (task_struct *) next
144 * Returns:
145 *  gpr2 = prev
146 */
147ENTRY(__switch_to)
148	stm	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
149	st	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
150	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
151	l	%r5,__THREAD_info(%r3)		# get thread_info of next
152	lr	%r15,%r5
153	ahi	%r15,STACK_SIZE			# end of kernel stack of next
154	st	%r3,__LC_CURRENT		# store task struct of next
155	st	%r5,__LC_THREAD_INFO		# store thread info of next
156	st	%r15,__LC_KERNEL_STACK		# store end of kernel stack
157	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
158	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3)	# store pid of next
159	l	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
160	tm	__TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
161	jz	0f
162	ni	__TI_flags+3(%r4),255-_TIF_MCCK_PENDING	# clear flag in prev
163	oi	__TI_flags+3(%r5),_TIF_MCCK_PENDING	# set it in next
1640:	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
165	br	%r14
166
167__critical_start:
168/*
169 * SVC interrupt handler routine. System calls are synchronous events and
170 * are executed with interrupts enabled.
171 */
172
173ENTRY(system_call)
174	stpt	__LC_SYNC_ENTER_TIMER
175sysc_stm:
176	stm	%r8,%r15,__LC_SAVE_AREA_SYNC
177	l	%r12,__LC_THREAD_INFO
178	l	%r13,__LC_SVC_NEW_PSW+4
179sysc_per:
180	l	%r15,__LC_KERNEL_STACK
181	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
182	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
183sysc_vtime:
184	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
185	stm	%r0,%r7,__PT_R0(%r11)
186	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
187	mvc	__PT_PSW(8,%r11),__LC_SVC_OLD_PSW
188	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
189sysc_do_svc:
190	oi	__TI_flags+3(%r12),_TIF_SYSCALL
191	lh	%r8,__PT_INT_CODE+2(%r11)
192	sla	%r8,2				# shift and test for svc0
193	jnz	sysc_nr_ok
194	# svc 0: system call number in %r1
195	cl	%r1,BASED(.Lnr_syscalls)
196	jnl	sysc_nr_ok
197	sth	%r1,__PT_INT_CODE+2(%r11)
198	lr	%r8,%r1
199	sla	%r8,2
200sysc_nr_ok:
201	l	%r10,BASED(.Lsys_call_table)	# 31 bit system call table
202	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
203	st	%r2,__PT_ORIG_GPR2(%r11)
204	st	%r7,STACK_FRAME_OVERHEAD(%r15)
205	l	%r9,0(%r8,%r10)			# get system call addr.
206	tm	__TI_flags+2(%r12),_TIF_TRACE >> 8
207	jnz	sysc_tracesys
208	basr	%r14,%r9			# call sys_xxxx
209	st	%r2,__PT_R2(%r11)		# store return value
210
211sysc_return:
212	LOCKDEP_SYS_EXIT
213sysc_tif:
214	tm	__PT_PSW+1(%r11),0x01		# returning to user ?
215	jno	sysc_restore
216	tm	__TI_flags+3(%r12),_TIF_WORK_SVC
217	jnz	sysc_work			# check for work
218	ni	__TI_flags+3(%r12),255-_TIF_SYSCALL
219sysc_restore:
220	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
221	stpt	__LC_EXIT_TIMER
222	lm	%r0,%r15,__PT_R0(%r11)
223	lpsw	__LC_RETURN_PSW
224sysc_done:
225
226#
227# One of the work bits is on. Find out which one.
228#
229sysc_work:
230	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
231	jo	sysc_mcck_pending
232	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
233	jo	sysc_reschedule
234	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
235	jo	sysc_sigpending
236	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
237	jo	sysc_notify_resume
238	tm	__TI_flags+3(%r12),_TIF_PER_TRAP
239	jo	sysc_singlestep
240	j	sysc_return		# beware of critical section cleanup
241
242#
243# _TIF_NEED_RESCHED is set, call schedule
244#
245sysc_reschedule:
246	l	%r1,BASED(.Lschedule)
247	la	%r14,BASED(sysc_return)
248	br	%r1			# call schedule
249
250#
251# _TIF_MCCK_PENDING is set, call handler
252#
253sysc_mcck_pending:
254	l	%r1,BASED(.Lhandle_mcck)
255	la	%r14,BASED(sysc_return)
256	br	%r1			# TIF bit will be cleared by handler
257
258#
259# _TIF_SIGPENDING is set, call do_signal
260#
261sysc_sigpending:
262	ni	__TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
263	lr	%r2,%r11		# pass pointer to pt_regs
264	l	%r1,BASED(.Ldo_signal)
265	basr	%r14,%r1		# call do_signal
266	tm	__TI_flags+3(%r12),_TIF_SYSCALL
267	jno	sysc_return
268	lm	%r2,%r7,__PT_R2(%r11)	# load svc arguments
269	xr	%r8,%r8			# svc 0 returns -ENOSYS
270	clc	__PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
271	jnl	sysc_nr_ok		# invalid svc number -> do svc 0
272	lh	%r8,__PT_INT_CODE+2(%r11)	# load new svc number
273	sla	%r8,2
274	j	sysc_nr_ok		# restart svc
275
276#
277# _TIF_NOTIFY_RESUME is set, call do_notify_resume
278#
279sysc_notify_resume:
280	lr	%r2,%r11		# pass pointer to pt_regs
281	l	%r1,BASED(.Ldo_notify_resume)
282	la	%r14,BASED(sysc_return)
283	br	%r1			# call do_notify_resume
284
285#
286# _TIF_PER_TRAP is set, call do_per_trap
287#
288sysc_singlestep:
289	ni	__TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
290	lr	%r2,%r11		# pass pointer to pt_regs
291	l	%r1,BASED(.Ldo_per_trap)
292	la	%r14,BASED(sysc_return)
293	br	%r1			# call do_per_trap
294
295#
296# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
297# and after the system call
298#
299sysc_tracesys:
300	l	%r1,BASED(.Ltrace_enter)
301	lr	%r2,%r11		# pass pointer to pt_regs
302	la	%r3,0
303	xr	%r0,%r0
304	icm	%r0,3,__PT_INT_CODE+2(%r11)
305	st	%r0,__PT_R2(%r11)
306	basr	%r14,%r1		# call do_syscall_trace_enter
307	cl	%r2,BASED(.Lnr_syscalls)
308	jnl	sysc_tracenogo
309	lr	%r8,%r2
310	sll	%r8,2
311	l	%r9,0(%r8,%r10)
312sysc_tracego:
313	lm	%r3,%r7,__PT_R3(%r11)
314	st	%r7,STACK_FRAME_OVERHEAD(%r15)
315	l	%r2,__PT_ORIG_GPR2(%r11)
316	basr	%r14,%r9		# call sys_xxx
317	st	%r2,__PT_R2(%r11)	# store return value
318sysc_tracenogo:
319	tm	__TI_flags+2(%r12),_TIF_TRACE >> 8
320	jz	sysc_return
321	l	%r1,BASED(.Ltrace_exit)
322	lr	%r2,%r11		# pass pointer to pt_regs
323	la	%r14,BASED(sysc_return)
324	br	%r1			# call do_syscall_trace_exit
325
326#
327# a new process exits the kernel with ret_from_fork
328#
329ENTRY(ret_from_fork)
330	la	%r11,STACK_FRAME_OVERHEAD(%r15)
331	l	%r12,__LC_THREAD_INFO
332	l	%r13,__LC_SVC_NEW_PSW+4
333	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
334	jo	0f
335	st	%r15,__PT_R15(%r11)	# store stack pointer for new kthread
3360:	l	%r1,BASED(.Lschedule_tail)
337	basr	%r14,%r1		# call schedule_tail
338	TRACE_IRQS_ON
339	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
340	j	sysc_tracenogo
341
342#
343# the kernel_execve function needs to deal with a pt_regs that is not
344# at the usual place
345#
346ENTRY(kernel_execve)
347	stm	%r12,%r15,48(%r15)
348	lr	%r14,%r15
349	l	%r13,__LC_SVC_NEW_PSW+4
350	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
351	st	%r14,__SF_BACKCHAIN(%r15)
352	la	%r12,STACK_FRAME_OVERHEAD(%r15)
353	xc	0(__PT_SIZE,%r12),0(%r12)
354	l	%r1,BASED(.Ldo_execve)
355	lr	%r5,%r12
356	basr	%r14,%r1		# call do_execve
357	ltr	%r2,%r2
358	je	0f
359	ahi	%r15,(STACK_FRAME_OVERHEAD + __PT_SIZE)
360	lm	%r12,%r15,48(%r15)
361	br	%r14
362	# execve succeeded.
3630:	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
364	l	%r15,__LC_KERNEL_STACK	# load ksp
365	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
366	la	%r11,STACK_FRAME_OVERHEAD(%r15)
367	mvc	0(__PT_SIZE,%r11),0(%r12)	# copy pt_regs
368	l	%r12,__LC_THREAD_INFO
369	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
370	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
371	l	%r1,BASED(.Lexecve_tail)
372	basr	%r14,%r1		# call execve_tail
373	j	sysc_return
374
375/*
376 * Program check handler routine
377 */
378
379ENTRY(pgm_check_handler)
380	stpt	__LC_SYNC_ENTER_TIMER
381	stm	%r8,%r15,__LC_SAVE_AREA_SYNC
382	l	%r12,__LC_THREAD_INFO
383	l	%r13,__LC_SVC_NEW_PSW+4
384	lm	%r8,%r9,__LC_PGM_OLD_PSW
385	tmh	%r8,0x0001		# test problem state bit
386	jnz	1f			# -> fault in user space
387	tmh	%r8,0x4000		# PER bit set in old PSW ?
388	jnz	0f			# -> enabled, can't be a double fault
389	tm	__LC_PGM_ILC+3,0x80	# check for per exception
390	jnz	pgm_svcper		# -> single stepped svc
3910:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
392	j	2f
3931:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
394	l	%r15,__LC_KERNEL_STACK
3952:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
396	la	%r11,STACK_FRAME_OVERHEAD(%r15)
397	stm	%r0,%r7,__PT_R0(%r11)
398	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
399	stm	%r8,%r9,__PT_PSW(%r11)
400	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
401	mvc	__PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE
402	tm	__LC_PGM_ILC+3,0x80	# check for per exception
403	jz	0f
404	l	%r1,__TI_task(%r12)
405	tmh	%r8,0x0001		# kernel per event ?
406	jz	pgm_kprobe
407	oi	__TI_flags+3(%r12),_TIF_PER_TRAP
408	mvc	__THREAD_per_address(4,%r1),__LC_PER_ADDRESS
409	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
410	mvc	__THREAD_per_paid(1,%r1),__LC_PER_PAID
4110:	REENABLE_IRQS
412	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
413	l	%r1,BASED(.Ljump_table)
414	la	%r10,0x7f
415	n	%r10,__PT_INT_CODE(%r11)
416	je	sysc_return
417	sll	%r10,2
418	l	%r1,0(%r10,%r1)		# load address of handler routine
419	lr	%r2,%r11		# pass pointer to pt_regs
420	basr	%r14,%r1		# branch to interrupt-handler
421	j	sysc_return
422
423#
424# PER event in supervisor state, must be kprobes
425#
426pgm_kprobe:
427	REENABLE_IRQS
428	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
429	l	%r1,BASED(.Ldo_per_trap)
430	lr	%r2,%r11		# pass pointer to pt_regs
431	basr	%r14,%r1		# call do_per_trap
432	j	sysc_return
433
434#
435# single stepped system call
436#
437pgm_svcper:
438	oi	__TI_flags+3(%r12),_TIF_PER_TRAP
439	mvc	__LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
440	mvc	__LC_RETURN_PSW+4(4),BASED(.Lsysc_per)
441	lpsw	__LC_RETURN_PSW		# branch to sysc_per and enable irqs
442
443/*
444 * IO interrupt handler routine
445 */
446
447ENTRY(io_int_handler)
448	stck	__LC_INT_CLOCK
449	stpt	__LC_ASYNC_ENTER_TIMER
450	stm	%r8,%r15,__LC_SAVE_AREA_ASYNC
451	l	%r12,__LC_THREAD_INFO
452	l	%r13,__LC_SVC_NEW_PSW+4
453	lm	%r8,%r9,__LC_IO_OLD_PSW
454	tmh	%r8,0x0001		# interrupting from user ?
455	jz	io_skip
456	UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
457io_skip:
458	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
459	stm	%r0,%r7,__PT_R0(%r11)
460	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
461	stm	%r8,%r9,__PT_PSW(%r11)
462	TRACE_IRQS_OFF
463	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
464	l	%r1,BASED(.Ldo_IRQ)
465	lr	%r2,%r11		# pass pointer to pt_regs
466	basr	%r14,%r1		# call do_IRQ
467io_return:
468	LOCKDEP_SYS_EXIT
469	TRACE_IRQS_ON
470io_tif:
471	tm	__TI_flags+3(%r12),_TIF_WORK_INT
472	jnz	io_work			# there is work to do (signals etc.)
473io_restore:
474	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
475	stpt	__LC_EXIT_TIMER
476	lm	%r0,%r15,__PT_R0(%r11)
477	lpsw	__LC_RETURN_PSW
478io_done:
479
480#
481# There is work to do; find out in which context we have been interrupted:
482# 1) if we return to user space we can do all _TIF_WORK_INT work
483# 2) if we return to kernel code and preemptive scheduling is enabled check
484#    the preemption counter and if it is zero call preempt_schedule_irq
485# Before any work can be done, a switch to the kernel stack is required.
486#
487io_work:
488	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
489	jo	io_work_user		# yes -> do resched & signal
490#ifdef CONFIG_PREEMPT
491	# check for preemptive scheduling
492	icm	%r0,15,__TI_precount(%r12)
493	jnz	io_restore		# preemption disabled
494	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
495	jno	io_restore
496	# switch to kernel stack
497	l	%r1,__PT_R15(%r11)
498	ahi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
499	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
500	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
501	la	%r11,STACK_FRAME_OVERHEAD(%r1)
502	lr	%r15,%r1
503	# TRACE_IRQS_ON already done at io_return, call
504	# TRACE_IRQS_OFF to keep things symmetrical
505	TRACE_IRQS_OFF
506	l	%r1,BASED(.Lpreempt_irq)
507	basr	%r14,%r1		# call preempt_schedule_irq
508	j	io_return
509#else
510	j	io_restore
511#endif
512
513#
514# Need to do work before returning to userspace, switch to kernel stack
515#
516io_work_user:
517	l	%r1,__LC_KERNEL_STACK
518	ahi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
519	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
520	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
521	la	%r11,STACK_FRAME_OVERHEAD(%r1)
522	lr	%r15,%r1
523
524#
525# One of the work bits is on. Find out which one.
526# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
527#		and _TIF_MCCK_PENDING
528#
529io_work_tif:
530	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
531	jo	io_mcck_pending
532	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
533	jo	io_reschedule
534	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
535	jo	io_sigpending
536	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
537	jo	io_notify_resume
538	j	io_return		# beware of critical section cleanup
539
540#
541# _TIF_MCCK_PENDING is set, call handler
542#
543io_mcck_pending:
544	# TRACE_IRQS_ON already done at io_return
545	l	%r1,BASED(.Lhandle_mcck)
546	basr	%r14,%r1		# TIF bit will be cleared by handler
547	TRACE_IRQS_OFF
548	j	io_return
549
550#
551# _TIF_NEED_RESCHED is set, call schedule
552#
553io_reschedule:
554	# TRACE_IRQS_ON already done at io_return
555	l	%r1,BASED(.Lschedule)
556	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
557	basr	%r14,%r1		# call scheduler
558	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
559	TRACE_IRQS_OFF
560	j	io_return
561
562#
563# _TIF_SIGPENDING is set, call do_signal
564#
565io_sigpending:
566	# TRACE_IRQS_ON already done at io_return
567	l	%r1,BASED(.Ldo_signal)
568	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
569	lr	%r2,%r11		# pass pointer to pt_regs
570	basr	%r14,%r1		# call do_signal
571	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
572	TRACE_IRQS_OFF
573	j	io_return
574
575#
576# _TIF_NOTIFY_RESUME is set, call do_notify_resume
577#
578io_notify_resume:
579	# TRACE_IRQS_ON already done at io_return
580	l	%r1,BASED(.Ldo_notify_resume)
581	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
582	lr	%r2,%r11		# pass pointer to pt_regs
583	basr	%r14,%r1		# call do_notify_resume
584	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
585	TRACE_IRQS_OFF
586	j	io_return
587
588/*
589 * External interrupt handler routine
590 */
591
592ENTRY(ext_int_handler)
593	stck	__LC_INT_CLOCK
594	stpt	__LC_ASYNC_ENTER_TIMER
595	stm	%r8,%r15,__LC_SAVE_AREA_ASYNC
596	l	%r12,__LC_THREAD_INFO
597	l	%r13,__LC_SVC_NEW_PSW+4
598	lm	%r8,%r9,__LC_EXT_OLD_PSW
599	tmh	%r8,0x0001		# interrupting from user ?
600	jz	ext_skip
601	UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
602ext_skip:
603	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
604	stm	%r0,%r7,__PT_R0(%r11)
605	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
606	stm	%r8,%r9,__PT_PSW(%r11)
607	TRACE_IRQS_OFF
608	lr	%r2,%r11		# pass pointer to pt_regs
609	l	%r3,__LC_EXT_CPU_ADDR	# get cpu address + interruption code
610	l	%r4,__LC_EXT_PARAMS	# get external parameters
611	l	%r1,BASED(.Ldo_extint)
612	basr	%r14,%r1		# call do_extint
613	j	io_return
614
615/*
616 * Load idle PSW. The second "half" of this function is in cleanup_idle.
617 */
618ENTRY(psw_idle)
619	st	%r4,__SF_EMPTY(%r15)
620	basr	%r1,0
621	la	%r1,psw_idle_lpsw+4-.(%r1)
622	st	%r1,__SF_EMPTY+4(%r15)
623	oi	__SF_EMPTY+4(%r15),0x80
624	la	%r1,.Lvtimer_max-psw_idle_lpsw-4(%r1)
625	stck	__IDLE_ENTER(%r2)
626	ltr	%r5,%r5
627	stpt	__VQ_IDLE_ENTER(%r3)
628	jz	psw_idle_lpsw
629	spt	0(%r1)
630psw_idle_lpsw:
631	lpsw	__SF_EMPTY(%r15)
632	br	%r14
633psw_idle_end:
634
635__critical_end:
636
637/*
638 * Machine check handler routines
639 */
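/*
 * If the machine-check code does not flag the stored CPU timer as valid,
 * the compare chain below picks the smallest of the sync/async entry,
 * exit and last-update snapshots as a conservative substitute, reloads
 * the CPU timer from it and uses it as the machine-check entry time
 * stamp.
 */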
640
641ENTRY(mcck_int_handler)
642	stck	__LC_MCCK_CLOCK
643	spt	__LC_CPU_TIMER_SAVE_AREA	# revalidate cpu timer
644	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA	# revalidate gprs
645	l	%r12,__LC_THREAD_INFO
646	l	%r13,__LC_SVC_NEW_PSW+4
647	lm	%r8,%r9,__LC_MCK_OLD_PSW
648	tm	__LC_MCCK_CODE,0x80	# system damage?
649	jo	mcck_panic		# yes -> rest of mcck code invalid
650	la	%r14,__LC_CPU_TIMER_SAVE_AREA
651	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
652	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
653	jo	3f
654	la	%r14,__LC_SYNC_ENTER_TIMER
655	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
656	jl	0f
657	la	%r14,__LC_ASYNC_ENTER_TIMER
6580:	clc	0(8,%r14),__LC_EXIT_TIMER
659	jl	1f
660	la	%r14,__LC_EXIT_TIMER
6611:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
662	jl	2f
663	la	%r14,__LC_LAST_UPDATE_TIMER
6642:	spt	0(%r14)
665	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
6663:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
667	jno	mcck_panic		# no -> skip cleanup critical
668	tmh	%r8,0x0001		# interrupting from user ?
669	jz	mcck_skip
670	UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
671mcck_skip:
672	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
673	mvc	__PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA
674	stm	%r8,%r9,__PT_PSW(%r11)
675	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
676	l	%r1,BASED(.Ldo_machine_check)
677	lr	%r2,%r11		# pass pointer to pt_regs
678	basr	%r14,%r1		# call s390_do_machine_check
679	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
680	jno	mcck_return
681	l	%r1,__LC_KERNEL_STACK	# switch to kernel stack
682	ahi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
683	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
684	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
685	la	%r11,STACK_FRAME_OVERHEAD(%r1)	# %r11 -> pt_regs copy
686	lr	%r15,%r1
687	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
688	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
689	jno	mcck_return
690	TRACE_IRQS_OFF
691	l	%r1,BASED(.Lhandle_mcck)
692	basr	%r14,%r1		# call s390_handle_mcck
693	TRACE_IRQS_ON
694mcck_return:
695	mvc	__LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
696	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
697	jno	0f
698	lm	%r0,%r15,__PT_R0(%r11)
699	stpt	__LC_EXIT_TIMER
700	lpsw	__LC_RETURN_MCCK_PSW
7010:	lm	%r0,%r15,__PT_R0(%r11)
702	lpsw	__LC_RETURN_MCCK_PSW
703
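#
# mcck_panic: if %r15 is not already within the panic stack page, switch
# to the panic stack, then allocate a stack frame plus pt_regs and rejoin
# mcck_skip.
#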
704mcck_panic:
705	l	%r14,__LC_PANIC_STACK
706	slr	%r14,%r15
707	sra	%r14,PAGE_SHIFT
708	jz	0f
709	l	%r15,__LC_PANIC_STACK
7100:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
711	j	mcck_skip
712
713#
714# PSW restart interrupt handler
715#
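# %r15 is saved and a pt_regs plus stack frame is built on the restart
# stack.  __LC_RESTART_FN supplies the function, its parameter and the
# source cpu address; a non-negative source cpu is sigp sensed until its
# status is stored, the function is called, and the handler finally sigp
# stops the current cpu.
#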
716ENTRY(restart_int_handler)
717	st	%r15,__LC_SAVE_AREA_RESTART
718	l	%r15,__LC_RESTART_STACK
719	ahi	%r15,-__PT_SIZE			# create pt_regs on stack
720	xc	0(__PT_SIZE,%r15),0(%r15)
721	stm	%r0,%r14,__PT_R0(%r15)
722	mvc	__PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
723	mvc	__PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
724	ahi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
725	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
726	lm	%r1,%r3,__LC_RESTART_FN		# load fn, parm & source cpu
727	ltr	%r3,%r3				# test source cpu address
728	jm	1f				# negative -> skip source stop
7290:	sigp	%r4,%r3,1			# sigp sense to source cpu
730	brc	10,0b				# wait for status stored
7311:	basr	%r14,%r1			# call function
732	stap	__SF_EMPTY(%r15)		# store cpu address
733	lh	%r3,__SF_EMPTY(%r15)
7342:	sigp	%r4,%r3,5			# sigp stop to current cpu
735	brc	2,2b
7363:	j	3b
737
738	.section .kprobes.text, "ax"
739
740#ifdef CONFIG_CHECK_STACK
741/*
742 * The synchronous or the asynchronous stack overflowed. We are dead.
743 * No need to properly save the registers, we are going to panic anyway.
744 * Setup a pt_regs so that show_trace can provide a good call trace.
745 */
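/*
 * On entry %r14 presumably addresses the save area holding the
 * interrupted %r8-%r15 (it is copied into the new pt_regs below), and
 * %r11 ends up as the pt_regs pointer handed to kernel_stack_overflow.
 */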
746stack_overflow:
747	l	%r11,__LC_PANIC_STACK	# change to panic stack
748	ahi	%r11,-__PT_SIZE		# create pt_regs
749	stm	%r0,%r7,__PT_R0(%r11)
750	stm	%r8,%r9,__PT_PSW(%r11)
751	mvc	__PT_R8(32,%r11),0(%r14)	# move %r8-%r15 to pt_regs
752	lr	%r15,%r11
753	ahi	%r15,-STACK_FRAME_OVERHEAD
754	l	%r1,BASED(1f)
755	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
756	lr	%r2,%r11		# pass pointer to pt_regs
757	br	%r1			# branch to kernel_stack_overflow
7581:	.long	kernel_stack_overflow
759#endif
760
761cleanup_table:
762	.long	system_call + 0x80000000
763	.long	sysc_do_svc + 0x80000000
764	.long	sysc_tif + 0x80000000
765	.long	sysc_restore + 0x80000000
766	.long	sysc_done + 0x80000000
767	.long	io_tif + 0x80000000
768	.long	io_restore + 0x80000000
769	.long	io_done + 0x80000000
770	.long	psw_idle + 0x80000000
771	.long	psw_idle_end + 0x80000000
772
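#
# cleanup_critical: %r9 holds the address at which the critical section
# was interrupted; the table entries carry the 31-bit addressing bit so
# they compare directly against that PSW address.  The ladder below picks
# the cleanup routine for the sub-range containing %r9, or returns at
# once if no fixup is needed.
#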
773cleanup_critical:
774	cl	%r9,BASED(cleanup_table)	# system_call
775	jl	0f
776	cl	%r9,BASED(cleanup_table+4)	# sysc_do_svc
777	jl	cleanup_system_call
778	cl	%r9,BASED(cleanup_table+8)	# sysc_tif
779	jl	0f
780	cl	%r9,BASED(cleanup_table+12)	# sysc_restore
781	jl	cleanup_sysc_tif
782	cl	%r9,BASED(cleanup_table+16)	# sysc_done
783	jl	cleanup_sysc_restore
784	cl	%r9,BASED(cleanup_table+20)	# io_tif
785	jl	0f
786	cl	%r9,BASED(cleanup_table+24)	# io_restore
787	jl	cleanup_io_tif
788	cl	%r9,BASED(cleanup_table+28)	# io_done
789	jl	cleanup_io_restore
790	cl	%r9,BASED(cleanup_table+32)	# psw_idle
791	jl	0f
792	cl	%r9,BASED(cleanup_table+36)	# psw_idle_end
793	jl	cleanup_idle
7940:	br	%r14
795
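#
# cleanup_system_call: redo by hand whatever part of the system_call
# entry sequence had not completed when the interruption hit.  The
# addresses in cleanup_system_call_insn mark the individual steps (stpt,
# stm, user and system time accounting); each compare against %r9 decides
# whether that step still has to be performed before execution is
# redirected to sysc_do_svc.
#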
796cleanup_system_call:
797	# check if stpt has been executed
798	cl	%r9,BASED(cleanup_system_call_insn)
799	jh	0f
800	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
801	chi	%r11,__LC_SAVE_AREA_ASYNC
802	je	0f
803	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
8040:	# check if stm has been executed
805	cl	%r9,BASED(cleanup_system_call_insn+4)
806	jh	0f
807	mvc	__LC_SAVE_AREA_SYNC(32),0(%r11)
8080:	# set up saved registers r12 and r13
809	st	%r12,16(%r11)		# r12 thread-info pointer
810	st	%r13,20(%r11)		# r13 literal-pool pointer
811	# check if the user time calculation has been done
812	cl	%r9,BASED(cleanup_system_call_insn+8)
813	jh	0f
814	l	%r10,__LC_EXIT_TIMER
815	l	%r15,__LC_EXIT_TIMER+4
816	SUB64	%r10,%r15,__LC_SYNC_ENTER_TIMER
817	ADD64	%r10,%r15,__LC_USER_TIMER
818	st	%r10,__LC_USER_TIMER
819	st	%r15,__LC_USER_TIMER+4
8200:	# check if the system time calculation has been done
821	cl	%r9,BASED(cleanup_system_call_insn+12)
822	jh	0f
823	l	%r10,__LC_LAST_UPDATE_TIMER
824	l	%r15,__LC_LAST_UPDATE_TIMER+4
825	SUB64	%r10,%r15,__LC_EXIT_TIMER
826	ADD64	%r10,%r15,__LC_SYSTEM_TIMER
827	st	%r10,__LC_SYSTEM_TIMER
828	st	%r15,__LC_SYSTEM_TIMER+4
8290:	# update accounting time stamp
830	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
831	# set up saved register 11
832	l	%r15,__LC_KERNEL_STACK
833	ahi	%r15,-__PT_SIZE
834	st	%r15,12(%r11)		# r11 pt_regs pointer
835	# fill pt_regs
836	mvc	__PT_R8(32,%r15),__LC_SAVE_AREA_SYNC
837	stm	%r0,%r7,__PT_R0(%r15)
838	mvc	__PT_PSW(8,%r15),__LC_SVC_OLD_PSW
839	mvc	__PT_INT_CODE(4,%r15),__LC_SVC_ILC
840	# set up saved register 15
841	ahi	%r15,-STACK_FRAME_OVERHEAD
842	st	%r15,28(%r11)		# r15 stack pointer
843	# set new psw address and exit
844	l	%r9,BASED(cleanup_table+4)	# sysc_do_svc + 0x80000000
845	br	%r14
846cleanup_system_call_insn:
847	.long	system_call + 0x80000000
848	.long	sysc_stm + 0x80000000
849	.long	sysc_vtime + 0x80000000 + 36
850	.long	sysc_vtime + 0x80000000 + 76
851
852cleanup_sysc_tif:
853	l	%r9,BASED(cleanup_table+8)	# sysc_tif + 0x80000000
854	br	%r14
855
856cleanup_sysc_restore:
857	cl	%r9,BASED(cleanup_sysc_restore_insn)
858	jhe	0f
859	l	%r9,12(%r11)		# get saved pointer to pt_regs
860	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r9)
861	mvc	0(32,%r11),__PT_R8(%r9)
862	lm	%r0,%r7,__PT_R0(%r9)
8630:	lm	%r8,%r9,__LC_RETURN_PSW
864	br	%r14
865cleanup_sysc_restore_insn:
866	.long	sysc_done - 4 + 0x80000000
867
868cleanup_io_tif:
869	l	%r9,BASED(cleanup_table+20)	# io_tif + 0x80000000
870	br	%r14
871
872cleanup_io_restore:
873	cl	%r9,BASED(cleanup_io_restore_insn)
874	jhe	0f
875	l	%r9,12(%r11)		# get saved r11 pointer to pt_regs
876	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r9)
877	mvc	0(32,%r11),__PT_R8(%r9)
878	lm	%r0,%r7,__PT_R0(%r9)
8790:	lm	%r8,%r9,__LC_RETURN_PSW
880	br	%r14
881cleanup_io_restore_insn:
882	.long	io_done - 4 + 0x80000000
883
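#
# cleanup_idle: finish the work of psw_idle if the interruption arrived
# between psw_idle and psw_idle_end: copy the idle exit clock and timer,
# fix up the enter values or the CPU timer depending on how far psw_idle
# got, update the steal and system time accounting and the last-update
# stamps, clear the wait bit in the return PSW and resume at psw_idle's
# return address.
#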
884cleanup_idle:
885	# copy interrupt clock & cpu timer
886	mvc	__IDLE_EXIT(8,%r2),__LC_INT_CLOCK
887	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
888	chi	%r11,__LC_SAVE_AREA_ASYNC
889	je	0f
890	mvc	__IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
891	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
8920:	# check if stck has been executed
893	cl	%r9,BASED(cleanup_idle_insn)
894	jhe	1f
895	mvc	__IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
896	mvc	__VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
897	j	2f
8981:	# check if the cpu timer has been reprogrammed
899	ltr	%r5,%r5
900	jz	2f
901	spt	__VQ_IDLE_ENTER(%r3)
9022:	# account system time going idle
903	lm	%r9,%r10,__LC_STEAL_TIMER
904	ADD64	%r9,%r10,__IDLE_ENTER(%r2)
905	SUB64	%r9,%r10,__LC_LAST_UPDATE_CLOCK
906	stm	%r9,%r10,__LC_STEAL_TIMER
907	mvc	__LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
908	lm	%r9,%r10,__LC_SYSTEM_TIMER
909	ADD64	%r9,%r10,__LC_LAST_UPDATE_TIMER
910	SUB64	%r9,%r10,__VQ_IDLE_ENTER(%r3)
911	stm	%r9,%r10,__LC_SYSTEM_TIMER
912	mvc	__LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
913	# prepare return psw
914	n	%r8,BASED(cleanup_idle_wait)	# clear wait state bit
915	l	%r9,24(%r11)			# return from psw_idle
916	br	%r14
917cleanup_idle_insn:
918	.long	psw_idle_lpsw + 0x80000000
919cleanup_idle_wait:
920	.long	0xfffdffff
921
922/*
923 * Integer constants
924 */
925	.align	4
926.Lnr_syscalls:
927	.long	NR_syscalls
928.Lvtimer_max:
929	.quad	0x7fffffffffffffff
930
931/*
932 * Symbol constants
933 */
934.Ldo_machine_check:	.long	s390_do_machine_check
935.Lhandle_mcck:		.long	s390_handle_mcck
936.Ldo_IRQ:		.long	do_IRQ
937.Ldo_extint:		.long	do_extint
938.Ldo_signal:		.long	do_signal
939.Ldo_notify_resume:	.long	do_notify_resume
940.Ldo_per_trap:		.long	do_per_trap
941.Ldo_execve:		.long	do_execve
942.Lexecve_tail:		.long	execve_tail
943.Ljump_table:		.long	pgm_check_table
944.Lschedule:		.long	schedule
945#ifdef CONFIG_PREEMPT
946.Lpreempt_irq:		.long	preempt_schedule_irq
947#endif
948.Ltrace_enter:		.long	do_syscall_trace_enter
949.Ltrace_exit:		.long	do_syscall_trace_exit
950.Lschedule_tail:	.long	schedule_tail
951.Lsys_call_table:	.long	sys_call_table
952.Lsysc_per:		.long	sysc_per + 0x80000000
953#ifdef CONFIG_TRACE_IRQFLAGS
954.Ltrace_irq_on_caller:	.long	trace_hardirqs_on_caller
955.Ltrace_irq_off_caller:	.long	trace_hardirqs_off_caller
956#endif
957#ifdef CONFIG_LOCKDEP
958.Llockdep_sys_exit:	.long	lockdep_sys_exit
959#endif
960.Lcritical_start:	.long	__critical_start + 0x80000000
961.Lcritical_length:	.long	__critical_end - __critical_start
962
963		.section .rodata, "a"
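/*
 * syscalls.S presumably expands the SYSCALL macro once per system call;
 * defining it to emit only its first argument turns the include into a
 * table of 31-bit entry points labelled sys_call_table.
 */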
964#define SYSCALL(esa,esame,emu)	.long esa
965	.globl	sys_call_table
966sys_call_table:
967#include "syscalls.S"
968#undef SYSCALL