v6.2
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2001 MIPS Technologies, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2014 Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/isadep.h>
#include <asm/sysmips.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>

	.align	5
NESTED(handle_sys, PT_SIZE, sp)
	.set	noat
	SAVE_SOME
	TRACE_IRQS_ON_RELOAD
	STI
	.set	at

	lw	t1, PT_EPC(sp)		# skip syscall on return

	addiu	t1, 4			# skip to next instruction
	sw	t1, PT_EPC(sp)

	sw	a3, PT_R26(sp)		# save a3 for syscall restarting

	/*
	 * More than four arguments.  Try to deal with it by copying the
	 * stack arguments from the user stack to the kernel stack.
	 * This Sucks (TM).
	 */
	lw	t0, PT_R29(sp)		# get old user stack pointer

	/*
	 * We intentionally keep the kernel stack a little below the top of
	 * userspace so we don't have to do a slower byte accurate check here.
	 */
	addu	t4, t0, 32
	bltz	t4, bad_stack		# -> sp is bad

	/*
	 * Ok, copy the args from the luser stack to the kernel stack.
	 */

	.set    push
	.set    noreorder
	.set	nomacro

load_a4: user_lw(t5, 16(t0))		# argument #5 from usp
load_a5: user_lw(t6, 20(t0))		# argument #6 from usp
load_a6: user_lw(t7, 24(t0))		# argument #7 from usp
load_a7: user_lw(t8, 28(t0))		# argument #8 from usp
loads_done:

	sw	t5, 16(sp)		# argument #5 to ksp
	sw	t6, 20(sp)		# argument #6 to ksp
	sw	t7, 24(sp)		# argument #7 to ksp
	sw	t8, 28(sp)		# argument #8 to ksp
	.set	pop

	.section __ex_table,"a"
	PTR_WD	load_a4, bad_stack_a4
	PTR_WD	load_a5, bad_stack_a5
	PTR_WD	load_a6, bad_stack_a6
	PTR_WD	load_a7, bad_stack_a7
	.previous

	lw	t0, TI_FLAGS($28)	# syscall tracing enabled?
	li	t1, _TIF_WORK_SYSCALL_ENTRY
	and	t0, t1
	bnez	t0, syscall_trace_entry # -> yes
syscall_common:
	subu	v0, v0, __NR_O32_Linux	# check syscall number
	sltiu	t0, v0, __NR_O32_Linux_syscalls
	beqz	t0, illegal_syscall

	sll	t0, v0, 2
	la	t1, sys_call_table
	addu	t1, t0
	lw	t2, (t1)		# syscall routine

	beqz	t2, illegal_syscall

	jalr	t2			# Do The Real Thing (TM)

	li	t0, -EMAXERRNO - 1	# error?
	sltu	t0, t0, v0
	sw	t0, PT_R7(sp)		# set error flag
	beqz	t0, 1f

	lw	t1, PT_R2(sp)		# syscall number
	negu	v0			# error
	sw	t1, PT_R0(sp)		# save it for syscall restarting
1:	sw	v0, PT_R2(sp)		# result

o32_syscall_exit:
	j	syscall_exit_partial

/* ------------------------------------------------------------------------ */

syscall_trace_entry:
	SAVE_STATIC
	move	a0, sp

	/*
	 * syscall number is in v0 unless we called syscall(__NR_###)
	 * where the real syscall number is in a0
	 */
	move	a1, v0
	subu	t2, v0,  __NR_O32_Linux
	bnez	t2, 1f /* __NR_syscall at offset 0 */
	lw	a1, PT_R4(sp)

1:	jal	syscall_trace_enter

	bltz	v0, 1f			# seccomp failed? Skip syscall

	RESTORE_STATIC
	lw	v0, PT_R2(sp)		# Restore syscall (maybe modified)
	lw	a0, PT_R4(sp)		# Restore argument registers
	lw	a1, PT_R5(sp)
	lw	a2, PT_R6(sp)
	lw	a3, PT_R7(sp)
	j	syscall_common

1:	j	syscall_exit

/* ------------------------------------------------------------------------ */

	/*
	 * Our open-coded access area sanity test for the stack pointer
	 * failed. We probably should handle this case a bit more drastic.
	 */
bad_stack:
	li	v0, EFAULT
	sw	v0, PT_R2(sp)
	li	t0, 1				# set error flag
	sw	t0, PT_R7(sp)
	j	o32_syscall_exit

bad_stack_a4:
	li	t5, 0
	b	load_a5

bad_stack_a5:
	li	t6, 0
	b	load_a6

bad_stack_a6:
	li	t7, 0
	b	load_a7

bad_stack_a7:
	li	t8, 0
	b	loads_done

	/*
	 * The system call does not exist in this kernel
	 */
illegal_syscall:
	li	v0, ENOSYS			# error
	sw	v0, PT_R2(sp)
	li	t0, 1				# set error flag
	sw	t0, PT_R7(sp)
	j	o32_syscall_exit
	END(handle_sys)

	LEAF(sys_syscall)
	subu	t0, a0, __NR_O32_Linux	# check syscall number
	sltiu	v0, t0, __NR_O32_Linux_syscalls
	beqz	t0, einval		# do not recurse
	sll	t1, t0, 2
	beqz	v0, einval
	lw	t2, sys_call_table(t1)		# syscall routine

	move	a0, a1				# shift argument registers
	move	a1, a2
	move	a2, a3
	lw	a3, 16(sp)
	lw	t4, 20(sp)
	lw	t5, 24(sp)
	lw	t6, 28(sp)
	sw	t4, 16(sp)
	sw	t5, 20(sp)
	sw	t6, 24(sp)
	jr	t2
	/* Unreached */

einval: li	v0, -ENOSYS
	jr	ra
	END(sys_syscall)

#ifdef CONFIG_MIPS_MT_FPAFF
	/*
	 * For FPU affinity scheduling on MIPS MT processors, we need to
	 * intercept sys_sched_xxxaffinity() calls until we get a proper hook
	 * in kernel/sched/core.c.  Considered only temporary we only support
	 * these hooks for the 32-bit kernel - there is no MIPS64 MT processor
	 * atm.
	 */
#define sys_sched_setaffinity	mipsmt_sys_sched_setaffinity
#define sys_sched_getaffinity	mipsmt_sys_sched_getaffinity
#endif /* CONFIG_MIPS_MT_FPAFF */

#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
#define __SYSCALL(nr, entry) 	PTR_WD entry
	.align	2
	.type	sys_call_table, @object
EXPORT(sys_call_table)
#include <asm/syscall_table_o32.h>
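
For orientation, here is a minimal userspace sketch of the calling convention handle_sys implements. It is an editorial illustration, not part of the kernel sources, and demo_write is a made-up symbol. The syscall number goes in v0 (__NR_O32_Linux is 4000, and write is call 4 in that table, hence 4004), arguments 1 through 4 go in a0 to a3, and on return a3 carries the error flag written to PT_R7 above while v0 holds either the result or the positive errno produced by the negu in the handler.

	.text
	.globl	demo_write			# hypothetical symbol, not a real libc entry point
	.ent	demo_write
# long demo_write(int fd, const void *buf, size_t count)
demo_write:
	li	v0, 4004			# __NR_write on o32 (__NR_O32_Linux + 4)
	syscall					# traps into handle_sys
	beqz	a3, 1f				# a3 == 0: success, v0 already holds the result
	negu	v0, v0				# a3 != 0: v0 is a positive errno, hand back -errno
1:	jr	ra
	.end	demo_write

Errors are thus reported out of band in a3 rather than by range-checking v0 in userspace; that flag is the sltu result against -EMAXERRNO - 1 which handle_sys stores in PT_R7, the slot a3 is restored from on the way back out.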
v5.14.15
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2001 MIPS Technologies, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2014 Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/isadep.h>
#include <asm/sysmips.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/war.h>
#include <asm/asm-offsets.h>

	.align	5
NESTED(handle_sys, PT_SIZE, sp)
	.set	noat
	SAVE_SOME
	TRACE_IRQS_ON_RELOAD
	STI
	.set	at

	lw	t1, PT_EPC(sp)		# skip syscall on return

	addiu	t1, 4			# skip to next instruction
	sw	t1, PT_EPC(sp)

	sw	a3, PT_R26(sp)		# save a3 for syscall restarting

	/*
	 * More than four arguments.  Try to deal with it by copying the
	 * stack arguments from the user stack to the kernel stack.
	 * This Sucks (TM).
	 */
	lw	t0, PT_R29(sp)		# get old user stack pointer

	/*
	 * We intentionally keep the kernel stack a little below the top of
	 * userspace so we don't have to do a slower byte accurate check here.
	 */
	addu	t4, t0, 32
	bltz	t4, bad_stack		# -> sp is bad

	/*
	 * Ok, copy the args from the luser stack to the kernel stack.
	 */

	.set    push
	.set    noreorder
	.set	nomacro

load_a4: user_lw(t5, 16(t0))		# argument #5 from usp
load_a5: user_lw(t6, 20(t0))		# argument #6 from usp
load_a6: user_lw(t7, 24(t0))		# argument #7 from usp
load_a7: user_lw(t8, 28(t0))		# argument #8 from usp
loads_done:

	sw	t5, 16(sp)		# argument #5 to ksp
	sw	t6, 20(sp)		# argument #6 to ksp
	sw	t7, 24(sp)		# argument #7 to ksp
	sw	t8, 28(sp)		# argument #8 to ksp
	.set	pop

	.section __ex_table,"a"
	PTR	load_a4, bad_stack_a4
	PTR	load_a5, bad_stack_a5
	PTR	load_a6, bad_stack_a6
	PTR	load_a7, bad_stack_a7
	.previous

	lw	t0, TI_FLAGS($28)	# syscall tracing enabled?
	li	t1, _TIF_WORK_SYSCALL_ENTRY
	and	t0, t1
	bnez	t0, syscall_trace_entry # -> yes
syscall_common:
	subu	v0, v0, __NR_O32_Linux	# check syscall number
	sltiu	t0, v0, __NR_O32_Linux_syscalls
	beqz	t0, illegal_syscall

	sll	t0, v0, 2
	la	t1, sys_call_table
	addu	t1, t0
	lw	t2, (t1)		# syscall routine

	beqz	t2, illegal_syscall

	jalr	t2			# Do The Real Thing (TM)

	li	t0, -EMAXERRNO - 1	# error?
	sltu	t0, t0, v0
	sw	t0, PT_R7(sp)		# set error flag
	beqz	t0, 1f

	lw	t1, PT_R2(sp)		# syscall number
	negu	v0			# error
	sw	t1, PT_R0(sp)		# save it for syscall restarting
1:	sw	v0, PT_R2(sp)		# result

o32_syscall_exit:
	j	syscall_exit_partial

/* ------------------------------------------------------------------------ */

syscall_trace_entry:
	SAVE_STATIC
	move	a0, sp

	/*
	 * syscall number is in v0 unless we called syscall(__NR_###)
	 * where the real syscall number is in a0
	 */
	move	a1, v0
	subu	t2, v0,  __NR_O32_Linux
	bnez	t2, 1f /* __NR_syscall at offset 0 */
	lw	a1, PT_R4(sp)

1:	jal	syscall_trace_enter

	bltz	v0, 1f			# seccomp failed? Skip syscall

	RESTORE_STATIC
	lw	v0, PT_R2(sp)		# Restore syscall (maybe modified)
	lw	a0, PT_R4(sp)		# Restore argument registers
	lw	a1, PT_R5(sp)
	lw	a2, PT_R6(sp)
	lw	a3, PT_R7(sp)
	j	syscall_common

1:	j	syscall_exit

/* ------------------------------------------------------------------------ */

	/*
	 * Our open-coded access area sanity test for the stack pointer
	 * failed. We probably should handle this case a bit more drastic.
	 */
bad_stack:
	li	v0, EFAULT
	sw	v0, PT_R2(sp)
	li	t0, 1				# set error flag
	sw	t0, PT_R7(sp)
	j	o32_syscall_exit

bad_stack_a4:
	li	t5, 0
	b	load_a5

bad_stack_a5:
	li	t6, 0
	b	load_a6

bad_stack_a6:
	li	t7, 0
	b	load_a7

bad_stack_a7:
	li	t8, 0
	b	loads_done

	/*
	 * The system call does not exist in this kernel
	 */
illegal_syscall:
	li	v0, ENOSYS			# error
	sw	v0, PT_R2(sp)
	li	t0, 1				# set error flag
	sw	t0, PT_R7(sp)
	j	o32_syscall_exit
	END(handle_sys)

	LEAF(sys_syscall)
	subu	t0, a0, __NR_O32_Linux	# check syscall number
	sltiu	v0, t0, __NR_O32_Linux_syscalls
	beqz	t0, einval		# do not recurse
	sll	t1, t0, 2
	beqz	v0, einval
	lw	t2, sys_call_table(t1)		# syscall routine

	move	a0, a1				# shift argument registers
	move	a1, a2
	move	a2, a3
	lw	a3, 16(sp)
	lw	t4, 20(sp)
	lw	t5, 24(sp)
	lw	t6, 28(sp)
	sw	t4, 16(sp)
	sw	t5, 20(sp)
	sw	t6, 24(sp)
	jr	t2
	/* Unreached */

einval: li	v0, -ENOSYS
	jr	ra
	END(sys_syscall)

#ifdef CONFIG_MIPS_MT_FPAFF
	/*
	 * For FPU affinity scheduling on MIPS MT processors, we need to
	 * intercept sys_sched_xxxaffinity() calls until we get a proper hook
	 * in kernel/sched/core.c.  Considered only temporary we only support
	 * these hooks for the 32-bit kernel - there is no MIPS64 MT processor
	 * atm.
	 */
#define sys_sched_setaffinity	mipsmt_sys_sched_setaffinity
#define sys_sched_getaffinity	mipsmt_sys_sched_getaffinity
#endif /* CONFIG_MIPS_MT_FPAFF */

#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
#define __SYSCALL(nr, entry) 	PTR entry
	.align	2
	.type	sys_call_table, @object
EXPORT(sys_call_table)
#include <asm/syscall_table_o32.h>
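
A second sketch, again editorial rather than from the kernel tree, shows why handle_sys copies 16(usp) through 28(usp): under the o32 C ABI the caller already stores arguments 5 through 8 in its outgoing argument area at 16(sp) and up, so a wrapper that keeps the syscall's own prototype can trap without touching the stack and load_a4 through load_a7 pick the extra words straight off the user stack. demo_llseek is a made-up symbol and the syscall number is an assumption to be checked against <asm/unistd.h>.

	.text
	.globl	demo_llseek			# hypothetical wrapper for _llseek(fd, hi, lo, &result, whence)
	.ent	demo_llseek
demo_llseek:
	# a0 to a3 carry the first four arguments; the fifth (whence) is already
	# at 16(sp), placed there by the C caller, exactly where load_a4 reads it
	# before it is copied to 16(ksp).
	li	v0, 4140			# assumed __NR__llseek on o32 (__NR_O32_Linux + 140)
	syscall
	beqz	a3, 1f				# error flag clear: the result is in v0
	negu	v0, v0				# otherwise v0 holds a positive errno
1:	jr	ra
	.end	demo_llseek

Because sp is not moved before the trap, PT_R29 still points at the caller's frame; the addu/bltz test above only has to confirm that usp plus 32 bytes stays out of the kernel half of the address space, and any fault on the user_lw loads is caught through the __ex_table entries, which zero the corresponding argument and continue.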