v5.4
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995-99, 2000-02, 06 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2001 MIPS Technologies, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2014 Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/isadep.h>
#include <asm/sysmips.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/war.h>
#include <asm/asm-offsets.h>

	.align	5
NESTED(handle_sys, PT_SIZE, sp)
	.set	noat
	SAVE_SOME
	TRACE_IRQS_ON_RELOAD
	STI
	.set	at

	lw	t1, PT_EPC(sp)		# skip syscall on return

	addiu	t1, 4			# skip to next instruction
	sw	t1, PT_EPC(sp)

	sw	a3, PT_R26(sp)		# save a3 for syscall restarting

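	/*
	 * O32 syscall convention: the syscall number arrives in v0, the
	 * first four arguments in a0-a3, and any further arguments at
	 * 16(sp)..28(sp) on the caller's stack.  On return, v0 holds the
	 * result and a3 (saved as PT_R7) the error flag.
	 */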
	/*
	 * More than four arguments.  Try to deal with it by copying the
	 * stack arguments from the user stack to the kernel stack.
	 * This Sucks (TM).
	 */
	lw	t0, PT_R29(sp)		# get old user stack pointer

	/*
	 * We intentionally keep the kernel stack a little below the top of
	 * userspace so we don't have to do a slower byte accurate check here.
	 */
	lw	t5, TI_ADDR_LIMIT($28)
	addu	t4, t0, 32
	and	t5, t4
	bltz	t5, bad_stack		# -> sp is bad

	/*
	 * Ok, copy the args from the luser stack to the kernel stack.
	 */

	.set    push
	.set    noreorder
	.set	nomacro

load_a4: user_lw(t5, 16(t0))		# argument #5 from usp
load_a5: user_lw(t6, 20(t0))		# argument #6 from usp
load_a6: user_lw(t7, 24(t0))		# argument #7 from usp
load_a7: user_lw(t8, 28(t0))		# argument #8 from usp
loads_done:

	sw	t5, 16(sp)		# argument #5 to ksp
	sw	t6, 20(sp)		# argument #6 to ksp
	sw	t7, 24(sp)		# argument #7 to ksp
	sw	t8, 28(sp)		# argument #8 to ksp
	.set	pop

	.section __ex_table,"a"
	PTR	load_a4, bad_stack_a4
	PTR	load_a5, bad_stack_a5
	PTR	load_a6, bad_stack_a6
	PTR	load_a7, bad_stack_a7
	.previous
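	/*
	 * The exception table entries above attach a fixup to each user
	 * load: if reading an argument from the user stack faults, the
	 * fault handler branches to the matching bad_stack_aN label,
	 * which substitutes 0 for that argument and resumes the copy.
	 */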

	lw	t0, TI_FLAGS($28)	# syscall tracing enabled?
	li	t1, _TIF_WORK_SYSCALL_ENTRY
	and	t0, t1
	bnez	t0, syscall_trace_entry # -> yes
syscall_common:
	subu	v0, v0, __NR_O32_Linux	# check syscall number
	sltiu	t0, v0, __NR_O32_Linux_syscalls
	beqz	t0, illegal_syscall

	sll	t0, v0, 2
	la	t1, sys_call_table
	addu	t1, t0
	lw	t2, (t1)		# syscall routine

	beqz	t2, illegal_syscall

	jalr	t2			# Do The Real Thing (TM)

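	/*
	 * Return convention: a result in the range [-EMAXERRNO, -1] is an
	 * error.  In that case the error flag is set in the saved a3
	 * (PT_R7), v0 is negated so userspace sees a positive errno, and
	 * the original syscall number is kept in PT_R0 for syscall
	 * restarting.
	 */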
	li	t0, -EMAXERRNO - 1	# error?
	sltu	t0, t0, v0
	sw	t0, PT_R7(sp)		# set error flag
	beqz	t0, 1f

	lw	t1, PT_R2(sp)		# syscall number
	negu	v0			# error
	sw	t1, PT_R0(sp)		# save it for syscall restarting
1:	sw	v0, PT_R2(sp)		# result

o32_syscall_exit:
	j	syscall_exit_partial

/* ------------------------------------------------------------------------ */

syscall_trace_entry:
	SAVE_STATIC
	move	a0, sp

	/*
	 * syscall number is in v0 unless we called syscall(__NR_###)
	 * where the real syscall number is in a0
	 */
	move	a1, v0
	subu	t2, v0,  __NR_O32_Linux
	bnez	t2, 1f /* __NR_syscall at offset 0 */
	lw	a1, PT_R4(sp)

1:	jal	syscall_trace_enter

	bltz	v0, 1f			# seccomp failed? Skip syscall

	RESTORE_STATIC
	lw	v0, PT_R2(sp)		# Restore syscall (maybe modified)
	lw	a0, PT_R4(sp)		# Restore argument registers
	lw	a1, PT_R5(sp)
	lw	a2, PT_R6(sp)
	lw	a3, PT_R7(sp)
	j	syscall_common

1:	j	syscall_exit

/* ------------------------------------------------------------------------ */

	/*
	 * Our open-coded access area sanity test for the stack pointer
	 * failed. We probably should handle this case a bit more
	 * drastically.
	 */
bad_stack:
	li	v0, EFAULT
	sw	v0, PT_R2(sp)
	li	t0, 1				# set error flag
	sw	t0, PT_R7(sp)
	j	o32_syscall_exit

bad_stack_a4:
	li	t5, 0
	b	load_a5

bad_stack_a5:
	li	t6, 0
	b	load_a6

bad_stack_a6:
	li	t7, 0
	b	load_a7

bad_stack_a7:
	li	t8, 0
	b	loads_done

	/*
	 * The system call does not exist in this kernel
	 */
illegal_syscall:
	li	v0, ENOSYS			# error
	sw	v0, PT_R2(sp)
	li	t0, 1				# set error flag
	sw	t0, PT_R7(sp)
	j	o32_syscall_exit
	END(handle_sys)

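	/*
	 * sys_syscall implements the indirect syscall(2) interface for
	 * O32: the real syscall number is taken from a0, so the remaining
	 * register arguments are shifted down by one and the stack-based
	 * arguments move up by one slot.
	 */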
	LEAF(sys_syscall)
	subu	t0, a0, __NR_O32_Linux	# check syscall number
	sltiu	v0, t0, __NR_O32_Linux_syscalls
	beqz	t0, einval		# do not recurse
	sll	t1, t0, 2
	beqz	v0, einval
	lw	t2, sys_call_table(t1)		# syscall routine

	move	a0, a1				# shift argument registers
	move	a1, a2
	move	a2, a3
	lw	a3, 16(sp)
	lw	t4, 20(sp)
	lw	t5, 24(sp)
	lw	t6, 28(sp)
	sw	t4, 16(sp)
	sw	t5, 20(sp)
	sw	t6, 24(sp)
	jr	t2
	/* Unreached */

einval: li	v0, -ENOSYS
	jr	ra
	END(sys_syscall)

#ifdef CONFIG_MIPS_MT_FPAFF
	/*
	 * For FPU affinity scheduling on MIPS MT processors, we need to
	 * intercept sys_sched_xxxaffinity() calls until we get a proper hook
	 * in kernel/sched/core.c.  As this is considered only temporary, we
	 * only support these hooks for the 32-bit kernel - there is no
	 * MIPS64 MT processor at the moment.
	 */
#define sys_sched_setaffinity	mipsmt_sys_sched_setaffinity
#define sys_sched_getaffinity	mipsmt_sys_sched_getaffinity
#endif /* CONFIG_MIPS_MT_FPAFF */

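	/*
	 * The table below is generated: each __SYSCALL() entry from the
	 * included header expands to one pointer-sized slot (the header
	 * is presumably produced from the O32 syscall table definition at
	 * build time).
	 */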
#define __SYSCALL(nr, entry) 	PTR entry
	.align	2
	.type	sys_call_table, @object
EXPORT(sys_call_table)
#include <asm/syscall_table_32_o32.h>
#undef __SYSCALL
v6.9.4
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995-99, 2000-02, 06 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2001 MIPS Technologies, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2014 Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/isadep.h>
#include <asm/sysmips.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>

	.align	5
NESTED(handle_sys, PT_SIZE, sp)
	.set	noat
	SAVE_SOME
	TRACE_IRQS_ON_RELOAD
	STI
	.set	at

	lw	t1, PT_EPC(sp)		# skip syscall on return

	addiu	t1, 4			# skip to next instruction
	sw	t1, PT_EPC(sp)

	sw	a3, PT_R26(sp)		# save a3 for syscall restarting

	/*
	 * More than four arguments.  Try to deal with it by copying the
	 * stack arguments from the user stack to the kernel stack.
	 * This Sucks (TM).
	 */
	lw	t0, PT_R29(sp)		# get old user stack pointer

	/*
	 * We intentionally keep the kernel stack a little below the top of
	 * userspace so we don't have to do a slower byte accurate check here.
	 */
	addu	t4, t0, 32
	bltz	t4, bad_stack		# -> sp is bad

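	/*
	 * Note: the TI_ADDR_LIMIT check done by older kernels is gone
	 * here; since O32 user addresses are confined to the low 2GB, a
	 * set sign bit on (usp + 32) is enough to reject a kernel-space
	 * stack pointer.
	 */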
	/*
	 * Ok, copy the args from the luser stack to the kernel stack.
	 */

	.set    push
	.set    noreorder
	.set	nomacro

load_a4: user_lw(t5, 16(t0))		# argument #5 from usp
load_a5: user_lw(t6, 20(t0))		# argument #6 from usp
load_a6: user_lw(t7, 24(t0))		# argument #7 from usp
load_a7: user_lw(t8, 28(t0))		# argument #8 from usp
loads_done:

	sw	t5, 16(sp)		# argument #5 to ksp
	sw	t6, 20(sp)		# argument #6 to ksp
	sw	t7, 24(sp)		# argument #7 to ksp
	sw	t8, 28(sp)		# argument #8 to ksp
	.set	pop

	.section __ex_table,"a"
	PTR_WD	load_a4, bad_stack_a4
	PTR_WD	load_a5, bad_stack_a5
	PTR_WD	load_a6, bad_stack_a6
	PTR_WD	load_a7, bad_stack_a7
	.previous

	/*
	 * syscall number is in v0 unless we called syscall(__NR_###)
	 * where the real syscall number is in a0
	 */
	subu	t2, v0,  __NR_O32_Linux
	bnez	t2, 1f /* __NR_syscall at offset 0 */
	LONG_S	a0, TI_SYSCALL($28)	# Save a0 as syscall number
	b	2f
1:
	LONG_S	v0, TI_SYSCALL($28)	# Save v0 as syscall number
2:

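	/*
	 * The syscall number is stashed in thread_info (TI_SYSCALL) up
	 * front, so the __NR_syscall disambiguation no longer has to be
	 * repeated in syscall_trace_entry as it was in older kernels.
	 */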
	lw	t0, TI_FLAGS($28)	# syscall tracing enabled?
	li	t1, _TIF_WORK_SYSCALL_ENTRY
	and	t0, t1
	bnez	t0, syscall_trace_entry # -> yes
syscall_common:
	subu	v0, v0, __NR_O32_Linux	# check syscall number
	sltiu	t0, v0, __NR_O32_Linux_syscalls
	beqz	t0, illegal_syscall

	sll	t0, v0, 2
	la	t1, sys_call_table
	addu	t1, t0
	lw	t2, (t1)		# syscall routine

	beqz	t2, illegal_syscall

	jalr	t2			# Do The Real Thing (TM)

	li	t0, -EMAXERRNO - 1	# error?
	sltu	t0, t0, v0
	sw	t0, PT_R7(sp)		# set error flag
	beqz	t0, 1f

	lw	t1, PT_R2(sp)		# syscall number
	negu	v0			# error
	sw	t1, PT_R0(sp)		# save it for syscall restarting
1:	sw	v0, PT_R2(sp)		# result

o32_syscall_exit:
	j	syscall_exit_partial

/* ------------------------------------------------------------------------ */

syscall_trace_entry:
	SAVE_STATIC
	move	a0, sp

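	/*
	 * syscall_trace_enter() takes the pt_regs pointer in a0; a
	 * negative return value (e.g. the syscall was denied) causes the
	 * syscall itself to be skipped.
	 */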
	jal	syscall_trace_enter

	bltz	v0, 1f			# seccomp failed? Skip syscall

	RESTORE_STATIC
	lw	v0, PT_R2(sp)		# Restore syscall (maybe modified)
	lw	a0, PT_R4(sp)		# Restore argument registers
	lw	a1, PT_R5(sp)
	lw	a2, PT_R6(sp)
	lw	a3, PT_R7(sp)
	j	syscall_common

1:	j	syscall_exit

/* ------------------------------------------------------------------------ */

	/*
	 * Our open-coded access area sanity test for the stack pointer
	 * failed. We probably should handle this case a bit more
	 * drastically.
	 */
bad_stack:
	li	v0, EFAULT
	sw	v0, PT_R2(sp)
	li	t0, 1				# set error flag
	sw	t0, PT_R7(sp)
	j	o32_syscall_exit

bad_stack_a4:
	li	t5, 0
	b	load_a5

bad_stack_a5:
	li	t6, 0
	b	load_a6

bad_stack_a6:
	li	t7, 0
	b	load_a7

bad_stack_a7:
	li	t8, 0
	b	loads_done

	/*
	 * The system call does not exist in this kernel
	 */
illegal_syscall:
	li	v0, ENOSYS			# error
	sw	v0, PT_R2(sp)
	li	t0, 1				# set error flag
	sw	t0, PT_R7(sp)
	j	o32_syscall_exit
	END(handle_sys)

	LEAF(sys_syscall)
	subu	t0, a0, __NR_O32_Linux	# check syscall number
	sltiu	v0, t0, __NR_O32_Linux_syscalls
	beqz	t0, einval		# do not recurse
	sll	t1, t0, 2
	beqz	v0, einval
	lw	t2, sys_call_table(t1)		# syscall routine

	move	a0, a1				# shift argument registers
	move	a1, a2
	move	a2, a3
	lw	a3, 16(sp)
	lw	t4, 20(sp)
	lw	t5, 24(sp)
	lw	t6, 28(sp)
	sw	t4, 16(sp)
	sw	t5, 20(sp)
	sw	t6, 24(sp)
	jr	t2
	/* Unreached */

einval: li	v0, -ENOSYS
	jr	ra
	END(sys_syscall)

#ifdef CONFIG_MIPS_MT_FPAFF
	/*
	 * For FPU affinity scheduling on MIPS MT processors, we need to
	 * intercept sys_sched_xxxaffinity() calls until we get a proper hook
	 * in kernel/sched/core.c.  As this is considered only temporary, we
	 * only support these hooks for the 32-bit kernel - there is no
	 * MIPS64 MT processor at the moment.
	 */
#define sys_sched_setaffinity	mipsmt_sys_sched_setaffinity
#define sys_sched_getaffinity	mipsmt_sys_sched_getaffinity
#endif /* CONFIG_MIPS_MT_FPAFF */

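	/*
	 * For the generated table below, compat entries are mapped to
	 * their native entry points (__SYSCALL_WITH_COMPAT falls back to
	 * __SYSCALL) and each slot is emitted as a pointer-sized word via
	 * PTR_WD.
	 */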
#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
#define __SYSCALL(nr, entry) 	PTR_WD entry
	.align	2
	.type	sys_call_table, @object
EXPORT(sys_call_table)
#include <asm/syscall_table_o32.h>