/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2001 MIPS Technologies, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2014 Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/isadep.h>
#include <asm/sysmips.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/war.h>
#include <asm/asm-offsets.h>

	.align	5
NESTED(handle_sys, PT_SIZE, sp)
	.set	noat
	SAVE_SOME
	TRACE_IRQS_ON_RELOAD
	STI
	.set	at

	lw	t1, PT_EPC(sp)		# skip syscall on return

	addiu	t1, 4			# skip to next instruction
	sw	t1, PT_EPC(sp)

	sw	a3, PT_R26(sp)		# save a3 for syscall restarting

	/*
	 * More than four arguments. Try to deal with it by copying the
	 * stack arguments from the user stack to the kernel stack.
	 * This Sucks (TM).
	 */
	lw	t0, PT_R29(sp)		# get old user stack pointer

	/*
	 * We intentionally keep the kernel stack a little below the top of
	 * userspace so we don't have to do a slower byte accurate check here.
	 */
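	/*
	 * Rough sketch of the check below, assuming the usual 32-bit
	 * USER_DS address limit with only bit 31 set: t4 = usp + 32 is
	 * one byte past the last stack argument we will read, and ANDing
	 * it with the thread's addr_limit leaves a negative value only
	 * when that range would reach into kernel space, in which case
	 * we bail out to bad_stack.
	 */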
	lw	t5, TI_ADDR_LIMIT($28)
	addu	t4, t0, 32
	and	t5, t4
	bltz	t5, bad_stack		# -> sp is bad

	/*
	 * Ok, copy the args from the luser stack to the kernel stack.
	 */

	.set	push
	.set	noreorder
	.set	nomacro

load_a4: user_lw(t5, 16(t0))		# argument #5 from usp
load_a5: user_lw(t6, 20(t0))		# argument #6 from usp
load_a6: user_lw(t7, 24(t0))		# argument #7 from usp
load_a7: user_lw(t8, 28(t0))		# argument #8 from usp
loads_done:

	sw	t5, 16(sp)		# argument #5 to ksp
	sw	t6, 20(sp)		# argument #6 to ksp
	sw	t7, 24(sp)		# argument #7 to ksp
	sw	t8, 28(sp)		# argument #8 to ksp
	.set	pop

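	/*
	 * Each exception table entry below pairs one of the user loads
	 * above with a fixup handler: if a user_lw faults, the fault
	 * address is looked up in __ex_table and execution resumes at the
	 * matching bad_stack_aN label, which zeroes that argument
	 * register and falls through to the next load.
	 */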
	.section __ex_table,"a"
	PTR	load_a4, bad_stack_a4
	PTR	load_a5, bad_stack_a5
	PTR	load_a6, bad_stack_a6
	PTR	load_a7, bad_stack_a7
	.previous

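	/*
	 * _TIF_WORK_SYSCALL_ENTRY is the mask of thread flags (typically
	 * ptrace syscall tracing, seccomp, audit and tracepoint work)
	 * that force us through the slow syscall_trace_entry path before
	 * the syscall is dispatched.
	 */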
	lw	t0, TI_FLAGS($28)	# syscall tracing enabled?
	li	t1, _TIF_WORK_SYSCALL_ENTRY
	and	t0, t1
	bnez	t0, syscall_trace_entry	# -> yes
syscall_common:
	subu	v0, v0, __NR_O32_Linux	# check syscall number
	sltiu	t0, v0, __NR_O32_Linux_syscalls
	beqz	t0, illegal_syscall

	sll	t0, v0, 2
	la	t1, sys_call_table
	addu	t1, t0
	lw	t2, (t1)		# syscall routine

	beqz	t2, illegal_syscall

	jalr	t2			# Do The Real Thing (TM)

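	/*
	 * o32 return convention: the handler returns a negative errno in
	 * v0 on failure.  The sltu below sets t0 to 1 when v0 lies in the
	 * -EMAXERRNO..-1 range; t0 is stored to PT_R7 (a3 as seen by
	 * userspace) as the error flag, and in that case v0 is negated so
	 * userspace receives a positive errno.
	 */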
	li	t0, -EMAXERRNO - 1	# error?
	sltu	t0, t0, v0
	sw	t0, PT_R7(sp)		# set error flag
	beqz	t0, 1f

	lw	t1, PT_R2(sp)		# syscall number
	negu	v0			# error
	sw	t1, PT_R0(sp)		# save it for syscall restarting
1:	sw	v0, PT_R2(sp)		# result

o32_syscall_exit:
	j	syscall_exit_partial

/* ------------------------------------------------------------------------ */

syscall_trace_entry:
	SAVE_STATIC
	move	a0, sp

	/*
	 * syscall number is in v0 unless we called syscall(__NR_###)
	 * where the real syscall number is in a0
	 */
	move	a1, v0
	subu	t2, v0, __NR_O32_Linux
	bnez	t2, 1f			/* __NR_syscall at offset 0 */
	lw	a1, PT_R4(sp)

1:	jal	syscall_trace_enter

	bltz	v0, 1f			# seccomp failed? Skip syscall

	RESTORE_STATIC
	lw	v0, PT_R2(sp)		# Restore syscall (maybe modified)
	lw	a0, PT_R4(sp)		# Restore argument registers
	lw	a1, PT_R5(sp)
	lw	a2, PT_R6(sp)
	lw	a3, PT_R7(sp)
	j	syscall_common

1:	j	syscall_exit

/* ------------------------------------------------------------------------ */

	/*
	 * Our open-coded access area sanity test for the stack pointer
	 * failed. We probably should handle this case a bit more drastically.
	 */
bad_stack:
	li	v0, EFAULT
	sw	v0, PT_R2(sp)
	li	t0, 1			# set error flag
	sw	t0, PT_R7(sp)
	j	o32_syscall_exit

bad_stack_a4:
	li	t5, 0
	b	load_a5

bad_stack_a5:
	li	t6, 0
	b	load_a6

bad_stack_a6:
	li	t7, 0
	b	load_a7

bad_stack_a7:
	li	t8, 0
	b	loads_done

	/*
	 * The system call does not exist in this kernel
	 */
illegal_syscall:
	li	v0, ENOSYS		# error
	sw	v0, PT_R2(sp)
	li	t0, 1			# set error flag
	sw	t0, PT_R7(sp)
	j	o32_syscall_exit
	END(handle_sys)

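/*
 * sys_syscall() implements the syscall(2) indirection for o32: the real
 * syscall number arrives in a0 and every argument is shifted down by one
 * slot, so a1..a3 move into a0..a2 and the stack arguments are repacked
 * one word lower before jumping to the selected handler.
 */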
	LEAF(sys_syscall)
	subu	t0, a0, __NR_O32_Linux	# check syscall number
	sltiu	v0, t0, __NR_O32_Linux_syscalls
	beqz	t0, einval		# do not recurse
	sll	t1, t0, 2
	beqz	v0, einval
	lw	t2, sys_call_table(t1)	# syscall routine

	move	a0, a1			# shift argument registers
	move	a1, a2
	move	a2, a3
	lw	a3, 16(sp)
	lw	t4, 20(sp)
	lw	t5, 24(sp)
	lw	t6, 28(sp)
	sw	t4, 16(sp)
	sw	t5, 20(sp)
	sw	t6, 24(sp)
	jr	t2
	/* Unreached */

einval:	li	v0, -ENOSYS
	jr	ra
	END(sys_syscall)

#ifdef CONFIG_MIPS_MT_FPAFF
	/*
	 * For FPU affinity scheduling on MIPS MT processors, we need to
	 * intercept sys_sched_xxxaffinity() calls until we get a proper hook
	 * in kernel/sched/core.c.  Since this is considered only a temporary
	 * measure, we only provide these hooks for the 32-bit kernel - there
	 * is no MIPS64 MT processor at the moment.
	 */
#define sys_sched_setaffinity	mipsmt_sys_sched_setaffinity
#define sys_sched_getaffinity	mipsmt_sys_sched_getaffinity
#endif /* CONFIG_MIPS_MT_FPAFF */

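/*
 * Build the o32 syscall table: the included header (generated at build
 * time from the o32 syscall table definitions) expands to one
 * __SYSCALL(nr, entry) line per system call, and the macro below turns
 * each of those into a pointer-sized table entry.
 */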
#define __SYSCALL(nr, entry)	PTR entry
	.align	2
	.type	sys_call_table, @object
EXPORT(sys_call_table)
#include <asm/syscall_table_32_o32.h>
#undef __SYSCALL