/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 */

#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include <asm/page.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif

	.equ	NR_syscalls, __NR_syscalls

#include "entry-header.S"

saved_psr	.req	r8
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING_USER)
saved_pc	.req	r9
#define TRACE(x...) x
#else
saved_pc	.req	lr
#define TRACE(x...)
#endif

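/*
 * TRACE() expands its argument only when the extra bookkeeping above is
 * configured in; e.g. TRACE( mov saved_pc, lr ) in vector_swi becomes a
 * real mov when tracing or context tracking is enabled, and disappears
 * otherwise, in which case saved_pc simply aliases lr.
 */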
	.section .entry.text,"ax",%progbits
	.align	5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING_USER) || \
      IS_ENABLED(CONFIG_DEBUG_RSEQ))
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * such as avoiding writing r0 to the stack.  We only use this path if we
 * have tracing, context tracking and rseq debug disabled - the overheads
 * from those features make this path too inefficient.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	movs	r1, r1, lsl #16
	bne	fast_work_pending

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
	/* fall through to work_pending */
#else
/*
 * The "replacement" ret_fast_syscall for when tracing, context tracking,
 * or rseq debug is enabled.  As we will need to call out to some C functions,
 * we save r0 first to avoid needing to save registers around each C function
 * call.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	mov	r0, sp				@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	movs	r1, r1, lsl #16
	beq	no_work_pending
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Slower path - fall through to work_pending */
#endif

	tst	r1, #_TIF_SYSCALL_WORK
	bne	__sys_trace_return_nosave
slow_work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	str	scno, [tsk, #TI_ABI_SYSCALL]	@ make sure tracers see update
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 * IRQs may be enabled here, so always disable them.  Note that we use the
 * "notrace" version to avoid calling into the tracing code unnecessarily.
 * do_work_pending() will update this state if necessary.
 */
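/*
 * Note: scno (r7), tbl (r8) and why (also r8) are register aliases from
 * entry-header.S; scno carries the syscall number and tbl the syscall
 * table pointer, so why is non-zero whenever we are inside a real
 * syscall.
 */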
ENTRY(ret_to_user)
ret_slow_syscall:
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	enable_irq_notrace			@ enable interrupts
	mov	r0, sp				@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace			@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	movs	r1, r1, lsl #16
	bne	slow_work_pending
no_work_pending:
	asm_trace_hardirqs_on save = 0

	ct_user_enter save = 0

#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	bl	stackleak_erase_on_task_stack
#endif
	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
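/*
 * copy_thread() leaves the thread function in r5 (NULL for a user fork)
 * and its argument in r4; a kernel thread tail-calls r5 below, while a
 * user child falls through to ret_slow_syscall with the pt_regs copy of
 * r0 already zeroed.
 */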
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0
	movne	r0, r4
	badrne	lr, 1f
	retne	r5
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
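/*
 * Spectre-BHB hardened SWI entries: scrub the branch history, either
 * with a loop of eight taken branches or with BPIALL, before joining
 * the common vector_swi path at the 3: label below.
 */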
ENTRY(vector_bhb_loop8_swi)
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}
	mov	r8, #8
1:	b	2f
2:	subs	r8, r8, #1
	bne	1b
	dsb	nsh
	isb
	b	3f
ENDPROC(vector_bhb_loop8_swi)

	.align	5
ENTRY(vector_bhb_bpiall_swi)
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}
	mcr	p15, 0, r8, c7, c5, 6	@ BPIALL
	isb
	b	3f
ENDPROC(vector_bhb_bpiall_swi)
#endif
	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
3:
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	saved_psr, spsr			@ called from non-FIQ mode, so ok.
 TRACE(	mov	saved_pc, lr		)
	str	saved_pc, [sp, #S_PC]		@ Save calling PC
	str	saved_psr, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	reload_current r10, ip
	zero_fp
	alignment_trap r10, ip, cr_alignment
	asm_trace_hardirqs_on save=0
	enable_irq_notrace
	ct_user_exit save=0

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
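	/*
	 * Illustrative example: an EABI caller issues "swi #0" with the
	 * number already in r7, whereas an OABI caller encodes it in the
	 * instruction itself as "swi #(__NR_OABI_SYSCALL_BASE + NR)", so
	 * the 24-bit immediate has to be read back out of the SWI
	 * instruction word.
	 */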
#ifdef CONFIG_ARM_THUMB
	tst	saved_psr, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [saved_pc, #-4]	)	@ get SWI instruction
#else
 USER(	ldr	r10, [saved_pc, #-4]	)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
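	/*
	 * A minimal EABI invocation therefore looks like (illustrative):
	 *	mov	r7, #__NR_getpid
	 *	swi	#0
	 */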
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	saved_psr, #PSR_T_BIT		@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [saved_pc, #-4]	)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [saved_pc, #-4]	)	@ get SWI instruction
#endif

	/* saved_psr and saved_pc are now dead */

	uaccess_disable tbl
	get_thread_info tsk

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
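	/*
	 * The OABI immediate is __NR_OABI_SYSCALL_BASE + NR, so once the
	 * bics below has cleared the opcode byte, a non-zero result means
	 * an OABI call and the eor strips the base to leave the table
	 * index in scno.
	 */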
	bics	r10, r10, #0xff000000
	strne	r10, [tsk, #TI_ABI_SYSCALL]
	streq	scno, [tsk, #TI_ABI_SYSCALL]
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	str	scno, [tsk, #TI_ABI_SYSCALL]
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#else
	str	scno, [tsk, #TI_ABI_SYSCALL]
#endif
	/*
	 * Reload the registers that may have been corrupted on entry to
	 * the syscall assembly (by tracing or context tracking.)
	 */
 TRACE(	ldmia	sp, {r0 - r3}		)

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	invoke_syscall tbl, scno, r10, __ret_fast_syscall

	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT.  Instead, return to the faulting
	 * instruction and re-enter the user fault handling path trying
	 * to page it in.  This will likely result in sending SEGV to the
	 * current task.
	 */
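	/*
	 * 9001 is the fixup label that the USER() annotations above hook
	 * into the exception table; we land here if loading the SWI
	 * instruction word faults.
	 */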
9001:
	sub	lr, saved_pc, #4
	str	lr, [sp, #S_PC]
	get_thread_info tsk
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)
	.ltorg

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter
	mov	scno, r0
	invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack

__sys_trace_return_nosave:
	enable_irq_notrace
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.macro	syscall_table_start, sym
	.equ	__sys_nr, 0
	.type	\sym, #object
ENTRY(\sym)
	.endm

	.macro	syscall, nr, func
	.ifgt	__sys_nr - \nr
	.error	"Duplicated/unordered system call entry"
	.endif
	.rept	\nr - __sys_nr
	.long	sys_ni_syscall
	.endr
	.long	\func
	.equ	__sys_nr, \nr + 1
	.endm

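/*
 * For example (illustrative), "syscall 5, sys_open" pads the table with
 * sys_ni_syscall up to entry 5 and then emits ".long sys_open"; the real
 * entries are generated from <calls-eabi.S>/<calls-oabi.S> through the
 * __SYSCALL macros below.
 */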
	.macro	syscall_table_end, sym
	.ifgt	__sys_nr - __NR_syscalls
	.error	"System call table too big"
	.endif
	.rept	__NR_syscalls - __sys_nr
	.long	sys_ni_syscall
	.endr
	.size	\sym, . - \sym
	.endm

#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
#define __SYSCALL(nr, func) syscall nr, func

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
	syscall_table_start sys_call_table
#ifdef CONFIG_AEABI
#include <calls-eabi.S>
#else
#include <calls-oabi.S>
#endif
	syscall_table_end sys_call_table

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
#ifdef CONFIG_CPU_SPECTRE
	movhs	scno, #0
	csdb
#endif
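	/*
	 * Under CONFIG_CPU_SPECTRE the movhs above clamps an out-of-range
	 * index to zero and csdb stops a speculated out-of-bounds value
	 * from reaching the table load below.
	 */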
	stmialo	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0				@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0				@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

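/*
 * User space passes sizeof(struct statfs64) in r1; EABI alignment pads
 * the structure out to 88 bytes where the kernel expects 84, so the
 * size is rewritten before handing over to the C implementation.
 */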
sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
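/*
 * With 4K pages the 4K-unit offset already is the page offset, so it is
 * stored unchanged as the sixth (stacked) argument for sys_mmap_pgoff.
 */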
sys_mmap2:
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences.
 */

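/*
 * EABI requires 64-bit arguments to sit in an even/odd register pair
 * (or an 8-byte aligned stack slot), while OABI packed them into the
 * next available slots; the stubs below shuffle the OABI layout into
 * the one the EABI-built C handlers expect.
 */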
sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
	syscall_table_start sys_oabi_call_table
#undef __SYSCALL_WITH_COMPAT
#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, compat)
#include <calls-oabi.S>
	syscall_table_end sys_oabi_call_table

#endif