/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"


	.align	5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * such as avoiding writing r0 to the stack.  We only use this path if we
 * have tracing and context tracking disabled - the overheads from those
 * features make this path too inefficient.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	bne	fast_work_pending

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
	/* fall through to work_pending */
#else
/*
 * The "replacement" ret_fast_syscall for when tracing or context tracking
 * is enabled.  As we will need to call out to some C functions, we save
 * r0 first to avoid needing to save registers around each C function call.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	beq	no_work_pending
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Slower path - fall through to work_pending */
#endif

	tst	r1, #_TIF_SYSCALL_WORK
	bne	__sys_trace_return_nosave
slow_work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)
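
/*
 * A note on the do_work_pending() return convention, as used in
 * slow_work_pending above: a zero return means all pending work has been
 * handled and we drop into no_work_pending to return to user space; a
 * non-zero return restarts the syscall - the saved r0-r6 are reloaded and
 * we branch back to local_restart, with scno rewritten to
 * __NR_restart_syscall when the return value is negative.
 */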

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 * IRQs may be enabled here, so always disable them.  Note that we use the
 * "notrace" version to avoid calling into the tracing code unnecessarily.
 * do_work_pending() will update this state if necessary.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq_notrace			@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	slow_work_pending
no_work_pending:
	asm_trace_hardirqs_on save = 0

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
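/*
 * A sketch of the register convention assumed here (see copy_thread() in
 * arch/arm/kernel/process.c): for a kernel thread, r5 holds the thread
 * function and r4 its argument, so when r5 is non-zero we call the function
 * in r5 with r4 moved into r0, returning to the "1:" label below.  For a
 * user task r5 is zero and we fall straight through to the slow syscall
 * return path.
 */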
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0
	movne	r0, r4
	badrne	lr, 1f
	retne	r5
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"

/*
 * Ensure that the size of the system call table equals __NR_syscalls,
 * which is the value the rest of the system sees.
 */
.ifne NR_syscalls - __NR_syscalls
.error "__NR_syscalls is not equal to the size of the syscall table"
.endif

#undef CALL
#define CALL(x) .long x
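/*
 * calls.S is included twice: the first pass above defined CALL() to count
 * the entries into NR_syscalls, and this second definition makes each
 * CALL(x) emit a pointer, so the #include at sys_call_table below expands
 * into the actual table of syscall entry points.
 */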

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	zero_fp
	alignment_trap r10, ip, __cr_alignment
	enable_irq
	ct_user_exit
	get_thread_info tsk

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [lr, #-4]		)	@ get SWI instruction
#else
 USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into
	 * scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [lr, #-4]		)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
#endif
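
/*
 * For illustration (not part of the original file): an EABI caller passes
 * the syscall number in r7 and issues a plain "svc #0", e.g.
 *
 *	mov	r7, #__NR_close
 *	svc	#0
 *
 * whereas a legacy OABI caller encodes the number in the SWI instruction's
 * immediate field:
 *
 *	swi	#(__NR_OABI_SYSCALL_BASE + __NR_close)
 *
 * which is why, for anything other than pure EABI, the code above has to
 * fetch the SWI instruction itself from lr - 4.
 */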

	uaccess_disable tbl

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif
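
/*
 * A note on the encodings the bics test relies on (standard ARM facts, not
 * from the original file): an EABI "svc #0" assembles to 0xef000000, so
 * clearing the top byte leaves zero, Z is set, and the eorne/ldrne pair is
 * skipped.  An OABI call such as "swi #0x900001" assembles to 0xef900001,
 * so the masked value keeps the 0x90xxxx comment field and the OABI table
 * is selected instead.
 */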

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	badr	lr, ret_fast_syscall		@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT.  Instead, return to the
	 * instruction and re-enter the user fault handling path, trying
	 * to page it in.  This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, lr, #4
	str	lr, [sp, #S_PC]
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter

	badr	lr, __sys_trace_return		@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
	stmccia	sp, {r4, r5}			@ and update the stack args
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack
	b	ret_slow_syscall

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

__sys_trace_return_nosave:
	enable_irq_notrace
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
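/*
 * sys_syscall below implements the (OABI) indirect syscall(2) interface:
 * r0 carries the number of the syscall to invoke, so the real arguments
 * are shuffled down one register (r1 -> r0 ... r4 -> r3, with r5/r6 stored
 * as the fifth and sixth stack arguments) before indirecting through the
 * table.
 */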
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0				@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0				@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the
 * requested offset because it is not page-aligned, we return -EINVAL.
 */
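/*
 * As a worked example (assuming PGOFF_MASK covers the low PAGE_SHIFT - 12
 * bits): with 16K pages, PAGE_SHIFT is 14, so a 4K-unit offset is only
 * usable when its low two bits are clear, and the "lsr #2" converts it
 * into 16K-page units for sys_mmap_pgoff; anything not 16K-aligned takes
 * the -EINVAL exit.
 */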
sys_mmap2:
#if PAGE_SHIFT > 12
	tst	r5, #PGOFF_MASK
	moveq	r5, r5, lsr #PAGE_SHIFT - 12
	streq	r5, [sp, #4]
	beq	sys_mmap_pgoff
	mov	r0, #-EINVAL
	ret	lr
#else
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif

/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	/*
	 * TIF_SIGPENDING or TIF_NOTIFY_RESUME must've been set if we got here
	 */
	ldr	r2, [sp, #S_PSR]
	mov	r0, sp				@ 'regs'
	tst	r2, #15				@ are we returning to user mode?
	bne	no_work_pending			@ no?  just leave, then...
	mov	r2, why				@ 'syscall'
	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
	movne	why, #0				@ prevent further restarts
	bl	do_notify_resume
	b	ret_slow_syscall		@ Check work again

work_resched:
	bl	schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x

#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov     ip, sp
 *	push    {fp, ip, lr, pc}
 *	sub     fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, the mcount call is patched out: it is replaced
 * with a "mov r0, r0" in the mcount case, and with a "pop {lr}" in the
 * __gnu_mcount_nc case (see arch/arm/kernel/ftrace.c).
 */

#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif

.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1			@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
.endm

.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2
	bne	1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr	r1, =ftrace_graph_return
	ldr	r2, [r1]
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix

	ldr	r1, =ftrace_graph_entry
	ldr	r2, [r1]
	ldr	r0, =ftrace_graph_entry_stub
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix
#endif

	mcount_exit

1:	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function
	adr	lr, BSYM(2f)
	mov	pc, r2
2:	mcount_exit
.endm

.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0
#endif

	mcount_exit
.endm

.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1
#else
	@ called from __mcount, untouched in lr
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
#endif
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm

#ifdef CONFIG_OLD_MCOUNT
/*
 * mcount
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
.endm

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif

/*
 * __gnu_mcount_nc
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]
.endm

.macro mcount_exit
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
.endm
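
/*
 * Stack layout inside __gnu_mcount_nc, as set up by the macros above: the
 * call site pushed the caller's lr before the bl, and mcount_enter then
 * pushed {r0-r3, lr}, so [sp, #20] holds the instrumented function's
 * caller's lr while lr itself is the return address into the instrumented
 * function.  mcount_exit pops that return address into ip and the caller's
 * lr back into lr, making the whole detour transparent.
 */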

ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
	mov	ip, lr
	ldmia	sp!, {lr}
	mov	pc, ip
#else
	__mcount
#endif
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
	__ftrace_caller
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
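/*
 * return_to_handler is the trampoline that prepare_ftrace_return installs
 * in place of a traced function's real return address.  When the function
 * returns here, ftrace_return_to_handler looks up and hands back the
 * original return address, which we then jump to via lr.
 */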
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif

ENTRY(ftrace_stub)
.Lftrace_stub:
	mov	pc, lr
ENDPROC(ftrace_stub)

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into
	 * scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else
	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
#endif

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
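	/*
	 * The CP15 write above reloads the control register from
	 * cr_alignment, the kernel's saved copy, so that the kernel's own
	 * alignment-trap setting is in force on entry regardless of what
	 * the interrupted context was using.
	 */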
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

#ifdef CONFIG_SECCOMP
	tst	r10, #_TIF_SECCOMP
	beq	1f
	mov	r0, scno
	bl	__secure_computing
	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
1:
#endif

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r2, scno
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r2, scno
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_fork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
	add	r3, sp, #S_OFF
	b	sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
	add	ip, sp, #S_OFF
	str	ip, [sp, #4]
	b	sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0				@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0				@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
	ldr	r2, [sp, #S_OFF + S_SP]
	b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the
 * requested offset because it is not page-aligned, we return -EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
	tst	r5, #PGOFF_MASK
	moveq	r5, r5, lsr #PAGE_SHIFT - 12
	streq	r5, [sp, #4]
	beq	sys_mmap_pgoff
	mov	r0, #-EINVAL
	mov	pc, lr
#else
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif