/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"
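
/*
 * The register aliases used throughout this file come from entry-header.S;
 * for readability they are expected to be along these lines (a sketch of
 * that header, not definitions made here):
 *
 *	scno	.req	r7	@ syscall number
 *	tbl	.req	r8	@ syscall table pointer
 *	why	.req	r8	@ Linux syscall (!= 0), shares r8 with tbl
 *	tsk	.req	r9	@ current thread_info
 */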

	.align	5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * such as avoiding writing r0 to the stack.  We only use this path if we
 * have tracing and context tracking disabled - the overheads from those
 * features make this path too inefficient.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	bne	fast_work_pending

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
	/* fall through to work_pending */
#else
/*
 * The "replacement" ret_fast_syscall for when tracing or context tracking
 * is enabled.  As we will need to call out to some C functions, we save
 * r0 first to avoid needing to save registers around each C function call.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	beq	no_work_pending
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Slower path - fall through to work_pending */
#endif

	tst	r1, #_TIF_SYSCALL_WORK
	bne	__sys_trace_return_nosave
slow_work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 * IRQs may be enabled here, so always disable them.  Note that we use the
 * "notrace" version to avoid calling into the tracing code unnecessarily.
 * do_work_pending() will update this state if necessary.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq_notrace			@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	slow_work_pending
no_work_pending:
	asm_trace_hardirqs_on save = 0

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
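/*
 * r5 is expected to be non-zero only for kernel threads: copy_thread()
 * stashes the thread function in r5 and its argument in r4, so the code
 * below calls that function with r0 = r4 and arranges for it to return
 * to the label 1.  For a forked user task r5 is zero and we fall
 * straight through to the slow syscall return path.
 */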
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0
	movne	r0, r4
	badrne	lr, 1f
	retne	r5
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
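/*
 * The two-pass trick above: on this first include of calls.S, each
 * CALL(x) expands to ".equ NR_syscalls,NR_syscalls+1", so NR_syscalls
 * ends up counting the table entries without emitting any data.  A
 * calls.S entry is assumed to look roughly like:
 *
 *	CALL(sys_restart_syscall)
 *	CALL(sys_exit)
 *
 * The second include further down redefines CALL(x) as ".long x" to
 * emit the actual table.
 */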

/*
 * Ensure that the size of the system call table equals __NR_syscalls,
 * which is the value the rest of the system sees.
 */
.ifne NR_syscalls - __NR_syscalls
.error "__NR_syscalls is not equal to the size of the syscall table"
.endif

#undef CALL
#define CALL(x) .long x

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
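/*
 * At this point the stack holds a struct pt_regs for the caller: r0-r12,
 * the user sp/lr, the return PC, the saved CPSR and OLD_R0 (the original
 * r0, kept separately because r0 is overwritten by the return value but
 * is still needed if the syscall is restarted).  The S_* offsets used
 * above are presumed to be generated from that layout by asm-offsets.
 */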
	zero_fp
	alignment_trap r10, ip, __cr_alignment
	enable_irq
	ct_user_exit
	get_thread_info tsk

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * instruction to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [lr, #-4]		)	@ get SWI instruction
#else
 USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [lr, #-4]		)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
#endif
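
/*
 * For reference, the two userspace conventions distinguished above
 * (illustrative sketches, not code assembled in this file):
 *
 * EABI puts the number in r7 and uses a zero swi/svc immediate:
 *
 *	mov	r7, #__NR_getpid
 *	svc	#0
 *
 * The old ABI (OABI) encodes the number in the instruction itself,
 * roughly:
 *
 *	swi	#(__NR_OABI_SYSCALL_BASE + 20)	@ getpid
 *
 * which is why the OABI_COMPAT path has to load the instruction word
 * back from [lr, #-4] to recover the number.
 */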

	uaccess_disable tbl

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif
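
/*
 * Worked example of the OABI decode above, assuming __NR_OABI_SYSCALL_BASE
 * is 0x900000: an OABI "swi #0x900005" fetches as the instruction word
 * 0xef900005; bics clears the 0xef condition/opcode byte, leaving
 * 0x900005 (non-zero, so the "ne" forms execute), and the eor with the
 * base yields scno = 5 while tbl is repointed at sys_oabi_call_table.
 * An EABI "svc #0" fetches as 0xef000000, bics leaves zero, and scno
 * keeps whatever userspace loaded into r7.
 */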

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	badr	lr, ret_fast_syscall		@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func
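
/*
 * Dispatch note: each table entry is one .long, so "ldrcc pc,
 * [tbl, scno, lsl #2]" indexes the table in 4-byte steps and only fires
 * when scno is below NR_syscalls.  Out-of-range numbers fall through to
 * the code above it: anything at or beyond __ARM_NR_BASE is routed to
 * arm_syscall() for the ARM-private calls (e.g. __ARM_NR_cacheflush),
 * and everything else ends up in sys_ni_syscall.
 */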

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT.  Instead, return back to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in.  This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, lr, #4
	str	lr, [sp, #S_PC]
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)
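
/*
 * The 9001 fixup above is reached via the USER() macro wrapped around
 * the "ldr ... [lr, #-4]" loads: USER() is understood to emit an
 * __ex_table entry pairing the load with the local label 9001, roughly
 *
 *	9999:	ldr	r10, [lr, #-4]
 *		.pushsection __ex_table, "a"
 *		.long	9999b, 9001f
 *		.popsection
 *
 * so a fault on the instruction fetch lands there instead of oopsing.
 */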

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter

	badr	lr, __sys_trace_return		@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
	stmccia	sp, {r4, r5}			@ and update the stack args
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack
	b	ret_slow_syscall
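
/*
 * syscall_trace_enter() returns the (possibly rewritten) syscall number:
 * a ptrace-based tracer such as strace, or a seccomp filter, may change
 * it, and a return of -1 means "skip the syscall entirely", which is why
 * scno is compared against -1 above before falling back to the
 * out-of-range path at 2b.
 */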

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

__sys_trace_return_nosave:
	enable_irq_notrace
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif
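
/*
 * With these macros in place, calls.S entries are assumed to expand
 * along these lines (entry shapes taken as representative, not verbatim):
 *
 *	CALL(sys_read)			-> .long sys_read
 *	CALL(OBSOLETE(sys_time))	-> .long sys_time
 *					   (or sys_ni_syscall with EABI)
 *	CALL(ABI(sys_fcntl64, sys_oabi_fcntl64))
 *					-> .long sys_fcntl64
 *					   (the native table picks "native")
 */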

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
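@
@ sys_syscall implements the old ABI "indirect" syscall: userspace invokes
@ swi #__NR_syscall with the real syscall number in r0 and its arguments
@ shifted up by one register, so the wrapper below shuffles everything
@ down (r1->r0 ... r4->r3, r5/r6 onto the stack) before indexing the
@ table.  For example, an OABI syscall(__NR_open, path, flags, mode)
@ arrives with r0 = __NR_open, r1 = path, r2 = flags, r3 = mode.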
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

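/*
 * The 88 -> 84 rewrite below adjusts the size argument passed by
 * userspace: the EABI-padded struct statfs64 is presumably 88 bytes,
 * while the kernel definition that sys_statfs64() checks against is 84,
 * so the larger value is quietly translated.
 */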
sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the
 * requested offset, we return EINVAL.
 */
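/*
 * Worked example: with 16K pages (PAGE_SHIFT == 14) the 4K-units offset
 * must be a multiple of 4, so the low PAGE_SHIFT - 12 = 2 bits are
 * tested via PGOFF_MASK; a clean value is shifted right by 2 to convert
 * 4K units into page units before being stored as the sixth argument
 * for sys_mmap_pgoff.  (PGOFF_MASK is presumed to be
 * (1 << (PAGE_SHIFT - 12)) - 1.)
 */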
sys_mmap2:
#if PAGE_SHIFT > 12
	tst	r5, #PGOFF_MASK
	moveq	r5, r5, lsr #PAGE_SHIFT - 12
	streq	r5, [sp, #4]
	beq	sys_mmap_pgoff
	mov	r0, #-EINVAL
	ret	lr
#else
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */
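/*
 * The differences come from EABI's requirement that 64-bit arguments
 * live in even/odd register pairs (with a pad register inserted where
 * needed), which old ABI binaries do not honour.  Taking pread64 as an
 * example: OABI passes (fd, buf, count, pos) with the 64-bit pos in
 * r3+r4, while the EABI-compiled sys_pread64 expects it in the fifth
 * and sixth argument slots on the stack, hence the "stmia sp, {r3, r4}"
 * below.
 */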

sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif