/*=============================================================================
 * linux/arch/arm/kernel/entry-common.S at v4.10.11
 *-----------------------------------------------------------------------------
 */

/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif

	.equ	NR_syscalls, __NR_syscalls

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro  arch_ret_to_user, tmp1, tmp2
	.endm
#endif
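/*
 * Illustrative expansion (not part of this file): machines that select
 * CONFIG_NEED_RET_TO_USER supply this hook from their mach/entry-macro.S.
 * The v6.2 listing later in this document inlines the IOP32X variant,
 * roughly:
 *
 *	.macro	arch_ret_to_user, tmp1, tmp2
 *	mrc	p15, 0, \tmp1, c15, c1, 0
 *	tst	\tmp1, #(1 << 6)
 *	bicne	\tmp1, \tmp1, #(1 << 6)
 *	mcrne	p15, 0, \tmp1, c15, c1, 0	@ Disable cp6 access
 *	.endm
 */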

#include "entry-header.S"


	.align	5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * such as avoiding writing r0 to the stack.  We only use this path if we
 * have tracing and context tracking disabled - the overheads from those
 * features make this path too inefficient.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	bne	fast_work_pending

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
	/* fall through to work_pending */
#else
/*
 * The "replacement" ret_fast_syscall for when tracing or context tracking
 * is enabled.  As we will need to call out to some C functions, we save
 * r0 first to avoid needing to save registers around each C function call.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	beq	no_work_pending
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Slower path - fall through to work_pending */
#endif

	tst	r1, #_TIF_SYSCALL_WORK
	bne	__sys_trace_return_nosave
slow_work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 * IRQs may be enabled here, so always disable them.  Note that we use the
 * "notrace" version to avoid calling into the tracing code unnecessarily.
 * do_work_pending() will update this state if necessary.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq_notrace			@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	slow_work_pending
no_work_pending:
	asm_trace_hardirqs_on save = 0

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0
	movne	r0, r4
	badrne	lr, 1f
	retne	r5
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)
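/*
 * Note on the convention used above (summarized from arch/arm's
 * copy_thread()): for a kernel thread, r5 holds the thread function and
 * r4 its argument, so the code calls r5 with r0 = r4 and has it return
 * to 1:.  For a user-space fork, r5 is zero and we fall through to
 * ret_slow_syscall with the child's pt_regs already set up to return 0.
 */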

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	zero_fp
	alignment_trap r10, ip, __cr_alignment
	enable_irq
	ct_user_exit
	get_thread_info tsk

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [lr, #-4]		)	@ get SWI instruction
#else
 USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly Thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [lr, #-4]		)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
#endif
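/*
 * For illustration (not part of the original file), the two user-space
 * conventions this block distinguishes.  An EABI caller passes the
 * number in r7 and traps with a zero immediate, e.g. for write(2):
 *
 *	mov	r7, #4			@ __NR_write is 4 on ARM
 *	svc	#0			@ swi immediate is zero
 *
 * An OABI caller encodes the number in the instruction itself:
 *
 *	swi	#0x900004		@ __NR_OABI_SYSCALL_BASE + 4
 *
 * which is why the code above must fetch the instruction at [lr, #-4]
 * to recover the number.
 */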

	uaccess_disable tbl

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif
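/*
 * How the decode above works, spelled out: r10 holds the 32-bit swi
 * instruction.  Bits 31-24 are the condition code plus the swi opcode
 * (0xef for an unconditional swi); the low 24 bits are the immediate.
 * "bics r10, r10, #0xff000000" clears the top byte, so an EABI "svc #0"
 * leaves zero (Z set, nothing more to do), while an OABI "swi #0x9000NN"
 * leaves 0x9000NN (Z clear), and
 *
 *	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE	@ scno = NN
 *
 * strips the 0x900000 base because the eor clears exactly those bits.
 */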

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	badr	lr, ret_fast_syscall		@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

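	/*
	 * Falling through here means scno was beyond the table.  Numbers
	 * based at __ARM_NR_BASE are ARM-private calls (cacheflush and
	 * friends) handled in C by arm_syscall(); anything else ends up
	 * in sys_ni_syscall.
	 */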
	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT. Instead, return back to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in. This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, lr, #4
	str	lr, [sp, #S_PC]
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter

	badr	lr, __sys_trace_return		@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
	stmccia	sp, {r4, r5}			@ and update the stack args
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack
	b	ret_slow_syscall

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

__sys_trace_return_nosave:
	enable_irq_notrace
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

	.macro	syscall_table_start, sym
	.equ	__sys_nr, 0
	.type	\sym, #object
ENTRY(\sym)
	.endm

	.macro	syscall, nr, func
	.ifgt	__sys_nr - \nr
	.error	"Duplicated/unordered system call entry"
	.endif
	.rept	\nr - __sys_nr
	.long	sys_ni_syscall
	.endr
	.long	\func
	.equ	__sys_nr, \nr + 1
	.endm

	.macro	syscall_table_end, sym
	.ifgt	__sys_nr - __NR_syscalls
	.error	"System call table too big"
	.endif
	.rept	__NR_syscalls - __sys_nr
	.long	sys_ni_syscall
	.endr
	.size	\sym, . - \sym
	.endm
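/*
 * Illustration of how these macros expand (example_table, sys_foo and
 * sys_bar are hypothetical names, not kernel symbols):
 *
 *	syscall_table_start example_table
 *	syscall	0, sys_foo		@ emits .long sys_foo
 *	syscall	2, sys_bar		@ pads entry 1 with .long
 *					@ sys_ni_syscall, then emits
 *					@ .long sys_bar
 *	syscall_table_end example_table	@ pads up to __NR_syscalls
 *
 * Any hole in the numbering lands on sys_ni_syscall, and a duplicated
 * or out-of-order entry trips the .error above at build time.
 */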

#define NATIVE(nr, func) syscall nr, func

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
	syscall_table_start sys_call_table
#define COMPAT(nr, native, compat) syscall nr, native
#ifdef CONFIG_AEABI
#include <calls-eabi.S>
#else
#include <calls-oabi.S>
#endif
#undef COMPAT
	syscall_table_end sys_call_table

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_statfs64_wrapper:
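		@ r1 carries sizeof(struct statfs64) from user space; EABI
		@ alignment pads the user-visible struct to 88 bytes while
		@ the kernel checks for the packed 84-byte layout, so
		@ normalize the size before the call.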
		teq	r1, #88
		moveq	r1, #84
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
		beq	sys_mmap_pgoff
		mov	r0, #-EINVAL
		ret	lr
#else
		str	r5, [sp, #4]
		b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)
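/*
 * Worked example for the #if branch above, assuming a hypothetical
 * PAGE_SHIFT of 14 (16K pages): an offset of 64K arrives as off_4k = 16.
 * A page is then four 4K units, so only multiples of 4 are representable;
 * 16 passes the PGOFF_MASK test and "lsr #(PAGE_SHIFT - 12)" converts it
 * to 16 >> 2 = 4 whole pages for sys_mmap_pgoff, while off_4k = 17 (68K)
 * has remainder bits set and is rejected with -EINVAL.
 */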

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */
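/*
 * Background sketch (not from the original file): EABI places 64-bit
 * arguments in an even/odd register pair, so for
 * pread64(fd, buf, count, pos) the native syscall expects pos in the
 * fifth/sixth argument slots, which the entry code keeps on the stack.
 * OABI instead packed pos into r3+r4:
 *
 *   OABI caller:    r0 = fd, r1 = buf, r2 = count, r3:r4 = pos
 *   native expects: r0 = fd, r1 = buf, r2 = count, [sp]:[sp, #4] = pos
 *
 * hence the "stmia sp, {r3, r4}" in the wrappers below.
 */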

sys_oabi_pread64:
		stmia	sp, {r3, r4}
		b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}
		b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
		str	r3, [sp]
		mov	r3, r2
		mov	r2, r1
		b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
	syscall_table_start sys_oabi_call_table
#define COMPAT(nr, native, compat) syscall nr, compat
#include <calls-oabi.S>
	syscall_table_end sys_oabi_call_table

#endif

/*=============================================================================
 * linux/arch/arm/kernel/entry-common.S at v6.2
 *-----------------------------------------------------------------------------
 */

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 */

#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include <asm/memory.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif

	.equ	NR_syscalls, __NR_syscalls

	.macro  arch_ret_to_user, tmp
#ifdef CONFIG_ARCH_IOP32X
	mrc	p15, 0, \tmp, c15, c1, 0
	tst	\tmp, #(1 << 6)
	bicne	\tmp, \tmp, #(1 << 6)
	mcrne	p15, 0, \tmp, c15, c1, 0	@ Disable cp6 access
#endif
	.endm

#include "entry-header.S"

saved_psr	.req	r8
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING_USER)
saved_pc	.req	r9
#define TRACE(x...) x
#else
saved_pc	.req	lr
#define TRACE(x...)
#endif
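/*
 * The .req aliases above: when tracing or context tracking is enabled,
 * the entry path calls into C before the saved state is consumed, and a
 * call clobbers lr, so the user PC is kept in r9 and the TRACE() lines
 * assemble to real instructions.  Otherwise saved_pc simply aliases lr
 * and the TRACE() lines compile away to nothing.
 */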

	.section .entry.text,"ax",%progbits
	.align	5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING_USER) || \
	IS_ENABLED(CONFIG_DEBUG_RSEQ))
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * such as avoiding writing r0 to the stack.  We only use this path if we
 * have tracing, context tracking and rseq debug disabled - the overheads
 * from those features make this path too inefficient.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	movs	r1, r1, lsl #16
	bne	fast_work_pending


	/* perform architecture specific actions before user return */
	arch_ret_to_user r1

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
	/* fall through to work_pending */
#else
/*
 * The "replacement" ret_fast_syscall for when tracing, context tracking,
 * or rseq debug is enabled.  As we will need to call out to some C functions,
 * we save r0 first to avoid needing to save registers around each C function
 * call.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	mov	r0, sp				@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	movs	r1, r1, lsl #16
	beq	no_work_pending
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Slower path - fall through to work_pending */
#endif

	tst	r1, #_TIF_SYSCALL_WORK
	bne	__sys_trace_return_nosave
slow_work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 * IRQs may be enabled here, so always disable them.  Note that we use the
 * "notrace" version to avoid calling into the tracing code unnecessarily.
 * do_work_pending() will update this state if necessary.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	enable_irq_notrace			@ enable interrupts
	mov	r0, sp				@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace			@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	movs	r1, r1, lsl #16
	bne	slow_work_pending
no_work_pending:
	asm_trace_hardirqs_on save = 0

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0
	movne	r0, r4
	badrne	lr, 1f
	retne	r5
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
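/*
 * Spectre-BHB hardening: CPUs vulnerable to branch history injection
 * enter the SWI path through one of these stubs rather than through
 * vector_swi directly.  The loop8 variant retires eight taken branches
 * to scrub the branch history buffer; the bpiall variant invalidates
 * the branch predictor outright.  Both then join vector_swi at 3:.
 */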
ENTRY(vector_bhb_loop8_swi)
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}
	mov	r8, #8
1:	b	2f
2:	subs	r8, r8, #1
	bne	1b
	dsb	nsh
	isb
	b	3f
ENDPROC(vector_bhb_loop8_swi)

	.align	5
ENTRY(vector_bhb_bpiall_swi)
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}
	mcr	p15, 0, r8, c7, c5, 6	@ BPIALL
	isb
	b	3f
ENDPROC(vector_bhb_bpiall_swi)
#endif
	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
3:
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	saved_psr, spsr			@ called from non-FIQ mode, so ok.
 TRACE(	mov	saved_pc, lr		)
	str	saved_pc, [sp, #S_PC]		@ Save calling PC
	str	saved_psr, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	reload_current r10, ip
	zero_fp
	alignment_trap r10, ip, cr_alignment
	asm_trace_hardirqs_on save=0
	enable_irq_notrace
	ct_user_exit save=0

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	saved_psr, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [saved_pc, #-4]	)	@ get SWI instruction
#else
 USER(	ldr	r10, [saved_pc, #-4]	)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly Thumb mode. */
	tst	saved_psr, #PSR_T_BIT		@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [saved_pc, #-4]	)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [saved_pc, #-4]	)	@ get SWI instruction
#endif

	/* saved_psr and saved_pc are now dead */

	uaccess_disable tbl
	get_thread_info tsk

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	strne	r10, [tsk, #TI_ABI_SYSCALL]
	streq	scno, [tsk, #TI_ABI_SYSCALL]
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	str	scno, [tsk, #TI_ABI_SYSCALL]
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#else
	str	scno, [tsk, #TI_ABI_SYSCALL]
#endif
	/*
	 * Reload the registers that may have been corrupted on entry to
	 * the syscall assembly (by tracing or context tracking.)
	 */
 TRACE(	ldmia	sp, {r0 - r3}		)

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

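	/*
	 * invoke_syscall is a macro from entry-header.S: it bounds-checks
	 * scno against NR_syscalls and tail-calls the table entry with the
	 * return address set to __ret_fast_syscall; out-of-range numbers
	 * fall through to the arm_syscall handling below.
	 */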
	invoke_syscall tbl, scno, r10, __ret_fast_syscall

	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT. Instead, return back to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in. This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, saved_pc, #4
	str	lr, [sp, #S_PC]
	get_thread_info tsk
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)
	.ltorg

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter
	mov	scno, r0
	invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack

__sys_trace_return_nosave:
	enable_irq_notrace
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.macro	syscall_table_start, sym
	.equ	__sys_nr, 0
	.type	\sym, #object
ENTRY(\sym)
	.endm

	.macro	syscall, nr, func
	.ifgt	__sys_nr - \nr
	.error	"Duplicated/unordered system call entry"
	.endif
	.rept	\nr - __sys_nr
	.long	sys_ni_syscall
	.endr
	.long	\func
	.equ	__sys_nr, \nr + 1
	.endm

	.macro	syscall_table_end, sym
	.ifgt	__sys_nr - __NR_syscalls
	.error	"System call table too big"
	.endif
	.rept	__NR_syscalls - __sys_nr
	.long	sys_ni_syscall
	.endr
	.size	\sym, . - \sym
	.endm

#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
#define __SYSCALL(nr, func) syscall nr, func

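/*
 * The table bodies are generated from arch/arm/tools/syscall.tbl.  An
 * entry with an OABI-specific handler comes out roughly like this
 * (illustrative, not a verbatim generated line):
 *
 *	__SYSCALL_WITH_COMPAT(180, sys_pread64, sys_oabi_pread64)
 *
 * so the native table below picks sys_pread64, while the OABI table at
 * the end of this file redefines the macro to pick sys_oabi_pread64.
 */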
/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
	syscall_table_start sys_call_table
#ifdef CONFIG_AEABI
#include <calls-eabi.S>
#else
#include <calls-oabi.S>
#endif
	syscall_table_end sys_call_table

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
#ifdef CONFIG_CPU_SPECTRE
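		@ Spectre variant 1 hardening: clamp an out-of-range scno
		@ to 0 so a mispredicted range check cannot index past the
		@ table, and use csdb to keep speculation from consuming
		@ the unclamped value.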
		movhs	scno, #0
		csdb
#endif
		stmialo	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_statfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
		str	r5, [sp, #4]
		b	sys_mmap_pgoff
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
		stmia	sp, {r3, r4}
		b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}
		b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
		str	r3, [sp]
		mov	r3, r2
		mov	r2, r1
		b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
	syscall_table_start sys_oabi_call_table
#undef __SYSCALL_WITH_COMPAT
#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, compat)
#include <calls-oabi.S>
	syscall_table_end sys_oabi_call_table

#endif