arch/m68k/kernel/entry.S (v6.8)
  1/* SPDX-License-Identifier: GPL-2.0-or-later
  2 * -*- mode: asm -*-
  3 *
  4 *  linux/arch/m68k/kernel/entry.S
  5 *
  6 *  Copyright (C) 1991, 1992  Linus Torvalds
  7 *
  8 * Linux/m68k support by Hamish Macdonald
  9 *
 10 * 68060 fixes by Jesper Skov
 11 *
 12 */
 13
 14/*
 15 * entry.S  contains the system-call and fault low-level handling routines.
 16 * This also contains the timer-interrupt handler, as well as all interrupts
 17 * and faults that can result in a task-switch.
 18 *
 19 * NOTE: This code handles signal-recognition, which happens every time
 20 * after a timer-interrupt and after each system call.
 21 *
 22 */
 23
 24/*
 25 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 26 *               all pointers that used to be 'current' are now entry
 27 *               number 0 in the 'current_set' list.
 28 *
  29 *  6/05/00 RZ:	 added writeback completion after return from sighandler
 30 *		 for 68040
 31 */
 32
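Editor's note: the header comments above describe the system-call path handled by this file. As a hedged, illustrative sketch (not part of entry.S), the convention visible later in ret_from_user_signal ("moveq #__NR_sigreturn,%d0; trap #0") is the same one user space uses to reach the system_call entry: syscall number in %d0, trap through vector 0, result back in %d0. The helper name below is hypothetical.

	/* Hedged sketch: issue a Linux/m68k system call with the number in
	 * %d0 via "trap #0"; the kernel returns the result in %d0
	 * (negative values in the -errno range, matching the -ENOSYS
	 * stores seen at badsys below). */
	static inline long m68k_syscall0(long nr)	/* hypothetical helper */
	{
		register long d0 __asm__ ("d0") = nr;

		__asm__ __volatile__ ("trap #0"
				      : "+d" (d0)
				      :
				      : "memory");
		return d0;
	}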
 33#include <linux/linkage.h>
 34#include <asm/errno.h>
 35#include <asm/setup.h>
 36#include <asm/traps.h>
 37#include <asm/unistd.h>
 38#include <asm/asm-offsets.h>
 39#include <asm/entry.h>
 40
 41.globl system_call, buserr, trap, resume
 42.globl sys_call_table
 43.globl __sys_fork, __sys_clone, __sys_vfork
 44.globl bad_interrupt
 45.globl auto_irqhandler_fixup
 46.globl user_irqvec_fixup
 47
 48.text
 49ENTRY(__sys_fork)
 50	SAVE_SWITCH_STACK
 51	jbsr	sys_fork
 52	lea     %sp@(24),%sp
 53	rts
 54
 55ENTRY(__sys_clone)
 56	SAVE_SWITCH_STACK
 57	pea	%sp@(SWITCH_STACK_SIZE)
 58	jbsr	m68k_clone
 59	lea     %sp@(28),%sp
 60	rts
 61
 62ENTRY(__sys_vfork)
 63	SAVE_SWITCH_STACK
 64	jbsr	sys_vfork
 65	lea     %sp@(24),%sp
 66	rts
 67
 68ENTRY(__sys_clone3)
 69	SAVE_SWITCH_STACK
 70	pea	%sp@(SWITCH_STACK_SIZE)
 71	jbsr	m68k_clone3
 72	lea	%sp@(28),%sp
 73	rts
 74
 75ENTRY(sys_sigreturn)
 76	SAVE_SWITCH_STACK
 77	movel	%sp,%a1			  	| switch_stack pointer
 78	lea	%sp@(SWITCH_STACK_SIZE),%a0	| pt_regs pointer
 79	lea     %sp@(-84),%sp			| leave a gap
 80	movel	%a1,%sp@-
 81	movel	%a0,%sp@-
 82	jbsr	do_sigreturn
 83	jra	1f				| shared with rt_sigreturn()
 84
 85ENTRY(sys_rt_sigreturn)
 86	SAVE_SWITCH_STACK
 87	movel	%sp,%a1			  	| switch_stack pointer
 88	lea	%sp@(SWITCH_STACK_SIZE),%a0	| pt_regs pointer
 89	lea     %sp@(-84),%sp			| leave a gap
 90	movel	%a1,%sp@-
 91	movel	%a0,%sp@-
 92	| stack contents:
 93	|   [original pt_regs address] [original switch_stack address]
 94	|   [gap] [switch_stack] [pt_regs] [exception frame]
 95	jbsr	do_rt_sigreturn
 96
 971:
 98	| stack contents now:
 99	|   [original pt_regs address] [original switch_stack address]
100	|   [unused part of the gap] [moved switch_stack] [moved pt_regs]
101	|   [replacement exception frame]
102	| return value of do_{rt_,}sigreturn() points to moved switch_stack.
103
104	movel	%d0,%sp				| discard the leftover junk
105	RESTORE_SWITCH_STACK
106	| stack contents now is just [syscall return address] [pt_regs] [frame]
107	| return pt_regs.d0
108	movel	%sp@(PT_OFF_D0+4),%d0
109	rts
110
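Editor's note: the stack-layout comments above rely on the C handlers handing back the address the stack pointer should be reset to, and the "lea %sp@(-84),%sp" gap appears to leave room for them to rebuild a larger replacement exception frame below the moved pt_regs. A plausible C-side shape, inferred from the two pointer pushes and from "return value of do_{rt_,}sigreturn() points to moved switch_stack" (the real declarations live in arch/m68k/kernel/signal.c):

	struct pt_regs;
	struct switch_stack;

	/* Hedged sketch: first argument is the pt_regs pointer pushed last,
	 * second the switch_stack pointer; the returned address is loaded
	 * straight into %sp by "movel %d0,%sp" above. */
	void *do_sigreturn(struct pt_regs *regs, struct switch_stack *sw);
	void *do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw);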
111ENTRY(buserr)
112	SAVE_ALL_INT
113	GET_CURRENT(%d0)
114	movel	%sp,%sp@-		| stack frame pointer argument
115	jbsr	buserr_c
116	addql	#4,%sp
117	jra	ret_from_exception
118
119ENTRY(trap)
120	SAVE_ALL_INT
121	GET_CURRENT(%d0)
122	movel	%sp,%sp@-		| stack frame pointer argument
123	jbsr	trap_c
124	addql	#4,%sp
125	jra	ret_from_exception
126
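Editor's note: buserr and trap above both push the address of the saved frame as their only argument. A hedged prototype sketch of the C side this implies (the real handlers live in arch/m68k/kernel/traps.c; "struct frame" is the arch's saved-registers-plus-hardware-frame layout):

	struct frame;	/* saved registers plus the hardware exception frame */

	/* Hedged sketch: each takes the frame pointer pushed by
	 * "movel %sp,%sp@-" above. */
	void buserr_c(struct frame *fp);
	void trap_c(struct frame *fp);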
127	| After a fork we jump here directly from resume,
128	| so that %d1 contains the previous task
129	| schedule_tail now used regardless of CONFIG_SMP
130ENTRY(ret_from_fork)
131	movel	%d1,%sp@-
132	jsr	schedule_tail
133	addql	#4,%sp
134	jra	ret_from_exception
135
136ENTRY(ret_from_kernel_thread)
137	| a3 contains the kernel thread payload, d7 - its argument
138	movel	%d1,%sp@-
139	jsr	schedule_tail
140	movel	%d7,(%sp)
141	jsr	%a3@
142	addql	#4,%sp
143	jra	ret_from_exception
144
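Editor's note: a hedged C model of ret_from_kernel_thread above. %d1 still carries the previous task handed over by resume, and the registers named in the comment (%a3 for the payload, %d7 for its argument) are assumed to have been staged by the arch's copy_thread(); schedule_tail() is the scheduler's post-switch hook (declaration simplified here).

	struct task_struct;
	void schedule_tail(struct task_struct *prev);	/* post-switch bookkeeping */

	/* Hedged model, not the actual code path: finish the switch, then
	 * run the kernel-thread payload with its argument. */
	static void kernel_thread_start_model(struct task_struct *prev,
					      int (*fn)(void *),	/* was in %a3 */
					      void *arg)		/* was in %d7 */
	{
		schedule_tail(prev);	/* prev arrives in %d1 from resume */
		fn(arg);		/* the thread function */
		/* the real code then falls into ret_from_exception */
	}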
145#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
146
147#ifdef TRAP_DBG_INTERRUPT
148
149.globl dbginterrupt
150ENTRY(dbginterrupt)
151	SAVE_ALL_INT
152	GET_CURRENT(%d0)
153	movel	%sp,%sp@- 		/* stack frame pointer argument */
154	jsr	dbginterrupt_c
155	addql	#4,%sp
156	jra	ret_from_exception
157#endif
158
159ENTRY(reschedule)
160	/* save top of frame */
161	pea	%sp@
162	jbsr	set_esp0
163	addql	#4,%sp
164	pea	ret_from_exception
165	jmp	schedule
166
167ENTRY(ret_from_user_signal)
168	moveq #__NR_sigreturn,%d0
169	trap #0
170
171ENTRY(ret_from_user_rt_signal)
172	movel #__NR_rt_sigreturn,%d0
173	trap #0
174
175#else
176
177do_trace_entry:
178	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
179	subql	#4,%sp
180	SAVE_SWITCH_STACK
181	jbsr	syscall_trace_enter
182	RESTORE_SWITCH_STACK
183	addql	#4,%sp
184	addql	#1,%d0			| optimization for cmpil #-1,%d0
185	jeq	ret_from_syscall
186	movel	%sp@(PT_OFF_ORIG_D0),%d0
187	cmpl	#NR_syscalls,%d0
188	jcs	syscall
189	jra	ret_from_syscall
190badsys:
191	movel	#-ENOSYS,%sp@(PT_OFF_D0)
192	jra	ret_from_syscall
193
194do_trace_exit:
195	subql	#4,%sp
196	SAVE_SWITCH_STACK
197	jbsr	syscall_trace_leave
198	RESTORE_SWITCH_STACK
199	addql	#4,%sp
200	jra	.Lret_from_exception
201
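Editor's note: a hedged reading of the do_trace_entry tail above. syscall_trace_enter() is assumed to return the syscall number to run, or -1 when a tracer or seccomp wants the call suppressed; "addql #1" followed by a zero test is just a shorter encoding of "cmpil #-1,%d0", as the comment says, and the result slot was pre-loaded with -ENOSYS at entry.

	/* Hedged C model of the post-trace decision in do_trace_entry:
	 * returns the syscall number to dispatch, or -1 to skip the call. */
	static long trace_entry_model(long trace_ret, unsigned long orig_d0,
				      unsigned long nr_syscalls)
	{
		if (trace_ret == -1)		/* tracer/seccomp rejected the call */
			return -1;		/* straight to ret_from_syscall */
		if (orig_d0 >= nr_syscalls)	/* out of range: keep -ENOSYS */
			return -1;
		return (long)orig_d0;		/* "jcs syscall": dispatch it */
	}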
202ENTRY(system_call)
203	SAVE_ALL_SYS
204
205	GET_CURRENT(%d1)
206	movel	%d1,%a1
207
208	| save top of frame
209	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
210
211	| syscall trace?
212	tstb	%a1@(TINFO_FLAGS+2)
213	jmi	do_trace_entry
214	| seccomp filter active?
215	btst	#5,%a1@(TINFO_FLAGS+2)
216	bnes	do_trace_entry
217	cmpl	#NR_syscalls,%d0
218	jcc	badsys
219syscall:
220	jbsr	@(sys_call_table,%d0:l:4)@(0)
221	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
222ret_from_syscall:
223	|oriw	#0x0700,%sr
224	movel	%curptr@(TASK_STACK),%a1
225	movew	%a1@(TINFO_FLAGS+2),%d0
226	jne	syscall_exit_work
2271:	RESTORE_ALL
228
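Editor's note: a hedged C model of the bounds check and table dispatch in system_call above. The preceding tstb/btst on TINFO_FLAGS+2 route traced or seccomp-filtered tasks through do_trace_entry first; otherwise the number in %d0 is checked against NR_syscalls, and the memory-indirect "jbsr @(sys_call_table,%d0:l:4)@(0)" scales it by 4 (32-bit pointers), loads the entry and calls through it, with the result written back to the saved %d0 slot of pt_regs. The function-pointer typedef below is illustrative only.

	#include <errno.h>			/* ENOSYS, matching the badsys path */

	typedef long (*syscall_fn_t)(void);	/* illustrative: args stay in registers */
	extern syscall_fn_t sys_call_table[];	/* one 4-byte pointer per syscall */

	/* Hedged model of "cmpl #NR_syscalls / jcc badsys" plus the dispatch. */
	static long system_call_model(unsigned long d0, unsigned long nr_syscalls)
	{
		if (d0 >= nr_syscalls)
			return -ENOSYS;		/* stored at badsys */
		return sys_call_table[d0]();	/* @(sys_call_table,%d0:l:4)@(0) */
	}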
229syscall_exit_work:
230	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
231	bnes	1b			| if so, skip resched, signals
232	lslw	#1,%d0
233	jcs	do_trace_exit
234	jmi	do_delayed_trace
235	lslw	#8,%d0
236	jne	do_signal_return
237	pea	resume_userspace
238	jra	schedule
239
240
241ENTRY(ret_from_exception)
242.Lret_from_exception:
243	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
244	bnes	1f			| if so, skip resched, signals
245	| only allow interrupts when we are really the last one on the
246	| kernel stack, otherwise stack overflow can occur during
247	| heavy interrupt load
248	andw	#ALLOWINT,%sr
249
250resume_userspace:
251	movel	%curptr@(TASK_STACK),%a1
252	moveb	%a1@(TINFO_FLAGS+3),%d0
253	jne	exit_work
2541:	RESTORE_ALL
255
256exit_work:
257	| save top of frame
258	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
259	lslb	#1,%d0
260	jne	do_signal_return
261	pea	resume_userspace
262	jra	schedule
263
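Editor's note: a hedged C model of the flag tests in resume_userspace/exit_work above, assuming the usual m68k thread_info layout in which TIF_NEED_RESCHED is bit 7 and the signal/notify flags sit in bits 0..6 of the byte loaded from TINFO_FLAGS+3. Shifting that byte left by one discards the reschedule bit, so a non-zero remainder means signal work is pending.

	/* Hedged model of "moveb ... / jne exit_work" and
	 * "lslb #1,%d0 / jne do_signal_return". */
	enum exit_action { EXIT_RESTORE, EXIT_SIGNAL_RETURN, EXIT_SCHEDULE };

	static enum exit_action exit_work_model(unsigned char low_flags)
	{
		if (!low_flags)
			return EXIT_RESTORE;		/* 1: RESTORE_ALL */
		if ((unsigned char)(low_flags << 1))	/* any of bits 0..6 set? */
			return EXIT_SIGNAL_RETURN;	/* do_signal_return */
		return EXIT_SCHEDULE;			/* only bit 7: schedule() */
	}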
264
265do_signal_return:
266	|andw	#ALLOWINT,%sr
267	subql	#4,%sp			| dummy return address
268	SAVE_SWITCH_STACK
269	pea	%sp@(SWITCH_STACK_SIZE)
270	bsrl	do_notify_resume
271	addql	#4,%sp
272	RESTORE_SWITCH_STACK
273	addql	#4,%sp
274	jbra	resume_userspace
275
276do_delayed_trace:
277	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
278	pea	1			| send SIGTRAP
279	movel	%curptr,%sp@-
280	pea	LSIGTRAP
281	jbsr	send_sig
282	addql	#8,%sp
283	addql	#4,%sp
284	jbra	resume_userspace
285
286
287/* This is the main interrupt handler for autovector interrupts */
288
289ENTRY(auto_inthandler)
290	SAVE_ALL_INT
291	GET_CURRENT(%d0)
292					|  put exception # in d0
293	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
294	subw	#VEC_SPUR,%d0
295
296	movel	%sp,%sp@-
297	movel	%d0,%sp@-		|  put vector # on stack
298auto_irqhandler_fixup = . + 2
299	jsr	do_IRQ			|  process the IRQ
300	addql	#8,%sp			|  pop parameters off stack
301	jra	ret_from_exception
302
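Editor's note: a hedged C model of the vector extraction in auto_inthandler above, assuming the usual 68k frame layout: the format/vector word keeps the frame format in its top 4 bits and the vector offset (vector number times 4) in the low 12 bits, so the 10-bit field starting at bit 4 is the vector number itself, and subtracting VEC_SPUR yields the autovector IRQ number passed to do_IRQ().

	/* Hedged model of "bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0"
	 * followed by "subw #VEC_SPUR,%d0". */
	static unsigned int auto_irq_from_fv(unsigned short format_vector,
					     unsigned int vec_spur)	/* VEC_SPUR */
	{
		unsigned int vector = (format_vector >> 2) & 0x3ff;	/* bits 11..2 */

		return vector - vec_spur;	/* IRQ number handed to do_IRQ() */
	}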
303/* Handler for user defined interrupt vectors */
304
305ENTRY(user_inthandler)
306	SAVE_ALL_INT
307	GET_CURRENT(%d0)
308					|  put exception # in d0
309	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
310user_irqvec_fixup = . + 2
311	subw	#VEC_USER,%d0
312
313	movel	%sp,%sp@-
314	movel	%d0,%sp@-		|  put vector # on stack
315	jsr	do_IRQ			|  process the IRQ
316	addql	#8,%sp			|  pop parameters off stack
317	jra	ret_from_exception
318
319/* Handler for uninitialized and spurious interrupts */
320
321ENTRY(bad_inthandler)
322	SAVE_ALL_INT
323	GET_CURRENT(%d0)
324
325	movel	%sp,%sp@-
326	jsr	handle_badint
327	addql	#4,%sp
328	jra	ret_from_exception
329
330resume:
331	/*
332	 * Beware - when entering resume, prev (the current task) is
333	 * in a0, next (the new task) is in a1, so don't change these
334	 * registers until their contents are no longer needed.
335	 */
336
337	/* save sr */
338	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)
339
340	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
341	movec	%sfc,%d0
342	movew	%d0,%a0@(TASK_THREAD+THREAD_FC)
343
344	/* save usp */
345	/* it is better to use a movel here instead of a movew 8*) */
346	movec	%usp,%d0
347	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)
348
349	/* save non-scratch registers on stack */
350	SAVE_SWITCH_STACK
351
352	/* save current kernel stack pointer */
353	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)
354
355	/* save floating point context */
356#ifndef CONFIG_M68KFPU_EMU_ONLY
357#ifdef CONFIG_M68KFPU_EMU
358	tstl	m68k_fputype
359	jeq	3f
360#endif
361	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)
362
363#if defined(CONFIG_M68060)
364#if !defined(CPU_M68060_ONLY)
365	btst	#3,m68k_cputype+3
366	beqs	1f
367#endif
368	/* The 060 FPU keeps status in bits 15-8 of the first longword */
369	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
370	jeq	3f
371#if !defined(CPU_M68060_ONLY)
372	jra	2f
373#endif
374#endif /* CONFIG_M68060 */
375#if !defined(CPU_M68060_ONLY)
3761:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
377	jeq	3f
378#endif
3792:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
380	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3813:
382#endif	/* CONFIG_M68KFPU_EMU_ONLY */
383	/* Return previous task in %d1 */
384	movel	%curptr,%d1
385
386	/* switch to new task (a1 contains new task) */
387	movel	%a1,%curptr
388
389	/* restore floating point context */
390#ifndef CONFIG_M68KFPU_EMU_ONLY
391#ifdef CONFIG_M68KFPU_EMU
392	tstl	m68k_fputype
393	jeq	4f
394#endif
395#if defined(CONFIG_M68060)
396#if !defined(CPU_M68060_ONLY)
397	btst	#3,m68k_cputype+3
398	beqs	1f
399#endif
400	/* The 060 FPU keeps status in bits 15-8 of the first longword */
401	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
402	jeq	3f
403#if !defined(CPU_M68060_ONLY)
404	jra	2f
405#endif
406#endif /* CONFIG_M68060 */
407#if !defined(CPU_M68060_ONLY)
4081:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
409	jeq	3f
410#endif
4112:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
412	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
4133:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4144:
415#endif	/* CONFIG_M68KFPU_EMU_ONLY */
416
417	/* restore the kernel stack pointer */
418	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp
419
420	/* restore non-scratch registers */
421	RESTORE_SWITCH_STACK
422
423	/* restore user stack pointer */
424	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
425	movel	%a0,%usp
426
427	/* restore fs (sfc,%dfc) */
428	movew	%a1@(TASK_THREAD+THREAD_FC),%a0
429	movec	%a0,%sfc
430	movec	%a0,%dfc
431
432	/* restore status register */
433	movew	%a1@(TASK_THREAD+THREAD_SR),%sr
434
435	rts
436
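Editor's note: the register contract spelled out at the top of resume() (prev in %a0, next in %a1, the previous task handed back in %d1 for ret_from_fork) is established by the arch's switch_to() wrapper. Below is a hedged sketch of such a caller, not the kernel's actual macro; constraint and clobber choices are assumptions.

	struct task_struct;

	/* Hedged sketch of a resume() caller honouring the a0/a1/d1 contract. */
	static struct task_struct *switch_to_model(struct task_struct *prev,
						   struct task_struct *next)
	{
		register struct task_struct *a0 __asm__ ("a0") = prev;
		register struct task_struct *a1 __asm__ ("a1") = next;
		register struct task_struct *d1 __asm__ ("d1");

		__asm__ __volatile__ ("jbsr resume"
				      : "+a" (a0), "+a" (a1), "=d" (d1)
				      :
				      : "d0", "memory");
		return d1;	/* the task we just switched away from */
	}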
437#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
arch/m68k/kernel/entry.S (v5.14.15)
  1/* -*- mode: asm -*-
  2 *
  3 *  linux/arch/m68k/kernel/entry.S
  4 *
  5 *  Copyright (C) 1991, 1992  Linus Torvalds
  6 *
  7 * This file is subject to the terms and conditions of the GNU General Public
  8 * License.  See the file README.legal in the main directory of this archive
  9 * for more details.
 10 *
 11 * Linux/m68k support by Hamish Macdonald
 12 *
 13 * 68060 fixes by Jesper Skov
 14 *
 15 */
 16
 17/*
 18 * entry.S  contains the system-call and fault low-level handling routines.
 19 * This also contains the timer-interrupt handler, as well as all interrupts
 20 * and faults that can result in a task-switch.
 21 *
 22 * NOTE: This code handles signal-recognition, which happens every time
 23 * after a timer-interrupt and after each system call.
 24 *
 25 */
 26
 27/*
 28 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 29 *               all pointers that used to be 'current' are now entry
 30 *               number 0 in the 'current_set' list.
 31 *
  32 *  6/05/00 RZ:	 added writeback completion after return from sighandler
 33 *		 for 68040
 34 */
 35
 36#include <linux/linkage.h>
 37#include <asm/errno.h>
 38#include <asm/setup.h>
 39#include <asm/segment.h>
 40#include <asm/traps.h>
 41#include <asm/unistd.h>
 42#include <asm/asm-offsets.h>
 43#include <asm/entry.h>
 44
 45.globl system_call, buserr, trap, resume
 46.globl sys_call_table
 47.globl __sys_fork, __sys_clone, __sys_vfork
 48.globl bad_interrupt
 49.globl auto_irqhandler_fixup
 50.globl user_irqvec_fixup
 51
 52.text
 53ENTRY(__sys_fork)
 54	SAVE_SWITCH_STACK
 55	jbsr	sys_fork
 56	lea     %sp@(24),%sp
 57	rts
 58
 59ENTRY(__sys_clone)
 60	SAVE_SWITCH_STACK
 61	pea	%sp@(SWITCH_STACK_SIZE)
 62	jbsr	m68k_clone
 63	lea     %sp@(28),%sp
 64	rts
 65
 66ENTRY(__sys_vfork)
 67	SAVE_SWITCH_STACK
 68	jbsr	sys_vfork
 69	lea     %sp@(24),%sp
 70	rts
 71
 72ENTRY(__sys_clone3)
 73	SAVE_SWITCH_STACK
 74	pea	%sp@(SWITCH_STACK_SIZE)
 75	jbsr	m68k_clone3
 76	lea	%sp@(28),%sp
 77	rts
 78
 79ENTRY(sys_sigreturn)
 80	SAVE_SWITCH_STACK
 81	movel	%sp,%sp@-		  | switch_stack pointer
 82	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
 83	jbsr	do_sigreturn
 84	addql	#8,%sp
 85	RESTORE_SWITCH_STACK
 86	rts
 87
 88ENTRY(sys_rt_sigreturn)
 89	SAVE_SWITCH_STACK
 90	movel	%sp,%sp@-		  | switch_stack pointer
 91	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
 92	jbsr	do_rt_sigreturn
 93	addql	#8,%sp
 94	RESTORE_SWITCH_STACK
 95	rts
 96
 97ENTRY(buserr)
 98	SAVE_ALL_INT
 99	GET_CURRENT(%d0)
100	movel	%sp,%sp@-		| stack frame pointer argument
101	jbsr	buserr_c
102	addql	#4,%sp
103	jra	ret_from_exception
104
105ENTRY(trap)
106	SAVE_ALL_INT
107	GET_CURRENT(%d0)
108	movel	%sp,%sp@-		| stack frame pointer argument
109	jbsr	trap_c
110	addql	#4,%sp
111	jra	ret_from_exception
112
113	| After a fork we jump here directly from resume,
114	| so that %d1 contains the previous task
115	| schedule_tail now used regardless of CONFIG_SMP
116ENTRY(ret_from_fork)
117	movel	%d1,%sp@-
118	jsr	schedule_tail
119	addql	#4,%sp
120	jra	ret_from_exception
121
122ENTRY(ret_from_kernel_thread)
123	| a3 contains the kernel thread payload, d7 - its argument
124	movel	%d1,%sp@-
125	jsr	schedule_tail
126	movel	%d7,(%sp)
127	jsr	%a3@
128	addql	#4,%sp
129	jra	ret_from_exception
130
131#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
132
133#ifdef TRAP_DBG_INTERRUPT
134
135.globl dbginterrupt
136ENTRY(dbginterrupt)
137	SAVE_ALL_INT
138	GET_CURRENT(%d0)
139	movel	%sp,%sp@- 		/* stack frame pointer argument */
140	jsr	dbginterrupt_c
141	addql	#4,%sp
142	jra	ret_from_exception
143#endif
144
145ENTRY(reschedule)
146	/* save top of frame */
147	pea	%sp@
148	jbsr	set_esp0
149	addql	#4,%sp
150	pea	ret_from_exception
151	jmp	schedule
152
153ENTRY(ret_from_user_signal)
154	moveq #__NR_sigreturn,%d0
155	trap #0
156
157ENTRY(ret_from_user_rt_signal)
158	movel #__NR_rt_sigreturn,%d0
159	trap #0
160
161#else
162
163do_trace_entry:
164	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
165	subql	#4,%sp
166	SAVE_SWITCH_STACK
167	jbsr	syscall_trace
168	RESTORE_SWITCH_STACK
169	addql	#4,%sp
170	movel	%sp@(PT_OFF_ORIG_D0),%d0
171	cmpl	#NR_syscalls,%d0
172	jcs	syscall
173badsys:
174	movel	#-ENOSYS,%sp@(PT_OFF_D0)
175	jra	ret_from_syscall
176
177do_trace_exit:
178	subql	#4,%sp
179	SAVE_SWITCH_STACK
180	jbsr	syscall_trace
181	RESTORE_SWITCH_STACK
182	addql	#4,%sp
183	jra	.Lret_from_exception
184
185ENTRY(ret_from_signal)
186	movel	%curptr@(TASK_STACK),%a1
187	tstb	%a1@(TINFO_FLAGS+2)
188	jge	1f
189	lea	%sp@(SWITCH_STACK_SIZE),%a1
190	movel	%a1,%curptr@(TASK_THREAD+THREAD_ESP0)
191	jbsr	syscall_trace
1921:	RESTORE_SWITCH_STACK
193	addql	#4,%sp
194/* on 68040 complete pending writebacks if any */
195#ifdef CONFIG_M68040
196	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0
197	subql	#7,%d0				| bus error frame ?
198	jbne	1f
199	movel	%sp,%sp@-
200	jbsr	berr_040cleanup
201	addql	#4,%sp
2021:
203#endif
204	jra	.Lret_from_exception
205
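Editor's note: a hedged C model of the CONFIG_M68040 block in ret_from_signal above (this path only exists in the older listing). The top 4 bits of the format/vector word are the exception frame format, and format 7 is assumed here to be the 68040 access-error frame whose pending writebacks berr_040cleanup() completes before returning to the interrupted context.

	/* Hedged model of "bfextu {#0,#4}" plus "subql #7 / jbne 1f". */
	static int frame_needs_040_cleanup(unsigned short format_vector)
	{
		return (format_vector >> 12) == 7;	/* 68040 access-error frame */
	}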
206ENTRY(system_call)
207	SAVE_ALL_SYS
208
209	GET_CURRENT(%d1)
210	movel	%d1,%a1
211
212	| save top of frame
213	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
214
215	| syscall trace?
216	tstb	%a1@(TINFO_FLAGS+2)
217	jmi	do_trace_entry
218	cmpl	#NR_syscalls,%d0
219	jcc	badsys
220syscall:
221	jbsr	@(sys_call_table,%d0:l:4)@(0)
222	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
223ret_from_syscall:
224	|oriw	#0x0700,%sr
225	movel	%curptr@(TASK_STACK),%a1
226	movew	%a1@(TINFO_FLAGS+2),%d0
227	jne	syscall_exit_work
2281:	RESTORE_ALL
229
230syscall_exit_work:
231	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
232	bnes	1b			| if so, skip resched, signals
233	lslw	#1,%d0
234	jcs	do_trace_exit
235	jmi	do_delayed_trace
236	lslw	#8,%d0
237	jne	do_signal_return
238	pea	resume_userspace
239	jra	schedule
240
241
242ENTRY(ret_from_exception)
243.Lret_from_exception:
244	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
245	bnes	1f			| if so, skip resched, signals
246	| only allow interrupts when we are really the last one on the
247	| kernel stack, otherwise stack overflow can occur during
248	| heavy interrupt load
249	andw	#ALLOWINT,%sr
250
251resume_userspace:
252	movel	%curptr@(TASK_STACK),%a1
253	moveb	%a1@(TINFO_FLAGS+3),%d0
254	jne	exit_work
2551:	RESTORE_ALL
256
257exit_work:
258	| save top of frame
259	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
260	lslb	#1,%d0
261	jne	do_signal_return
262	pea	resume_userspace
263	jra	schedule
264
265
266do_signal_return:
267	|andw	#ALLOWINT,%sr
268	subql	#4,%sp			| dummy return address
269	SAVE_SWITCH_STACK
270	pea	%sp@(SWITCH_STACK_SIZE)
271	bsrl	do_notify_resume
272	addql	#4,%sp
273	RESTORE_SWITCH_STACK
274	addql	#4,%sp
275	jbra	resume_userspace
276
277do_delayed_trace:
278	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
279	pea	1			| send SIGTRAP
280	movel	%curptr,%sp@-
281	pea	LSIGTRAP
282	jbsr	send_sig
283	addql	#8,%sp
284	addql	#4,%sp
285	jbra	resume_userspace
286
287
288/* This is the main interrupt handler for autovector interrupts */
289
290ENTRY(auto_inthandler)
291	SAVE_ALL_INT
292	GET_CURRENT(%d0)
293					|  put exception # in d0
294	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
295	subw	#VEC_SPUR,%d0
296
297	movel	%sp,%sp@-
298	movel	%d0,%sp@-		|  put vector # on stack
299auto_irqhandler_fixup = . + 2
300	jsr	do_IRQ			|  process the IRQ
301	addql	#8,%sp			|  pop parameters off stack
302	jra	ret_from_exception
303
304/* Handler for user defined interrupt vectors */
305
306ENTRY(user_inthandler)
307	SAVE_ALL_INT
308	GET_CURRENT(%d0)
309					|  put exception # in d0
310	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
311user_irqvec_fixup = . + 2
312	subw	#VEC_USER,%d0
313
314	movel	%sp,%sp@-
315	movel	%d0,%sp@-		|  put vector # on stack
316	jsr	do_IRQ			|  process the IRQ
317	addql	#8,%sp			|  pop parameters off stack
318	jra	ret_from_exception
319
320/* Handler for uninitialized and spurious interrupts */
321
322ENTRY(bad_inthandler)
323	SAVE_ALL_INT
324	GET_CURRENT(%d0)
325
326	movel	%sp,%sp@-
327	jsr	handle_badint
328	addql	#4,%sp
329	jra	ret_from_exception
330
331resume:
332	/*
333	 * Beware - when entering resume, prev (the current task) is
334	 * in a0, next (the new task) is in a1, so don't change these
335	 * registers until their contents are no longer needed.
336	 */
337
338	/* save sr */
339	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)
340
341	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
342	movec	%sfc,%d0
343	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)
344
345	/* save usp */
346	/* it is better to use a movel here instead of a movew 8*) */
347	movec	%usp,%d0
348	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)
349
350	/* save non-scratch registers on stack */
351	SAVE_SWITCH_STACK
352
353	/* save current kernel stack pointer */
354	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)
355
356	/* save floating point context */
357#ifndef CONFIG_M68KFPU_EMU_ONLY
358#ifdef CONFIG_M68KFPU_EMU
359	tstl	m68k_fputype
360	jeq	3f
361#endif
362	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)
363
364#if defined(CONFIG_M68060)
365#if !defined(CPU_M68060_ONLY)
366	btst	#3,m68k_cputype+3
367	beqs	1f
368#endif
369	/* The 060 FPU keeps status in bits 15-8 of the first longword */
370	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
371	jeq	3f
372#if !defined(CPU_M68060_ONLY)
373	jra	2f
374#endif
375#endif /* CONFIG_M68060 */
376#if !defined(CPU_M68060_ONLY)
3771:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
378	jeq	3f
379#endif
3802:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
381	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3823:
383#endif	/* CONFIG_M68KFPU_EMU_ONLY */
384	/* Return previous task in %d1 */
385	movel	%curptr,%d1
386
387	/* switch to new task (a1 contains new task) */
388	movel	%a1,%curptr
389
390	/* restore floating point context */
391#ifndef CONFIG_M68KFPU_EMU_ONLY
392#ifdef CONFIG_M68KFPU_EMU
393	tstl	m68k_fputype
394	jeq	4f
395#endif
396#if defined(CONFIG_M68060)
397#if !defined(CPU_M68060_ONLY)
398	btst	#3,m68k_cputype+3
399	beqs	1f
400#endif
401	/* The 060 FPU keeps status in bits 15-8 of the first longword */
402	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
403	jeq	3f
404#if !defined(CPU_M68060_ONLY)
405	jra	2f
406#endif
407#endif /* CONFIG_M68060 */
408#if !defined(CPU_M68060_ONLY)
4091:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
410	jeq	3f
411#endif
4122:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
413	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
4143:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4154:
416#endif	/* CONFIG_M68KFPU_EMU_ONLY */
417
418	/* restore the kernel stack pointer */
419	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp
420
421	/* restore non-scratch registers */
422	RESTORE_SWITCH_STACK
423
424	/* restore user stack pointer */
425	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
426	movel	%a0,%usp
427
428	/* restore fs (sfc,%dfc) */
429	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
430	movec	%a0,%sfc
431	movec	%a0,%dfc
432
433	/* restore status register */
434	movew	%a1@(TASK_THREAD+THREAD_SR),%sr
435
436	rts
437
438#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */