v5.9
/* -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S  contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 */

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:	 added writeback completion after return from sighandler
 *		 for 68040
 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl __sys_fork, __sys_clone, __sys_vfork
.globl bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup

.text
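/*
 * The fork-family wrappers below save a switch_stack frame so that the
 * full set of non-scratch registers is on the stack when the child is
 * set up; the frame is simply discarded on return, since the parent's
 * registers were never modified.
 */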
ENTRY(__sys_fork)
	SAVE_SWITCH_STACK
	jbsr	sys_fork
	lea     %sp@(24),%sp
	rts

ENTRY(__sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_clone
	lea     %sp@(28),%sp
	rts

ENTRY(__sys_vfork)
	SAVE_SWITCH_STACK
	jbsr	sys_vfork
	lea     %sp@(24),%sp
	rts

ENTRY(__sys_clone3)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_clone3
	lea	%sp@(28),%sp
	rts

ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%sp@-		  | switch_stack pointer
	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
	jbsr	do_sigreturn
	addql	#8,%sp
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%sp@-		  | switch_stack pointer
	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
	jbsr	do_rt_sigreturn
	addql	#8,%sp
	RESTORE_SWITCH_STACK
	rts

ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	buserr_c
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	trap_c
	addql	#4,%sp
	jra	ret_from_exception

	| After a fork we jump here directly from resume,
	| so that %d1 contains the previous task
	| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-
	jsr	schedule_tail
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(ret_from_kernel_thread)
	| a3 contains the kernel thread payload, d7 - its argument
	movel	%d1,%sp@-
	jsr	schedule_tail
	movel	%d7,(%sp)
	jsr	%a3@
	addql	#4,%sp
	jra	ret_from_exception

#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)

#ifdef TRAP_DBG_INTERRUPT

.globl dbginterrupt
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@- 		/* stack frame pointer argument */
	jsr	dbginterrupt_c
	addql	#4,%sp
	jra	ret_from_exception
#endif

ENTRY(reschedule)
	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp
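	| push ret_from_exception as the return address, so that when
	| schedule() returns it drops straight into the exception exit path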
	pea	ret_from_exception
	jmp	schedule

ENTRY(ret_from_user_signal)
	moveq #__NR_sigreturn,%d0
	trap #0

ENTRY(ret_from_user_rt_signal)
	movel #__NR_rt_sigreturn,%d0
	trap #0

#else

do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	movel	%sp@(PT_OFF_ORIG_D0),%d0
	cmpl	#NR_syscalls,%d0
	jcs	syscall
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall

do_trace_exit:
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	.Lret_from_exception

ENTRY(ret_from_signal)
	movel	%curptr@(TASK_STACK),%a1
	tstb	%a1@(TINFO_FLAGS+2)
	jge	1f
	jbsr	syscall_trace
1:	RESTORE_SWITCH_STACK
	addql	#4,%sp
/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0
	subql	#7,%d0				| bus error frame ?
	jbne	1f
	movel	%sp,%sp@-
	jbsr	berr_040cleanup
	addql	#4,%sp
1:
#endif
	jra	.Lret_from_exception

ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)
	movel	%d1,%a1

	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)

	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)
	jmi	do_trace_entry
	cmpl	#NR_syscalls,%d0
	jcc	badsys
syscall:
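	| memory-indirect call: fetch the handler pointer stored at
	| sys_call_table + %d0 * 4 and call it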
	jbsr	@(sys_call_table,%d0:l:4)@(0)
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0
	jne	syscall_exit_work
1:	RESTORE_ALL

syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
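	| %d0 still holds the low word of the thread flags (loaded at
	| ret_from_syscall); shift it so the pending-work bits can be
	| tested via the condition codes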
	lslw	#1,%d0
	jcs	do_trace_exit
	jmi	do_delayed_trace
	lslw	#8,%d0
	jne	do_signal_return
	pea	resume_userspace
	jra	schedule


ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr

resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0
	jne	exit_work
1:	RESTORE_ALL

exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0
	jne	do_signal_return
	pea	resume_userspace
	jra	schedule


do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	bsrl	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jbra	resume_userspace

do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	jbsr	send_sig
	addql	#8,%sp
	addql	#4,%sp
	jbra	resume_userspace


/* This is the main interrupt handler for autovector interrupts */

ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
	subw	#VEC_SPUR,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
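	| auto_irqhandler_fixup marks the address operand of the jsr below,
	| allowing platform setup code to patch in its own first-level handler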
auto_irqhandler_fixup = . + 2
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack
	jra	ret_from_exception

/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
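	| user_irqvec_fixup marks the immediate operand of the subw below,
	| allowing platform setup code to rebase the user interrupt vectors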
user_irqvec_fixup = . + 2
	subw	#VEC_USER,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack
	jra	ret_from_exception

/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)

	movel	%sp,%sp@-
	jsr	handle_badint
	addql	#4,%sp
	jra	ret_from_exception

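/*
 * resume() is the low-level context switch invoked from switch_to();
 * once the new task's kernel stack pointer has been loaded, the final
 * rts returns in the context of the next task.
 */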
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1, so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)

	/* save floating point context */
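	/*
	 * A zero in the byte tested below means fsave stored a null frame,
	 * i.e. there is no live FPU context, so saving the FPU data and
	 * control registers can be skipped.
	 */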
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr

	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr

	rts

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */