/* stray "Loading..." web-page artifact — not part of entry.S */
1/* -*- mode: asm -*-
2 *
3 * linux/arch/m68k/kernel/entry.S
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file README.legal in the main directory of this archive
9 * for more details.
10 *
11 * Linux/m68k support by Hamish Macdonald
12 *
13 * 68060 fixes by Jesper Skov
14 *
15 */
16
17/*
18 * entry.S contains the system-call and fault low-level handling routines.
19 * This also contains the timer-interrupt handler, as well as all interrupts
20 * and faults that can result in a task-switch.
21 *
22 * NOTE: This code handles signal-recognition, which happens every time
23 * after a timer-interrupt and after each system call.
24 *
25 */
26
27/*
28 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
29 * all pointers that used to be 'current' are now entry
30 * number 0 in the 'current_set' list.
31 *
32 * 6/05/00 RZ: added writeback completion after return from sighandler
33 * for 68040
34 */
35
36#include <linux/linkage.h>
37#include <asm/errno.h>
38#include <asm/setup.h>
39#include <asm/traps.h>
40#include <asm/unistd.h>
41#include <asm/asm-offsets.h>
42#include <asm/entry.h>
43
44.globl system_call, buserr, trap, resume
45.globl sys_call_table
46.globl __sys_fork, __sys_clone, __sys_vfork
47.globl bad_interrupt
48.globl auto_irqhandler_fixup
49.globl user_irqvec_fixup
50
51.text
52ENTRY(__sys_fork)
53 SAVE_SWITCH_STACK
54 jbsr sys_fork
55 lea %sp@(24),%sp
56 rts
57
58ENTRY(__sys_clone)
59 SAVE_SWITCH_STACK
60 pea %sp@(SWITCH_STACK_SIZE)
61 jbsr m68k_clone
62 lea %sp@(28),%sp
63 rts
64
65ENTRY(__sys_vfork)
66 SAVE_SWITCH_STACK
67 jbsr sys_vfork
68 lea %sp@(24),%sp
69 rts
70
71ENTRY(__sys_clone3)
72 SAVE_SWITCH_STACK
73 pea %sp@(SWITCH_STACK_SIZE)
74 jbsr m68k_clone3
75 lea %sp@(28),%sp
76 rts
77
78ENTRY(sys_sigreturn)
79 SAVE_SWITCH_STACK
80 movel %sp,%a1 | switch_stack pointer
81 lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
82 lea %sp@(-84),%sp | leave a gap
83 movel %a1,%sp@-
84 movel %a0,%sp@-
85 jbsr do_sigreturn
86 jra 1f | shared with rt_sigreturn()
87
88ENTRY(sys_rt_sigreturn)
89 SAVE_SWITCH_STACK
90 movel %sp,%a1 | switch_stack pointer
91 lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
92 lea %sp@(-84),%sp | leave a gap
93 movel %a1,%sp@-
94 movel %a0,%sp@-
95 | stack contents:
96 | [original pt_regs address] [original switch_stack address]
97 | [gap] [switch_stack] [pt_regs] [exception frame]
98 jbsr do_rt_sigreturn
99
1001:
101 | stack contents now:
102 | [original pt_regs address] [original switch_stack address]
103 | [unused part of the gap] [moved switch_stack] [moved pt_regs]
104 | [replacement exception frame]
105 | return value of do_{rt_,}sigreturn() points to moved switch_stack.
106
107 movel %d0,%sp | discard the leftover junk
108 RESTORE_SWITCH_STACK
109 | stack contents now is just [syscall return address] [pt_regs] [frame]
110 | return pt_regs.d0
111 movel %sp@(PT_OFF_D0+4),%d0
112 rts
113
114ENTRY(buserr)
115 SAVE_ALL_INT
116 GET_CURRENT(%d0)
117 movel %sp,%sp@- | stack frame pointer argument
118 jbsr buserr_c
119 addql #4,%sp
120 jra ret_from_exception
121
122ENTRY(trap)
123 SAVE_ALL_INT
124 GET_CURRENT(%d0)
125 movel %sp,%sp@- | stack frame pointer argument
126 jbsr trap_c
127 addql #4,%sp
128 jra ret_from_exception
129
130 | After a fork we jump here directly from resume,
131 | so that %d1 contains the previous task
132 | schedule_tail now used regardless of CONFIG_SMP
133ENTRY(ret_from_fork)
134 movel %d1,%sp@-
135 jsr schedule_tail
136 addql #4,%sp
137 jra ret_from_exception
138
139ENTRY(ret_from_kernel_thread)
140 | a3 contains the kernel thread payload, d7 - its argument
141 movel %d1,%sp@-
142 jsr schedule_tail
143 movel %d7,(%sp)
144 jsr %a3@
145 addql #4,%sp
146 jra ret_from_exception
147
148#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
149
| Variants used only on ColdFire or no-MMU configurations (this section
| is inside the #if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU) arm).
150#ifdef TRAP_DBG_INTERRUPT
151
152.globl dbginterrupt
153ENTRY(dbginterrupt)
154 SAVE_ALL_INT
155 GET_CURRENT(%d0)
156 movel %sp,%sp@- /* stack frame pointer argument */
157 jsr dbginterrupt_c
158 addql #4,%sp
159 jra ret_from_exception
160#endif
161
162ENTRY(reschedule)
163 /* save top of frame */
164 pea %sp@
165 jbsr set_esp0
166 addql #4,%sp
167 pea ret_from_exception | schedule() will "return" to the exception exit
168 jmp schedule
169
| Return paths from a user-mode signal handler: simply issue the
| corresponding sigreturn system call via trap #0.
170ENTRY(ret_from_user_signal)
171 moveq #__NR_sigreturn,%d0
172 trap #0
173
174ENTRY(ret_from_user_rt_signal)
175 movel #__NR_rt_sigreturn,%d0
176 trap #0
177
178#else
179
| Syscall-trace slow path: report entry to the tracer, then re-read and
| re-range-check the syscall number (the tracer may have modified it).
180do_trace_entry:
181 movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
182 subql #4,%sp
183 SAVE_SWITCH_STACK
184 jbsr syscall_trace_enter
185 RESTORE_SWITCH_STACK
186 addql #4,%sp
187 movel %sp@(PT_OFF_ORIG_D0),%d0
188 cmpl #NR_syscalls,%d0
189 jcs syscall
190badsys:
191 movel #-ENOSYS,%sp@(PT_OFF_D0)
192 jra ret_from_syscall
193
194do_trace_exit:
195 subql #4,%sp
196 SAVE_SWITCH_STACK
197 jbsr syscall_trace_leave
198 RESTORE_SWITCH_STACK
199 addql #4,%sp
200 jra .Lret_from_exception
201
| Main system-call entry (trap #0).  Syscall number arrives in %d0.
202ENTRY(system_call)
203 SAVE_ALL_SYS
204
205 GET_CURRENT(%d1)
206 movel %d1,%a1
207
208 | save top of frame
209 movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
210
211 | syscall trace?
212 tstb %a1@(TINFO_FLAGS+2) | jmi tests the top bit of this flags byte
213 jmi do_trace_entry
214 cmpl #NR_syscalls,%d0
215 jcc badsys
216syscall:
217 jbsr @(sys_call_table,%d0:l:4)@(0) | indirect through the table entry
218 movel %d0,%sp@(PT_OFF_D0) | save the return value
219ret_from_syscall:
220 |oriw #0x0700,%sr
221 movel %curptr@(TASK_STACK),%a1
222 movew %a1@(TINFO_FLAGS+2),%d0 | low word of thread_info flags
223 jne syscall_exit_work
2241: RESTORE_ALL
225
| Slow syscall exit: %d0 holds thread_info flag bits; the shifts move
| individual TIF bits into the C and N condition flags so they can be
| tested without loading each bit separately.
226syscall_exit_work:
227 btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
228 bnes 1b | if so, skip resched, signals
229 lslw #1,%d0 | C = trace-exit bit, N = delayed-trace bit
230 jcs do_trace_exit
231 jmi do_delayed_trace
232 lslw #8,%d0 | remaining bits: signal / resched work
233 jne do_signal_return
234 pea resume_userspace
235 jra schedule
236
237
| Common exception exit: decide whether work (reschedule, signals,
| tracing) must be done before dropping back to user mode.
238ENTRY(ret_from_exception)
239.Lret_from_exception:
240 btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
241 bnes 1f | if so, skip resched, signals
242 | only allow interrupts when we are really the last one on the
243 | kernel stack, otherwise stack overflow can occur during
244 | heavy interrupt load
245 andw #ALLOWINT,%sr
246
247resume_userspace:
248 movel %curptr@(TASK_STACK),%a1
249 moveb %a1@(TINFO_FLAGS+3),%d0 | low byte of thread_info flags
250 jne exit_work
2511: RESTORE_ALL
252
253exit_work:
254 | save top of frame
255 movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
256 lslb #1,%d0 | signal-pending bit -> remaining set bits
257 jne do_signal_return
258 pea resume_userspace
259 jra schedule
260
261
262do_signal_return:
263 |andw #ALLOWINT,%sr
264 subql #4,%sp | dummy return address
265 SAVE_SWITCH_STACK
266 pea %sp@(SWITCH_STACK_SIZE)
267 bsrl do_notify_resume
268 addql #4,%sp
269 RESTORE_SWITCH_STACK
270 addql #4,%sp
271 jbra resume_userspace
272
273do_delayed_trace:
274 bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR
275 pea 1 | send SIGTRAP
276 movel %curptr,%sp@-
277 pea LSIGTRAP
278 jbsr send_sig
279 addql #8,%sp
280 addql #4,%sp
281 jbra resume_userspace
282
283
284/* This is the main interrupt handler for autovector interrupts */
285
286ENTRY(auto_inthandler)
287 SAVE_ALL_INT
288 GET_CURRENT(%d0)
289 | put exception # in d0
290 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
291 subw #VEC_SPUR,%d0
292
293 movel %sp,%sp@-
294 movel %d0,%sp@- | put vector # on stack
| the fixup label points 2 bytes into the jsr, at its operand, so boot
| code can patch the handler address in place (exported via .globl above)
295auto_irqhandler_fixup = . + 2
296 jsr do_IRQ | process the IRQ
297 addql #8,%sp | pop parameters off stack
298 jra ret_from_exception
299
300/* Handler for user defined interrupt vectors */
301
302ENTRY(user_inthandler)
303 SAVE_ALL_INT
304 GET_CURRENT(%d0)
305 | put exception # in d0
306 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
| fixup label targets the subw immediate so the vector base can be patched
307user_irqvec_fixup = . + 2
308 subw #VEC_USER,%d0
309
310 movel %sp,%sp@-
311 movel %d0,%sp@- | put vector # on stack
312 jsr do_IRQ | process the IRQ
313 addql #8,%sp | pop parameters off stack
314 jra ret_from_exception
315
316/* Handler for uninitialized and spurious interrupts */
317
318ENTRY(bad_inthandler)
319 SAVE_ALL_INT
320 GET_CURRENT(%d0)
321
322 movel %sp,%sp@-
323 jsr handle_badint
324 addql #4,%sp
325 jra ret_from_exception
326
| Context switch.  In: %a0 = prev task, %a1 = next task.
| Out: %d1 = prev task (consumed by ret_from_fork/schedule_tail).
| Saves SR, fc, usp, callee-saved regs and kernel SP of prev; restores
| the same for next.  FPU state is saved/restored only when the fsave
| frame is non-null (a zero first byte/word means no live FPU state).
327resume:
328 /*
329 * Beware - when entering resume, prev (the current task) is
330 * in a0, next (the new task) is in a1,so don't change these
331 * registers until their contents are no longer needed.
332 */
333
334 /* save sr */
335 movew %sr,%a0@(TASK_THREAD+THREAD_SR)
336
337 /* save fs (sfc,%dfc) (may be pointing to kernel memory) */
338 movec %sfc,%d0
339 movew %d0,%a0@(TASK_THREAD+THREAD_FC)
340
341 /* save usp */
342 /* it is better to use a movel here instead of a movew 8*) */
343 movec %usp,%d0
344 movel %d0,%a0@(TASK_THREAD+THREAD_USP)
345
346 /* save non-scratch registers on stack */
347 SAVE_SWITCH_STACK
348
349 /* save current kernel stack pointer */
350 movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
351
352 /* save floating point context */
353#ifndef CONFIG_M68KFPU_EMU_ONLY
354#ifdef CONFIG_M68KFPU_EMU
355 tstl m68k_fputype
356 jeq 3f
357#endif
358 fsave %a0@(TASK_THREAD+THREAD_FPSTATE)
359
360#if defined(CONFIG_M68060)
361#if !defined(CPU_M68060_ONLY)
362 btst #3,m68k_cputype+3
363 beqs 1f
364#endif
365 /* The 060 FPU keeps status in bits 15-8 of the first longword */
366 tstb %a0@(TASK_THREAD+THREAD_FPSTATE+2)
367 jeq 3f
368#if !defined(CPU_M68060_ONLY)
369 jra 2f
370#endif
371#endif /* CONFIG_M68060 */
372#if !defined(CPU_M68060_ONLY)
3731: tstb %a0@(TASK_THREAD+THREAD_FPSTATE)
374 jeq 3f
375#endif
3762: fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
377 fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3783:
379#endif /* CONFIG_M68KFPU_EMU_ONLY */
380 /* Return previous task in %d1 */
381 movel %curptr,%d1
382
383 /* switch to new task (a1 contains new task) */
384 movel %a1,%curptr
385
386 /* restore floating point context */
387#ifndef CONFIG_M68KFPU_EMU_ONLY
388#ifdef CONFIG_M68KFPU_EMU
389 tstl m68k_fputype
390 jeq 4f
391#endif
392#if defined(CONFIG_M68060)
393#if !defined(CPU_M68060_ONLY)
394 btst #3,m68k_cputype+3
395 beqs 1f
396#endif
397 /* The 060 FPU keeps status in bits 15-8 of the first longword */
398 tstb %a1@(TASK_THREAD+THREAD_FPSTATE+2)
399 jeq 3f
400#if !defined(CPU_M68060_ONLY)
401 jra 2f
402#endif
403#endif /* CONFIG_M68060 */
404#if !defined(CPU_M68060_ONLY)
4051: tstb %a1@(TASK_THREAD+THREAD_FPSTATE)
406 jeq 3f
407#endif
4082: fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
409 fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
4103: frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4114:
412#endif /* CONFIG_M68KFPU_EMU_ONLY */
413
414 /* restore the kernel stack pointer */
415 movel %a1@(TASK_THREAD+THREAD_KSP),%sp
416
417 /* restore non-scratch registers */
418 RESTORE_SWITCH_STACK
419
420 /* restore user stack pointer */
421 movel %a1@(TASK_THREAD+THREAD_USP),%a0
422 movel %a0,%usp
423
424 /* restore fs (sfc,%dfc) */
425 movew %a1@(TASK_THREAD+THREAD_FC),%a0
426 movec %a0,%sfc
427 movec %a0,%dfc
428
429 /* restore status register */
430 movew %a1@(TASK_THREAD+THREAD_SR),%sr
431
432 rts
433
434#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
1/* SPDX-License-Identifier: GPL-2.0-or-later
2 * -*- mode: asm -*-
3 *
4 * linux/arch/m68k/kernel/entry.S
5 *
6 * Copyright (C) 1991, 1992 Linus Torvalds
7 *
8 * Linux/m68k support by Hamish Macdonald
9 *
10 * 68060 fixes by Jesper Skov
11 *
12 */
13
14/*
15 * entry.S contains the system-call and fault low-level handling routines.
16 * This also contains the timer-interrupt handler, as well as all interrupts
17 * and faults that can result in a task-switch.
18 *
19 * NOTE: This code handles signal-recognition, which happens every time
20 * after a timer-interrupt and after each system call.
21 *
22 */
23
24/*
25 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
26 * all pointers that used to be 'current' are now entry
27 * number 0 in the 'current_set' list.
28 *
29 * 6/05/00 RZ: added writeback completion after return from sighandler
30 * for 68040
31 */
32
33#include <linux/linkage.h>
34#include <asm/errno.h>
35#include <asm/setup.h>
36#include <asm/traps.h>
37#include <asm/unistd.h>
38#include <asm/asm-offsets.h>
39#include <asm/entry.h>
40
41.globl system_call, buserr, trap, resume
42.globl sys_call_table
43.globl __sys_fork, __sys_clone, __sys_vfork
44.globl bad_interrupt
45.globl auto_irqhandler_fixup
46.globl user_irqvec_fixup
47
48.text
| Syscall entry stubs.  Each one saves the "switch_stack" registers (the
| callee-saved set that SAVE_ALL_* does not put into pt_regs) so the C
| helper can copy a complete register image into the child, then drops
| them again before rts: 24 bytes = the six registers pushed by
| SAVE_SWITCH_STACK; 28 bytes when a pointer argument was also pushed
| (clone/clone3, which pass the pt_regs address via pea).
49ENTRY(__sys_fork)
50 SAVE_SWITCH_STACK
51 jbsr sys_fork
52 lea %sp@(24),%sp
53 rts
54
55ENTRY(__sys_clone)
56 SAVE_SWITCH_STACK
57 pea %sp@(SWITCH_STACK_SIZE)
58 jbsr m68k_clone
59 lea %sp@(28),%sp
60 rts
61
62ENTRY(__sys_vfork)
63 SAVE_SWITCH_STACK
64 jbsr sys_vfork
65 lea %sp@(24),%sp
66 rts
67
68ENTRY(__sys_clone3)
69 SAVE_SWITCH_STACK
70 pea %sp@(SWITCH_STACK_SIZE)
71 jbsr m68k_clone3
72 lea %sp@(28),%sp
73 rts
74
| sigreturn/rt_sigreturn: the 84-byte gap below the saved registers gives
| the C code room to rebuild a replacement exception frame (which may be
| a different size than the original).  The C helper returns, in %d0, the
| address of the moved switch_stack, which becomes the new %sp.
75ENTRY(sys_sigreturn)
76 SAVE_SWITCH_STACK
77 movel %sp,%a1 | switch_stack pointer
78 lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
79 lea %sp@(-84),%sp | leave a gap
80 movel %a1,%sp@-
81 movel %a0,%sp@-
82 jbsr do_sigreturn
83 jra 1f | shared with rt_sigreturn()
84
85ENTRY(sys_rt_sigreturn)
86 SAVE_SWITCH_STACK
87 movel %sp,%a1 | switch_stack pointer
88 lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
89 lea %sp@(-84),%sp | leave a gap
90 movel %a1,%sp@-
91 movel %a0,%sp@-
92 | stack contents:
93 | [original pt_regs address] [original switch_stack address]
94 | [gap] [switch_stack] [pt_regs] [exception frame]
95 jbsr do_rt_sigreturn
96
971:
98 | stack contents now:
99 | [original pt_regs address] [original switch_stack address]
100 | [unused part of the gap] [moved switch_stack] [moved pt_regs]
101 | [replacement exception frame]
102 | return value of do_{rt_,}sigreturn() points to moved switch_stack.
103
104 movel %d0,%sp | discard the leftover junk
105 RESTORE_SWITCH_STACK
106 | stack contents now is just [syscall return address] [pt_regs] [frame]
107 | return pt_regs.d0
108 movel %sp@(PT_OFF_D0+4),%d0
109 rts
110
| Bus-error and generic trap entry: save the full interrupt frame, pass
| the frame address to the C handler, then take the common exception exit.
111ENTRY(buserr)
112 SAVE_ALL_INT
113 GET_CURRENT(%d0)
114 movel %sp,%sp@- | stack frame pointer argument
115 jbsr buserr_c
116 addql #4,%sp
117 jra ret_from_exception
118
119ENTRY(trap)
120 SAVE_ALL_INT
121 GET_CURRENT(%d0)
122 movel %sp,%sp@- | stack frame pointer argument
123 jbsr trap_c
124 addql #4,%sp
125 jra ret_from_exception
126
127 | After a fork we jump here directly from resume,
128 | so that %d1 contains the previous task
129 | schedule_tail now used regardless of CONFIG_SMP
130ENTRY(ret_from_fork)
131 movel %d1,%sp@-
132 jsr schedule_tail
133 addql #4,%sp
134 jra ret_from_exception
135
136ENTRY(ret_from_kernel_thread)
137 | a3 contains the kernel thread payload, d7 - its argument
138 movel %d1,%sp@-
139 jsr schedule_tail
140 movel %d7,(%sp) | reuse schedule_tail's arg slot for d7
141 jsr %a3@ | call the thread payload
142 addql #4,%sp
143 jra ret_from_exception
144
145#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
146
| Variants used only on ColdFire or no-MMU configurations (this section
| is inside the #if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU) arm).
147#ifdef TRAP_DBG_INTERRUPT
148
149.globl dbginterrupt
150ENTRY(dbginterrupt)
151 SAVE_ALL_INT
152 GET_CURRENT(%d0)
153 movel %sp,%sp@- /* stack frame pointer argument */
154 jsr dbginterrupt_c
155 addql #4,%sp
156 jra ret_from_exception
157#endif
158
159ENTRY(reschedule)
160 /* save top of frame */
161 pea %sp@
162 jbsr set_esp0
163 addql #4,%sp
164 pea ret_from_exception | schedule() will "return" to the exception exit
165 jmp schedule
166
| Return paths from a user-mode signal handler: simply issue the
| corresponding sigreturn system call via trap #0.
167ENTRY(ret_from_user_signal)
168 moveq #__NR_sigreturn,%d0
169 trap #0
170
171ENTRY(ret_from_user_rt_signal)
172 movel #__NR_rt_sigreturn,%d0
173 trap #0
174
175#else
176
| Syscall-trace slow path: report entry to the tracer; a -1 return from
| syscall_trace_enter (tested via the addql #1/jeq pair) means the tracer
| rejected the syscall, so skip straight to the exit path.  Otherwise
| re-read and re-range-check the syscall number (the tracer may have
| modified it).
177do_trace_entry:
178 movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
179 subql #4,%sp
180 SAVE_SWITCH_STACK
181 jbsr syscall_trace_enter
182 RESTORE_SWITCH_STACK
183 addql #4,%sp
184 addql #1,%d0 | optimization for cmpil #-1,%d0
185 jeq ret_from_syscall
186 movel %sp@(PT_OFF_ORIG_D0),%d0
187 cmpl #NR_syscalls,%d0
188 jcs syscall
189 jra ret_from_syscall
190badsys:
191 movel #-ENOSYS,%sp@(PT_OFF_D0)
192 jra ret_from_syscall
193
194do_trace_exit:
195 subql #4,%sp
196 SAVE_SWITCH_STACK
197 jbsr syscall_trace_leave
198 RESTORE_SWITCH_STACK
199 addql #4,%sp
200 jra .Lret_from_exception
201
| Main system-call entry (trap #0).  Syscall number arrives in %d0.
202ENTRY(system_call)
203 SAVE_ALL_SYS
204
205 GET_CURRENT(%d1)
206 movel %d1,%a1
207
208 | save top of frame
209 movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
210
211 | syscall trace?
212 tstb %a1@(TINFO_FLAGS+2) | jmi tests the top bit of this flags byte
213 jmi do_trace_entry
214 | seccomp filter active?
215 btst #5,%a1@(TINFO_FLAGS+2) | seccomp also takes the trace-entry path
216 bnes do_trace_entry
217 cmpl #NR_syscalls,%d0
218 jcc badsys
219syscall:
220 jbsr @(sys_call_table,%d0:l:4)@(0) | indirect through the table entry
221 movel %d0,%sp@(PT_OFF_D0) | save the return value
222ret_from_syscall:
223 |oriw #0x0700,%sr
224 movel %curptr@(TASK_STACK),%a1
225 movew %a1@(TINFO_FLAGS+2),%d0 | low word of thread_info flags
226 jne syscall_exit_work
2271: RESTORE_ALL
228
| Slow syscall exit: %d0 holds thread_info flag bits; the shifts move
| individual TIF bits into the C and N condition flags so they can be
| tested without loading each bit separately.
229syscall_exit_work:
230 btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
231 bnes 1b | if so, skip resched, signals
232 lslw #1,%d0 | C = trace-exit bit, N = delayed-trace bit
233 jcs do_trace_exit
234 jmi do_delayed_trace
235 lslw #8,%d0 | remaining bits: signal / resched work
236 jne do_signal_return
237 pea resume_userspace
238 jra schedule
239
240
| Common exception exit: decide whether work (reschedule, signals,
| tracing) must be done before dropping back to user mode.
241ENTRY(ret_from_exception)
242.Lret_from_exception:
243 btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
244 bnes 1f | if so, skip resched, signals
245 | only allow interrupts when we are really the last one on the
246 | kernel stack, otherwise stack overflow can occur during
247 | heavy interrupt load
248 andw #ALLOWINT,%sr
249
250resume_userspace:
251 movel %curptr@(TASK_STACK),%a1
252 moveb %a1@(TINFO_FLAGS+3),%d0 | low byte of thread_info flags
253 jne exit_work
2541: RESTORE_ALL
255
256exit_work:
257 | save top of frame
258 movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
259 lslb #1,%d0 | signal-pending bit -> remaining set bits
260 jne do_signal_return
261 pea resume_userspace
262 jra schedule
263
264
265do_signal_return:
266 |andw #ALLOWINT,%sr
267 subql #4,%sp | dummy return address
268 SAVE_SWITCH_STACK
269 pea %sp@(SWITCH_STACK_SIZE)
270 bsrl do_notify_resume
271 addql #4,%sp
272 RESTORE_SWITCH_STACK
273 addql #4,%sp
274 jbra resume_userspace
275
276do_delayed_trace:
277 bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR
278 pea 1 | send SIGTRAP
279 movel %curptr,%sp@-
280 pea LSIGTRAP
281 jbsr send_sig
282 addql #8,%sp
283 addql #4,%sp
284 jbra resume_userspace
285
286
287/* This is the main interrupt handler for autovector interrupts */
288
289ENTRY(auto_inthandler)
290 SAVE_ALL_INT
291 GET_CURRENT(%d0)
292 | put exception # in d0
293 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
294 subw #VEC_SPUR,%d0
295
296 movel %sp,%sp@-
297 movel %d0,%sp@- | put vector # on stack
| the fixup label points 2 bytes into the jsr, at its operand, so boot
| code can patch the handler address in place (exported via .globl above)
298auto_irqhandler_fixup = . + 2
299 jsr do_IRQ | process the IRQ
300 addql #8,%sp | pop parameters off stack
301 jra ret_from_exception
302
303/* Handler for user defined interrupt vectors */
304
305ENTRY(user_inthandler)
306 SAVE_ALL_INT
307 GET_CURRENT(%d0)
308 | put exception # in d0
309 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
| fixup label targets the subw immediate so the vector base can be patched
310user_irqvec_fixup = . + 2
311 subw #VEC_USER,%d0
312
313 movel %sp,%sp@-
314 movel %d0,%sp@- | put vector # on stack
315 jsr do_IRQ | process the IRQ
316 addql #8,%sp | pop parameters off stack
317 jra ret_from_exception
318
319/* Handler for uninitialized and spurious interrupts */
320
321ENTRY(bad_inthandler)
322 SAVE_ALL_INT
323 GET_CURRENT(%d0)
324
325 movel %sp,%sp@-
326 jsr handle_badint
327 addql #4,%sp
328 jra ret_from_exception
329
| Context switch.  In: %a0 = prev task, %a1 = next task.
| Out: %d1 = prev task (consumed by ret_from_fork/schedule_tail).
| Saves SR, fc, usp, callee-saved regs and kernel SP of prev; restores
| the same for next.  FPU state is saved/restored only when the fsave
| frame is non-null (a zero first byte/word means no live FPU state).
330resume:
331 /*
332 * Beware - when entering resume, prev (the current task) is
333 * in a0, next (the new task) is in a1,so don't change these
334 * registers until their contents are no longer needed.
335 */
336
337 /* save sr */
338 movew %sr,%a0@(TASK_THREAD+THREAD_SR)
339
340 /* save fs (sfc,%dfc) (may be pointing to kernel memory) */
341 movec %sfc,%d0
342 movew %d0,%a0@(TASK_THREAD+THREAD_FC)
343
344 /* save usp */
345 /* it is better to use a movel here instead of a movew 8*) */
346 movec %usp,%d0
347 movel %d0,%a0@(TASK_THREAD+THREAD_USP)
348
349 /* save non-scratch registers on stack */
350 SAVE_SWITCH_STACK
351
352 /* save current kernel stack pointer */
353 movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
354
355 /* save floating point context */
356#ifndef CONFIG_M68KFPU_EMU_ONLY
357#ifdef CONFIG_M68KFPU_EMU
358 tstl m68k_fputype
359 jeq 3f
360#endif
361 fsave %a0@(TASK_THREAD+THREAD_FPSTATE)
362
363#if defined(CONFIG_M68060)
364#if !defined(CPU_M68060_ONLY)
365 btst #3,m68k_cputype+3
366 beqs 1f
367#endif
368 /* The 060 FPU keeps status in bits 15-8 of the first longword */
369 tstb %a0@(TASK_THREAD+THREAD_FPSTATE+2)
370 jeq 3f
371#if !defined(CPU_M68060_ONLY)
372 jra 2f
373#endif
374#endif /* CONFIG_M68060 */
375#if !defined(CPU_M68060_ONLY)
3761: tstb %a0@(TASK_THREAD+THREAD_FPSTATE)
377 jeq 3f
378#endif
3792: fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
380 fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3813:
382#endif /* CONFIG_M68KFPU_EMU_ONLY */
383 /* Return previous task in %d1 */
384 movel %curptr,%d1
385
386 /* switch to new task (a1 contains new task) */
387 movel %a1,%curptr
388
389 /* restore floating point context */
390#ifndef CONFIG_M68KFPU_EMU_ONLY
391#ifdef CONFIG_M68KFPU_EMU
392 tstl m68k_fputype
393 jeq 4f
394#endif
395#if defined(CONFIG_M68060)
396#if !defined(CPU_M68060_ONLY)
397 btst #3,m68k_cputype+3
398 beqs 1f
399#endif
400 /* The 060 FPU keeps status in bits 15-8 of the first longword */
401 tstb %a1@(TASK_THREAD+THREAD_FPSTATE+2)
402 jeq 3f
403#if !defined(CPU_M68060_ONLY)
404 jra 2f
405#endif
406#endif /* CONFIG_M68060 */
407#if !defined(CPU_M68060_ONLY)
4081: tstb %a1@(TASK_THREAD+THREAD_FPSTATE)
409 jeq 3f
410#endif
4112: fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
412 fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
4133: frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4144:
415#endif /* CONFIG_M68KFPU_EMU_ONLY */
416
417 /* restore the kernel stack pointer */
418 movel %a1@(TASK_THREAD+THREAD_KSP),%sp
419
420 /* restore non-scratch registers */
421 RESTORE_SWITCH_STACK
422
423 /* restore user stack pointer */
424 movel %a1@(TASK_THREAD+THREAD_USP),%a0
425 movel %a0,%usp
426
427 /* restore fs (sfc,%dfc) */
428 movew %a1@(TASK_THREAD+THREAD_FC),%a0
429 movec %a0,%sfc
430 movec %a0,%dfc
431
432 /* restore status register */
| NOTE(review): unlike the older revision above, the incoming task's SR is
| loaded with the interrupt priority mask forced to 7 (0x0700), i.e.
| interrupts stay masked across the SR write — confirm rationale against
| the upstream commit that introduced this change.
433 movew %a1@(TASK_THREAD+THREAD_SR),%d0
434 oriw #0x0700,%d0
435 movew %d0,%sr
436
437 rts
438
439#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */