/* -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 */

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:  added writeback completion after return from sighandler
 *               for 68040
 */

36#include <linux/linkage.h>
37#include <asm/errno.h>
38#include <asm/setup.h>
39#include <asm/segment.h>
40#include <asm/traps.h>
41#include <asm/unistd.h>
42#include <asm/asm-offsets.h>
43#include <asm/entry.h>
44
45.globl system_call, buserr, trap, resume
46.globl sys_call_table
47.globl __sys_fork, __sys_clone, __sys_vfork
48.globl bad_interrupt
49.globl auto_irqhandler_fixup
50.globl user_irqvec_fixup
51
52.text
53ENTRY(__sys_fork)
54 SAVE_SWITCH_STACK
55 jbsr sys_fork
56 lea %sp@(24),%sp
57 rts
58
59ENTRY(__sys_clone)
60 SAVE_SWITCH_STACK
61 pea %sp@(SWITCH_STACK_SIZE)
62 jbsr m68k_clone
63 lea %sp@(28),%sp
64 rts
65
66ENTRY(__sys_vfork)
67 SAVE_SWITCH_STACK
68 jbsr sys_vfork
69 lea %sp@(24),%sp
70 rts
71
72ENTRY(__sys_clone3)
73 SAVE_SWITCH_STACK
74 pea %sp@(SWITCH_STACK_SIZE)
75 jbsr m68k_clone3
76 lea %sp@(28),%sp
77 rts
78
79ENTRY(sys_sigreturn)
80 SAVE_SWITCH_STACK
81 movel %sp,%sp@- | switch_stack pointer
82 pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
83 jbsr do_sigreturn
84 addql #8,%sp
85 RESTORE_SWITCH_STACK
86 rts
87
88ENTRY(sys_rt_sigreturn)
89 SAVE_SWITCH_STACK
90 movel %sp,%sp@- | switch_stack pointer
91 pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
92 jbsr do_rt_sigreturn
93 addql #8,%sp
94 RESTORE_SWITCH_STACK
95 rts
96
97ENTRY(buserr)
98 SAVE_ALL_INT
99 GET_CURRENT(%d0)
100 movel %sp,%sp@- | stack frame pointer argument
101 jbsr buserr_c
102 addql #4,%sp
103 jra ret_from_exception
104
105ENTRY(trap)
106 SAVE_ALL_INT
107 GET_CURRENT(%d0)
108 movel %sp,%sp@- | stack frame pointer argument
109 jbsr trap_c
110 addql #4,%sp
111 jra ret_from_exception
112
113 | After a fork we jump here directly from resume,
114 | so that %d1 contains the previous task
115 | schedule_tail now used regardless of CONFIG_SMP
116ENTRY(ret_from_fork)
117 movel %d1,%sp@-
118 jsr schedule_tail
119 addql #4,%sp
120 jra ret_from_exception
121
122ENTRY(ret_from_kernel_thread)
123 | a3 contains the kernel thread payload, d7 - its argument
124 movel %d1,%sp@-
125 jsr schedule_tail
126 movel %d7,(%sp)
127 jsr %a3@
128 addql #4,%sp
129 jra ret_from_exception
130
131#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
132
133#ifdef TRAP_DBG_INTERRUPT
134
135.globl dbginterrupt
136ENTRY(dbginterrupt)
137 SAVE_ALL_INT
138 GET_CURRENT(%d0)
139 movel %sp,%sp@- /* stack frame pointer argument */
140 jsr dbginterrupt_c
141 addql #4,%sp
142 jra ret_from_exception
143#endif
144
145ENTRY(reschedule)
146 /* save top of frame */
147 pea %sp@
148 jbsr set_esp0
149 addql #4,%sp
150 pea ret_from_exception
151 jmp schedule
152
153ENTRY(ret_from_user_signal)
154 moveq #__NR_sigreturn,%d0
155 trap #0
156
157ENTRY(ret_from_user_rt_signal)
158 movel #__NR_rt_sigreturn,%d0
159 trap #0
160
161#else
162
163do_trace_entry:
164 movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
165 subql #4,%sp
166 SAVE_SWITCH_STACK
167 jbsr syscall_trace
168 RESTORE_SWITCH_STACK
169 addql #4,%sp
170 movel %sp@(PT_OFF_ORIG_D0),%d0
171 cmpl #NR_syscalls,%d0
172 jcs syscall
173badsys:
174 movel #-ENOSYS,%sp@(PT_OFF_D0)
175 jra ret_from_syscall
176
177do_trace_exit:
178 subql #4,%sp
179 SAVE_SWITCH_STACK
180 jbsr syscall_trace
181 RESTORE_SWITCH_STACK
182 addql #4,%sp
183 jra .Lret_from_exception
184
185ENTRY(ret_from_signal)
186 movel %curptr@(TASK_STACK),%a1
187 tstb %a1@(TINFO_FLAGS+2)
188 jge 1f
189 jbsr syscall_trace
1901: RESTORE_SWITCH_STACK
191 addql #4,%sp
192/* on 68040 complete pending writebacks if any */
193#ifdef CONFIG_M68040
194 bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0
195 subql #7,%d0 | bus error frame ?
196 jbne 1f
197 movel %sp,%sp@-
198 jbsr berr_040cleanup
199 addql #4,%sp
2001:
201#endif
202 jra .Lret_from_exception
203
204ENTRY(system_call)
205 SAVE_ALL_SYS
206
207 GET_CURRENT(%d1)
208 movel %d1,%a1
209
210 | save top of frame
211 movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
212
213 | syscall trace?
214 tstb %a1@(TINFO_FLAGS+2)
215 jmi do_trace_entry
216 cmpl #NR_syscalls,%d0
217 jcc badsys
218syscall:
219 jbsr @(sys_call_table,%d0:l:4)@(0)
220 movel %d0,%sp@(PT_OFF_D0) | save the return value
221ret_from_syscall:
222 |oriw #0x0700,%sr
223 movel %curptr@(TASK_STACK),%a1
224 movew %a1@(TINFO_FLAGS+2),%d0
225 jne syscall_exit_work
2261: RESTORE_ALL
227
228syscall_exit_work:
229 btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
230 bnes 1b | if so, skip resched, signals
231 lslw #1,%d0
232 jcs do_trace_exit
233 jmi do_delayed_trace
234 lslw #8,%d0
235 jne do_signal_return
236 pea resume_userspace
237 jra schedule
238
239
240ENTRY(ret_from_exception)
241.Lret_from_exception:
242 btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
243 bnes 1f | if so, skip resched, signals
244 | only allow interrupts when we are really the last one on the
245 | kernel stack, otherwise stack overflow can occur during
246 | heavy interrupt load
247 andw #ALLOWINT,%sr
248
249resume_userspace:
250 movel %curptr@(TASK_STACK),%a1
251 moveb %a1@(TINFO_FLAGS+3),%d0
252 jne exit_work
2531: RESTORE_ALL
254
255exit_work:
256 | save top of frame
257 movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
258 lslb #1,%d0
259 jne do_signal_return
260 pea resume_userspace
261 jra schedule
262
263
264do_signal_return:
265 |andw #ALLOWINT,%sr
266 subql #4,%sp | dummy return address
267 SAVE_SWITCH_STACK
268 pea %sp@(SWITCH_STACK_SIZE)
269 bsrl do_notify_resume
270 addql #4,%sp
271 RESTORE_SWITCH_STACK
272 addql #4,%sp
273 jbra resume_userspace
274
275do_delayed_trace:
276 bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR
277 pea 1 | send SIGTRAP
278 movel %curptr,%sp@-
279 pea LSIGTRAP
280 jbsr send_sig
281 addql #8,%sp
282 addql #4,%sp
283 jbra resume_userspace
284
285
286/* This is the main interrupt handler for autovector interrupts */
287
288ENTRY(auto_inthandler)
289 SAVE_ALL_INT
290 GET_CURRENT(%d0)
291 | put exception # in d0
292 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
293 subw #VEC_SPUR,%d0
294
295 movel %sp,%sp@-
296 movel %d0,%sp@- | put vector # on stack
297auto_irqhandler_fixup = . + 2
298 jsr do_IRQ | process the IRQ
299 addql #8,%sp | pop parameters off stack
300 jra ret_from_exception
301
302/* Handler for user defined interrupt vectors */
303
304ENTRY(user_inthandler)
305 SAVE_ALL_INT
306 GET_CURRENT(%d0)
307 | put exception # in d0
308 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
309user_irqvec_fixup = . + 2
310 subw #VEC_USER,%d0
311
312 movel %sp,%sp@-
313 movel %d0,%sp@- | put vector # on stack
314 jsr do_IRQ | process the IRQ
315 addql #8,%sp | pop parameters off stack
316 jra ret_from_exception
317
318/* Handler for uninitialized and spurious interrupts */
319
320ENTRY(bad_inthandler)
321 SAVE_ALL_INT
322 GET_CURRENT(%d0)
323
324 movel %sp,%sp@-
325 jsr handle_badint
326 addql #4,%sp
327 jra ret_from_exception
328
329resume:
330 /*
331 * Beware - when entering resume, prev (the current task) is
332 * in a0, next (the new task) is in a1,so don't change these
333 * registers until their contents are no longer needed.
334 */
335
336 /* save sr */
337 movew %sr,%a0@(TASK_THREAD+THREAD_SR)
338
339 /* save fs (sfc,%dfc) (may be pointing to kernel memory) */
340 movec %sfc,%d0
341 movew %d0,%a0@(TASK_THREAD+THREAD_FS)
342
343 /* save usp */
344 /* it is better to use a movel here instead of a movew 8*) */
345 movec %usp,%d0
346 movel %d0,%a0@(TASK_THREAD+THREAD_USP)
347
348 /* save non-scratch registers on stack */
349 SAVE_SWITCH_STACK
350
351 /* save current kernel stack pointer */
352 movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
353
354 /* save floating point context */
355#ifndef CONFIG_M68KFPU_EMU_ONLY
356#ifdef CONFIG_M68KFPU_EMU
357 tstl m68k_fputype
358 jeq 3f
359#endif
360 fsave %a0@(TASK_THREAD+THREAD_FPSTATE)
361
362#if defined(CONFIG_M68060)
363#if !defined(CPU_M68060_ONLY)
364 btst #3,m68k_cputype+3
365 beqs 1f
366#endif
367 /* The 060 FPU keeps status in bits 15-8 of the first longword */
368 tstb %a0@(TASK_THREAD+THREAD_FPSTATE+2)
369 jeq 3f
370#if !defined(CPU_M68060_ONLY)
371 jra 2f
372#endif
373#endif /* CONFIG_M68060 */
374#if !defined(CPU_M68060_ONLY)
3751: tstb %a0@(TASK_THREAD+THREAD_FPSTATE)
376 jeq 3f
377#endif
3782: fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
379 fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3803:
381#endif /* CONFIG_M68KFPU_EMU_ONLY */
382 /* Return previous task in %d1 */
383 movel %curptr,%d1
384
385 /* switch to new task (a1 contains new task) */
386 movel %a1,%curptr
387
388 /* restore floating point context */
389#ifndef CONFIG_M68KFPU_EMU_ONLY
390#ifdef CONFIG_M68KFPU_EMU
391 tstl m68k_fputype
392 jeq 4f
393#endif
394#if defined(CONFIG_M68060)
395#if !defined(CPU_M68060_ONLY)
396 btst #3,m68k_cputype+3
397 beqs 1f
398#endif
399 /* The 060 FPU keeps status in bits 15-8 of the first longword */
400 tstb %a1@(TASK_THREAD+THREAD_FPSTATE+2)
401 jeq 3f
402#if !defined(CPU_M68060_ONLY)
403 jra 2f
404#endif
405#endif /* CONFIG_M68060 */
406#if !defined(CPU_M68060_ONLY)
4071: tstb %a1@(TASK_THREAD+THREAD_FPSTATE)
408 jeq 3f
409#endif
4102: fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
411 fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
4123: frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4134:
414#endif /* CONFIG_M68KFPU_EMU_ONLY */
415
416 /* restore the kernel stack pointer */
417 movel %a1@(TASK_THREAD+THREAD_KSP),%sp
418
419 /* restore non-scratch registers */
420 RESTORE_SWITCH_STACK
421
422 /* restore user stack pointer */
423 movel %a1@(TASK_THREAD+THREAD_USP),%a0
424 movel %a0,%usp
425
426 /* restore fs (sfc,%dfc) */
427 movew %a1@(TASK_THREAD+THREAD_FS),%a0
428 movec %a0,%sfc
429 movec %a0,%dfc
430
431 /* restore status register */
432 movew %a1@(TASK_THREAD+THREAD_SR),%sr
433
434 rts
435
436#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
1#ifdef CONFIG_MMU
2#include "entry_mm.S"
3#else
4#include "entry_no.S"
5#endif