/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 */

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/compiler.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/isadep.h>
#include <asm/thread_info.h>
#include <asm/war.h>

#ifndef CONFIG_PREEMPT
#define resume_kernel	restore_all
#else
#define __ret_from_irq	ret_from_exception
#endif

	.text
	.align	5
#ifndef CONFIG_PREEMPT
FEXPORT(ret_from_exception)
	local_irq_disable			# preempt stop
	b	__ret_from_irq
#endif
FEXPORT(ret_from_irq)
	LONG_S	s0, TI_REGS($28)
FEXPORT(__ret_from_irq)
/*
 * We can be coming here from a syscall done in the kernel space,
 * e.g. a failed kernel_execve().
 */
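/*
 * The check below looks at the privilege bits of the saved CP0 Status
 * word: KU_USER (see <asm/isadep.h>) selects the KUp bit on R3000-class
 * CPUs and the user-mode KSU encoding on later ISAs, so a zero result
 * means the trap was taken from kernel mode.
 */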
resume_userspace_check:
	LONG_L	t0, PT_STATUS(sp)		# returning to kernel mode?
	andi	t0, t0, KU_USER
	beqz	t0, resume_kernel

resume_userspace:
	local_irq_disable		# make sure we don't miss an
					# interrupt setting need_resched
					# between sampling and return
	LONG_L	a2, TI_FLAGS($28)	# current->work
	andi	t0, a2, _TIF_WORK_MASK	# (ignoring syscall_trace)
	bnez	t0, work_pending
	j	restore_all

#ifdef CONFIG_PREEMPT
resume_kernel:
	local_irq_disable
	lw	t0, TI_PRE_COUNT($28)
	bnez	t0, restore_all
need_resched:
	LONG_L	t0, TI_FLAGS($28)
	andi	t1, t0, _TIF_NEED_RESCHED
	beqz	t1, restore_all
	LONG_L	t0, PT_STATUS(sp)	# Interrupts off?
	andi	t0, 1
	beqz	t0, restore_all
	jal	preempt_schedule_irq
	b	need_resched
#endif
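
/*
 * With CONFIG_PREEMPT the block above is roughly the following C
 * (a sketch, not generated code): with interrupts disabled, and only
 * if the interrupted kernel context had preempt_count == 0 and its
 * saved Status word shows interrupts were enabled,
 *
 *	while (test_thread_flag(TIF_NEED_RESCHED))
 *		preempt_schedule_irq();
 *
 * preempt_schedule_irq() re-enables interrupts itself around the
 * actual reschedule.
 */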

FEXPORT(ret_from_kernel_thread)
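	# s0 = the kernel thread's entry function, s1 = its argument;
	# both were placed in the new thread's saved static registers by
	# copy_thread() when the thread was set up.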
	jal	schedule_tail		# a0 = struct task_struct *prev
	move	a0, s1
	jal	s0
	j	syscall_exit

FEXPORT(ret_from_fork)
	jal	schedule_tail		# a0 = struct task_struct *prev

FEXPORT(syscall_exit)
	local_irq_disable		# make sure need_resched and
					# signals don't change between
					# sampling and return
	LONG_L	a2, TI_FLAGS($28)	# current->work
	li	t0, _TIF_ALLWORK_MASK
	and	t0, a2, t0
	bnez	t0, syscall_exit_work

restore_all:				# restore full frame
	.set	noat
	RESTORE_TEMP
	RESTORE_AT
	RESTORE_STATIC
restore_partial:			# restore partial frame
#ifdef CONFIG_TRACE_IRQFLAGS
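	/*
	 * trace_hardirqs_on()/off() are C calls and may clobber $at and
	 * the temporary registers, which at this point already hold (or
	 * must keep) the values of the context being returned to, so
	 * they are spilled back into the frame around the call and
	 * reloaded afterwards.
	 */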
	SAVE_STATIC
	SAVE_AT
	SAVE_TEMP
	LONG_L	v0, PT_STATUS(sp)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	v0, ST0_IEP
#else
	and	v0, ST0_IE
#endif
	beqz	v0, 1f
	jal	trace_hardirqs_on
	b	2f
1:	jal	trace_hardirqs_off
2:
	RESTORE_TEMP
	RESTORE_AT
	RESTORE_STATIC
#endif
	RESTORE_SOME
	RESTORE_SP_AND_RET
	.set	at

work_pending:
	andi	t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
	beqz	t0, work_notifysig
work_resched:
	TRACE_IRQS_OFF
	jal	schedule

	local_irq_disable		# make sure need_resched and
					# signals don't change between
					# sampling and return
	LONG_L	a2, TI_FLAGS($28)
	andi	t0, a2, _TIF_WORK_MASK	# is there any work to be done
					# other than syscall tracing?
	beqz	t0, restore_all
	andi	t0, a2, _TIF_NEED_RESCHED
	bnez	t0, work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	move	a0, sp
	li	a1, 0
	jal	do_notify_resume	# a2 already loaded
	j	resume_userspace_check
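/*
 * The work_pending/work_resched/work_notifysig code above is roughly
 * the following C (a sketch, not generated code):
 *
 *	while ((flags = current_thread_info()->flags) & _TIF_WORK_MASK) {
 *		if (flags & _TIF_NEED_RESCHED)
 *			schedule();
 *		else
 *			do_notify_resume(regs, NULL, flags);
 *	}
 */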

FEXPORT(syscall_exit_partial)
	local_irq_disable		# make sure need_resched doesn't
					# change between sampling and return
	LONG_L	a2, TI_FLAGS($28)	# current->work
	li	t0, _TIF_ALLWORK_MASK
	and	t0, a2
	beqz	t0, restore_partial
	SAVE_STATIC
syscall_exit_work:
	LONG_L	t0, PT_STATUS(sp)	# returning to kernel mode?
	andi	t0, t0, KU_USER
	beqz	t0, resume_kernel
	li	t0, _TIF_WORK_SYSCALL_EXIT
	and	t0, a2			# a2 is preloaded with TI_FLAGS
	beqz	t0, work_pending	# trace bit set?
	local_irq_enable		# could let syscall_trace_leave()
					# call schedule() instead
	TRACE_IRQS_ON
	move	a0, sp
	jal	syscall_trace_leave
	b	resume_userspace
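	# syscall_trace_leave() (called above) handles the ptrace/audit/
	# tracepoint exit work flagged in _TIF_WORK_SYSCALL_EXIT before
	# the normal return path is rejoined via resume_userspace.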

#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \
    defined(CONFIG_MIPS_MT)

/*
 * MIPS32R2 Instruction Hazard Barrier - must be called
 *
 * For C code use the inline version named instruction_hazard().
 */
LEAF(mips_ihb)
	.set	MIPS_ISA_LEVEL_RAW
	jr.hb	ra
	nop
	END(mips_ihb)
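
/*
 * Typical C-side usage goes through the inline helper mentioned above,
 * declared in <asm/hazards.h> (a sketch):
 *
 *	write_c0_status(read_c0_status() | ST0_CU1);
 *	instruction_hazard();
 */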

#endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
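
/*
 * An earlier revision of this same file follows, from a kernel that
 * still carried MIPS MT SMTC support.
 */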
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 */

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/isadep.h>
#include <asm/thread_info.h>
#include <asm/war.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif

#ifndef CONFIG_PREEMPT
#define resume_kernel	restore_all
#else
#define __ret_from_irq	ret_from_exception
#endif

	.text
	.align	5
#ifndef CONFIG_PREEMPT
FEXPORT(ret_from_exception)
	local_irq_disable			# preempt stop
	b	__ret_from_irq
#endif
FEXPORT(ret_from_irq)
	LONG_S	s0, TI_REGS($28)
FEXPORT(__ret_from_irq)
	LONG_L	t0, PT_STATUS(sp)		# returning to kernel mode?
	andi	t0, t0, KU_USER
	beqz	t0, resume_kernel

resume_userspace:
	local_irq_disable		# make sure we don't miss an
					# interrupt setting need_resched
					# between sampling and return
	LONG_L	a2, TI_FLAGS($28)	# current->work
	andi	t0, a2, _TIF_WORK_MASK	# (ignoring syscall_trace)
	bnez	t0, work_pending
	j	restore_all

#ifdef CONFIG_PREEMPT
resume_kernel:
	local_irq_disable
	lw	t0, TI_PRE_COUNT($28)
	bnez	t0, restore_all
need_resched:
	LONG_L	t0, TI_FLAGS($28)
	andi	t1, t0, _TIF_NEED_RESCHED
	beqz	t1, restore_all
	LONG_L	t0, PT_STATUS(sp)	# Interrupts off?
	andi	t0, 1
	beqz	t0, restore_all
	jal	preempt_schedule_irq
	b	need_resched
#endif

FEXPORT(ret_from_fork)
	jal	schedule_tail		# a0 = struct task_struct *prev

FEXPORT(syscall_exit)
	local_irq_disable		# make sure need_resched and
					# signals don't change between
					# sampling and return
	LONG_L	a2, TI_FLAGS($28)	# current->work
	li	t0, _TIF_ALLWORK_MASK
	and	t0, a2, t0
	bnez	t0, syscall_exit_work

FEXPORT(restore_all)			# restore full frame
#ifdef CONFIG_MIPS_MT_SMTC
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
/* Re-arm any temporarily masked interrupts not explicitly "acked" */
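/*
 * The sequence below: set TCStatus.IXMT so this TC takes no interrupts,
 * disable multi-threading (DMT), merge the IM bits parked in
 * CP0_TCContext back into CP0_Status, re-enable the other threads (EMT)
 * if they were running, then restore the original IXMT state and clear
 * the TCContext bits that were just consumed.
 */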
	mfc0	v0, CP0_TCSTATUS
	ori	v1, v0, TCSTATUS_IXMT
	mtc0	v1, CP0_TCSTATUS
	andi	v0, TCSTATUS_IXMT
	_ehb
	mfc0	t0, CP0_TCCONTEXT
	DMT	9				# dmt t1
	jal	mips_ihb
	mfc0	t2, CP0_STATUS
	andi	t3, t0, 0xff00
	or	t2, t2, t3
	mtc0	t2, CP0_STATUS
	_ehb
	andi	t1, t1, VPECONTROL_TE
	beqz	t1, 1f
	EMT
1:
	mfc0	v1, CP0_TCSTATUS
	/* We set IXMT above, XOR should clear it here */
	xori	v1, v1, TCSTATUS_IXMT
	or	v1, v0, v1
	mtc0	v1, CP0_TCSTATUS
	_ehb
	xor	t0, t0, t3
	mtc0	t0, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
/* Detect and execute deferred IPI "interrupts" */
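/*
 * TI_REGS is temporarily pointed at the current stack frame while
 * deferred_smtc_ipi() runs; the previous value, held in s0, is written
 * back afterwards.
 */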
	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)
	jal	deferred_smtc_ipi
	LONG_S	s0, TI_REGS($28)
#endif /* CONFIG_MIPS_MT_SMTC */
	.set	noat
	RESTORE_TEMP
	RESTORE_AT
	RESTORE_STATIC
FEXPORT(restore_partial)		# restore partial frame
#ifdef CONFIG_TRACE_IRQFLAGS
	SAVE_STATIC
	SAVE_AT
	SAVE_TEMP
	LONG_L	v0, PT_STATUS(sp)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	v0, ST0_IEP
#else
	and	v0, ST0_IE
#endif
	beqz	v0, 1f
	jal	trace_hardirqs_on
	b	2f
1:	jal	trace_hardirqs_off
2:
	RESTORE_TEMP
	RESTORE_AT
	RESTORE_STATIC
#endif
	RESTORE_SOME
	RESTORE_SP_AND_RET
	.set	at

work_pending:
	andi	t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
	beqz	t0, work_notifysig
work_resched:
	jal	schedule

	local_irq_disable		# make sure need_resched and
					# signals don't change between
					# sampling and return
	LONG_L	a2, TI_FLAGS($28)
	andi	t0, a2, _TIF_WORK_MASK	# is there any work to be done
					# other than syscall tracing?
	beqz	t0, restore_all
	andi	t0, a2, _TIF_NEED_RESCHED
	bnez	t0, work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	move	a0, sp
	li	a1, 0
	jal	do_notify_resume	# a2 already loaded
	j	resume_userspace

FEXPORT(syscall_exit_work_partial)
	SAVE_STATIC
syscall_exit_work:
	li	t0, _TIF_WORK_SYSCALL_EXIT
	and	t0, a2			# a2 is preloaded with TI_FLAGS
	beqz	t0, work_pending	# trace bit set?
	local_irq_enable		# could let syscall_trace_leave()
					# call schedule() instead
	move	a0, sp
	jal	syscall_trace_leave
	b	resume_userspace

#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)

/*
 * MIPS32R2 Instruction Hazard Barrier - must be called
 *
 * For C code use the inline version named instruction_hazard().
 */
LEAF(mips_ihb)
	.set	mips32r2
	jr.hb	ra
	nop
	END(mips_ihb)

#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */