/*
 * rtrap.S: Preparing for return from trap on Sparc V9.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */


#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/visasm.h>
#include <asm/processor.h>

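/* %pstate images used on the way out of a trap: all of them keep TSO
 * memory ordering, the FPU enable (PEF) and privilege; RTRAP_PSTATE
 * additionally keeps interrupts enabled (IE), and the _AG variant
 * selects the alternate globals on pre-sun4v chips.
 */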
#define	RTRAP_PSTATE		(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
#define	RTRAP_PSTATE_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
#define	RTRAP_PSTATE_AG_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)

#ifdef CONFIG_CONTEXT_TRACKING
# define SCHEDULE_USER schedule_user
#else
# define SCHEDULE_USER schedule
#endif

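/* Note the delay-slot idiom used by the helpers below: the wrpr that
 * follows each 'call'/'ba' executes before control transfers, so
 * interrupts are enabled exactly for the duration of the C call and
 * disabled again on the branch back to __handle_preemption_continue.
 */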
	.text
	.align	32
__handle_preemption:
	call	SCHEDULE_USER
	 wrpr	%g0, RTRAP_PSTATE, %pstate
	ba,pt	%xcc, __handle_preemption_continue
	 wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate

__handle_user_windows:
	call	fault_in_user_windows
	 wrpr	%g0, RTRAP_PSTATE, %pstate
	ba,pt	%xcc, __handle_preemption_continue
	 wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate

__handle_userfpu:
	rd	%fprs, %l5
	andcc	%l5, FPRS_FEF, %g0
	sethi	%hi(TSTATE_PEF), %o0
	be,a,pn	%icc, __handle_userfpu_continue
	 andn	%l1, %o0, %l1
	ba,a,pt	%xcc, __handle_userfpu_continue

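/* Hand off to do_notify_resume(regs, orig_i0, thread_info_flags);
 * %l5 is expected to hold the original %i0 (for syscall restart
 * handling) and %l0 the TI_FLAGS value sampled with IRQs off.
 */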
__handle_signal:
	mov	%l5, %o1
	add	%sp, PTREGS_OFF, %o0
	mov	%l0, %o2
	call	do_notify_resume
	 wrpr	%g0, RTRAP_PSTATE, %pstate
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate

	/* Signal delivery can modify pt_regs tstate, so we must
	 * reload it.
	 */
	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	sethi	%hi(0xf << 20), %l4
	and	%l1, %l4, %l4
	ba,pt	%xcc, __handle_preemption_continue
	 andn	%l1, %l4, %l1

	/* When returning from an NMI (%pil==15) interrupt we want to
	 * avoid running softirqs, doing IRQ tracing, preempting, etc.
	 */
	.globl	rtrap_nmi
rtrap_nmi:	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	sethi	%hi(0xf << 20), %l4
	and	%l1, %l4, %l4
	andn	%l1, %l4, %l1
	srl	%l4, 20, %l4
	ba,pt	%xcc, rtrap_no_irq_enable
	 wrpr	%l4, %pil

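/* Register convention for the whole return path: %l1 holds the
 * pt_regs tstate image with bits 20-23 masked out, and %l4 holds the
 * value of those bits shifted down -- the PIL that trap entry stuffed
 * into the tstate image -- to be written to %pil before the final
 * retry.
 */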
	.align	64
	.globl	rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
rtrap_irq:
rtrap:
	/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
rtrap_xcall:
	sethi	%hi(0xf << 20), %l4
	and	%l1, %l4, %l4
	andn	%l1, %l4, %l1
	srl	%l4, 20, %l4
#ifdef CONFIG_TRACE_IRQFLAGS
	brnz,pn	%l4, rtrap_no_irq_enable
	 nop
	call	trace_hardirqs_on
	 nop
	/* Do not actually set the %pil here. We will do that
	 * below after we clear PSTATE_IE in the %pstate register.
	 * If we re-enable interrupts here, we can recurse down
	 * the hardirq stack potentially endlessly, causing a
	 * stack overflow.
	 *
	 * It is tempting to put this test and trace_hardirqs_on
	 * call at the 'rt_continue' label, but that will not work
	 * as that path hits unconditionally and we do not want to
	 * execute this in NMI return paths, for example.
	 */
#endif
rtrap_no_irq_enable:
	andcc	%l1, TSTATE_PRIV, %l3
	bne,pn	%icc, to_kernel
	 nop

	/* We must hold IRQs off and atomically test schedule+signal
	 * state, then hold them off all the way back to userspace.
	 * If we are returning to kernel, none of this matters. Note
	 * that we are disabling interrupts via PSTATE_IE, not using
	 * %pil.
	 *
	 * If we do not do this, there is a window where we would do
	 * the tests, later the signal/resched event arrives but we do
	 * not process it since we are still in kernel mode. It would
	 * take until the next local IRQ before the signal/resched
	 * event would be handled.
	 *
	 * This also means that if we have to deal with user
	 * windows, we have to redo all of these sched+signal checks
	 * with IRQs disabled.
	 */
to_user:	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	wrpr	0, %pil
__handle_preemption_continue:
	ldx	[%g6 + TI_FLAGS], %l0
	sethi	%hi(_TIF_USER_WORK_MASK), %o0
	or	%o0, %lo(_TIF_USER_WORK_MASK), %o0
	andcc	%l0, %o0, %g0
	sethi	%hi(TSTATE_PEF), %o0
	be,pt	%xcc, user_nowork
	 andcc	%l1, %o0, %g0
	andcc	%l0, _TIF_NEED_RESCHED, %g0
	bne,pn	%xcc, __handle_preemption
	 andcc	%l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
	bne,pn	%xcc, __handle_signal
	 ldub	[%g6 + TI_WSAVED], %o2
	brnz,pn	%o2, __handle_user_windows
	 nop
	sethi	%hi(TSTATE_PEF), %o0
	andcc	%l1, %o0, %g0

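	/* Both paths into user_nowork arrive with the condition codes
	 * holding the result of 'andcc %l1, TSTATE_PEF' (computed in a
	 * branch delay slot above), so the bne there decides whether
	 * user FPU state needs handling.
	 */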
	/* This fpdepth clear is necessary for non-syscall rtraps only */
user_nowork:
	bne,pn	%xcc, __handle_userfpu
	 stb	%g0, [%g6 + TI_FPDEPTH]
__handle_userfpu_continue:

rt_continue:	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
	ldx	[%sp + PTREGS_OFF + PT_V9_G2], %g2

	ldx	[%sp + PTREGS_OFF + PT_V9_G3], %g3
	ldx	[%sp + PTREGS_OFF + PT_V9_G4], %g4
	ldx	[%sp + PTREGS_OFF + PT_V9_G5], %g5
	brz,pt	%l3, 1f
	 mov	%g6, %l2

	/* Must do this before thread reg is clobbered below. */
	LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
1:
	ldx	[%sp + PTREGS_OFF + PT_V9_G6], %g6
	ldx	[%sp + PTREGS_OFF + PT_V9_G7], %g7

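	/* %g6 (current thread_info) was stashed in %l2 above; locals
	 * live in the register window, so %l2 survives the switch to
	 * the trap globals below and is used to re-establish %g6 there.
	 */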
	/* Normal globals are restored, go to trap globals. */
661:	wrpr	%g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
	nop
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	SET_GL(1)
	.previous

	mov	%l2, %g6

	ldx	[%sp + PTREGS_OFF + PT_V9_I0], %i0
	ldx	[%sp + PTREGS_OFF + PT_V9_I1], %i1

	ldx	[%sp + PTREGS_OFF + PT_V9_I2], %i2
	ldx	[%sp + PTREGS_OFF + PT_V9_I3], %i3
	ldx	[%sp + PTREGS_OFF + PT_V9_I4], %i4
	ldx	[%sp + PTREGS_OFF + PT_V9_I5], %i5
	ldx	[%sp + PTREGS_OFF + PT_V9_I6], %i6
	ldx	[%sp + PTREGS_OFF + PT_V9_I7], %i7
	ldx	[%sp + PTREGS_OFF + PT_V9_TPC], %l2
	ldx	[%sp + PTREGS_OFF + PT_V9_TNPC], %o2

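	/* Move to TL=1 and reload the saved trap state; %tpc/%tnpc are
	 * the target of the final 'retry'. The TSTATE_SYSCALL flag is
	 * masked off so it never reaches the live %tstate register.
	 */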
	ld	[%sp + PTREGS_OFF + PT_V9_Y], %o3
	wr	%o3, %g0, %y
	wrpr	%l4, 0x0, %pil
	wrpr	%g0, 0x1, %tl
	andn	%l1, TSTATE_SYSCALL, %l1
	wrpr	%l1, %g0, %tstate
	wrpr	%l2, %g0, %tpc
	wrpr	%o2, %g0, %tnpc

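	/* Trap entry pointed the primary MMU context at the kernel
	 * (nucleus) context and left the user's context in the
	 * secondary context register. PRIMARY_CONTEXT is 0x08, so
	 * [%l7 + %l7] addresses SECONDARY_CONTEXT (0x10): read the
	 * user context back and reinstall it as primary, OR-ing in
	 * the kernel's nucleus page-size bits.
	 */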
	brnz,pn	%l3, kern_rtt
	 mov	PRIMARY_CONTEXT, %l7

661:	ldxa	[%l7 + %l7] ASI_DMMU, %l0
	.section	.sun4v_1insn_patch, "ax"
	.word	661b
	ldxa	[%l7 + %l7] ASI_MMU, %l0
	.previous

	sethi	%hi(sparc64_kern_pri_nuc_bits), %l1
	ldx	[%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
	or	%l0, %l1, %l0

661:	stxa	%l0, [%l7] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%l0, [%l7] ASI_MMU
	.previous

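	/* Shift %wstate right by 3 so the 'other' (user) window-state
	 * field becomes the normal one, and turn the windows saved on
	 * the user's behalf (%otherwin) into directly restorable ones
	 * (%canrestore). If none are on-chip, fall through and fill
	 * one in from the user stack.
	 */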
	sethi	%hi(KERNBASE), %l7
	flush	%l7
	rdpr	%wstate, %l1
	rdpr	%otherwin, %l2
	srl	%l1, 3, %l1

	wrpr	%l2, %g0, %canrestore
	wrpr	%l1, %g0, %wstate
	brnz,pt	%l2, user_rtt_restore
	 wrpr	%g0, %g0, %otherwin

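	/* Fill a window from the user stack, using the 32-bit or
	 * 64-bit save-area layout as dictated by _TIF_32BIT. ASI_AIUP
	 * ("as if user, primary") makes the fill loads access user
	 * memory even though we run privileged here.
	 */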
	ldx	[%g6 + TI_FLAGS], %g3
	wr	%g0, ASI_AIUP, %asi
	rdpr	%cwp, %g1
	andcc	%g3, _TIF_32BIT, %g0
	sub	%g1, 1, %g1
	bne,pt	%xcc, user_rtt_fill_32bit
	 wrpr	%g1, %cwp
	ba,a,pt	%xcc, user_rtt_fill_64bit

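	/* We get here when the window fill above faulted on the user
	 * stack: rewind the %cwp/%wstate changes, switch the MMU back
	 * to the kernel primary context, record a WINFIXUP fault and
	 * hand it to do_sparc64_fault() before retrying the whole
	 * rtrap sequence.
	 */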
user_rtt_fill_fixup:
	rdpr	%cwp, %g1
	add	%g1, 1, %g1
	wrpr	%g1, 0x0, %cwp

	rdpr	%wstate, %g2
	sll	%g2, 3, %g2
	wrpr	%g2, 0x0, %wstate

	/* We know %canrestore and %otherwin are both zero. */

	sethi	%hi(sparc64_kern_pri_context), %g2
	ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
	mov	PRIMARY_CONTEXT, %g1

661:	stxa	%g2, [%g1] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g2, [%g1] ASI_MMU
	.previous

	sethi	%hi(KERNBASE), %g1
	flush	%g1

	or	%g4, FAULT_CODE_WINFIXUP, %g4
	stb	%g4, [%g6 + TI_FAULT_CODE]
	stx	%g5, [%g6 + TI_FAULT_ADDR]

	mov	%g6, %l1
	wrpr	%g0, 0x0, %tl

661:	nop
	.section	.sun4v_1insn_patch, "ax"
	.word	661b
	SET_GL(0)
	.previous

	wrpr	%g0, RTRAP_PSTATE, %pstate

	mov	%l1, %g6
	ldx	[%g6 + TI_TASK], %g4
	LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
	call	do_sparc64_fault
	 add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	 nop

user_rtt_pre_restore:
	add	%g1, 1, %g1
	wrpr	%g1, 0x0, %cwp

user_rtt_restore:
	restore
	rdpr	%canrestore, %g1
	wrpr	%g1, 0x0, %cleanwin
	retry
	nop

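	/* Kernel return: if no window is restorable, branch out so the
	 * fill can be done by hand (kern_rtt_fill); otherwise zero the
	 * pt_regs magic word so this frame no longer looks like a
	 * saved trap frame, then restore and retry.
	 */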
kern_rtt:	rdpr	%canrestore, %g1
	brz,pn	%g1, kern_rtt_fill
	 nop
kern_rtt_restore:
	stw	%g0, [%sp + PTREGS_OFF + PT_V9_MAGIC]
	restore
	retry

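	/* Kernel preemption: only reschedule when preempt_count is
	 * zero, _TIF_NEED_RESCHED is set, and the interrupted context
	 * ran with %pil == 0 (%l4 holds the PIL about to be restored).
	 */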
to_kernel:
#ifdef CONFIG_PREEMPT
	ldsw	[%g6 + TI_PRE_COUNT], %l5
	brnz	%l5, kern_fpucheck
	 ldx	[%g6 + TI_FLAGS], %l5
	andcc	%l5, _TIF_NEED_RESCHED, %g0
	be,pt	%xcc, kern_fpucheck
	 nop
	cmp	%l4, 0
	bne,pn	%xcc, kern_fpucheck
	 nop
	call	preempt_schedule_irq
	 nop
	ba,pt	%xcc, rtrap
#endif
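	/* Lazy kernel FPU restore: TI_FPDEPTH tracks nesting of
	 * in-kernel FPU sections (it appears to advance in steps of
	 * two, hence the srl by 1 to index per-level state), and the
	 * per-level TI_FPSAVED byte records which register banks
	 * (FPRS_DL lower, FPRS_DU upper) were live and must be
	 * block-loaded back.
	 */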
kern_fpucheck:	ldub	[%g6 + TI_FPDEPTH], %l5
	brz,pt	%l5, rt_continue
	 srl	%l5, 1, %o0
	add	%g6, TI_FPSAVED, %l6
	ldub	[%l6 + %o0], %l2
	sub	%l5, 2, %l5

	add	%g6, TI_GSR, %o1
	andcc	%l2, (FPRS_FEF|FPRS_DU), %g0
	be,pt	%icc, 2f
	 and	%l2, FPRS_DL, %l6
	andcc	%l2, FPRS_FEF, %g0
	be,pn	%icc, 5f
	 sll	%o0, 3, %o5
	rd	%fprs, %g1

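	/* FPRS_FEF was set at this level: restore %gsr, block-load
	 * %f0-%f31 when the lower half (FPRS_DL) was saved and
	 * %f32-%f63 when the upper half (FPRS_DU) was, then finish
	 * with the saved %fsr.
	 */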
	wr	%g1, FPRS_FEF, %fprs
	ldx	[%o1 + %o5], %g1
	add	%g6, TI_XFSR, %o1
	sll	%o0, 8, %o2
	add	%g6, TI_FPREGS, %o3
	brz,pn	%l6, 1f
	 add	%g6, TI_FPREGS+0x40, %o4

	membar	#Sync
	ldda	[%o3 + %o2] ASI_BLK_P, %f0
	ldda	[%o4 + %o2] ASI_BLK_P, %f16
	membar	#Sync
1:	andcc	%l2, FPRS_DU, %g0
	be,pn	%icc, 1f
	 wr	%g1, 0, %gsr
	add	%o2, 0x80, %o2
	membar	#Sync
	ldda	[%o3 + %o2] ASI_BLK_P, %f32
	ldda	[%o4 + %o2] ASI_BLK_P, %f48
1:	membar	#Sync
	ldx	[%o1 + %o5], %fsr
2:	stb	%l5, [%g6 + TI_FPDEPTH]
	ba,pt	%xcc, rt_continue
	 nop
5:	wr	%g0, FPRS_FEF, %fprs
	sll	%o0, 8, %o2

	add	%g6, TI_FPREGS+0x80, %o3
	add	%g6, TI_FPREGS+0xc0, %o4
	membar	#Sync
	ldda	[%o3 + %o2] ASI_BLK_P, %f32
	ldda	[%o4 + %o2] ASI_BLK_P, %f48
	membar	#Sync
	wr	%g0, FPRS_DU, %fprs
	ba,pt	%xcc, rt_continue
	 stb	%l5, [%g6 + TI_FPDEPTH]