// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/linkage.h>
#include <asm/unistd.h>
#include <asm/assembler.h>
#include <asm/nds32.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/current.h>
#include <asm/fpu.h>

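/*
 * pop_zol: restore the hardware zero-overhead-loop state saved on
 * entry; $r14-$r16 hold the user's $LB/$LE/$LC (loop begin, loop end,
 * loop count) registers.
 */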
#ifdef CONFIG_HWZOL
	.macro	pop_zol
	mtusr	$r14, $LB
	mtusr	$r15, $LE
	mtusr	$r16, $LC
	.endm
#endif

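/*
 * restore_user_regs_first: interrupts are disabled (setgie.d), then
 * the privileged state ($SP_USR, $IPC, $PSW and the P_* interruption
 * registers) is reloaded from the saved frame.  Under CONFIG_FPU the
 * saved $FUCOP_CTL is restored as well, but only when the CPU really
 * has an FPU (has_fpu).
 */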
	.macro	restore_user_regs_first
	setgie.d
	isb
#if defined(CONFIG_FPU)
	addi	$sp, $sp, OSP_OFFSET
	lmw.adm	$r12, [$sp], $r25, #0x0
	sethi	$p0, hi20(has_fpu)
	lbsi	$p0, [$p0+lo12(has_fpu)]
	beqz	$p0, 2f
	mtsr	$r25, $FUCOP_CTL
2:
#else
	addi	$sp, $sp, FUCOP_CTL_OFFSET
	lmw.adm	$r12, [$sp], $r24, #0x0
#endif
	mtsr	$r12, $SP_USR
	mtsr	$r13, $IPC
#ifdef CONFIG_HWZOL
	pop_zol
#endif
	mtsr	$r19, $PSW
	mtsr	$r20, $IPSW
	mtsr	$r21, $P_IPSW
	mtsr	$r22, $P_IPC
	mtsr	$r23, $P_P0
	mtsr	$r24, $P_P1
	lmw.adm	$sp, [$sp], $sp, #0xe
	.endm

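/*
 * restore_user_regs_last: pop the saved original stack pointer (the
 * $osp slot, per the preceding OSP_OFFSET adjustment); if it is
 * non-zero, switch to it before the final iret.
 */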
	.macro	restore_user_regs_last
	pop	$p0
	cmovn	$sp, $p0, $p0

	iret
	nop

	.endm

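/*
 * restore_user_regs reloads $r0-$r25 from the frame; the fast-syscall
 * variant below leaves $r0 untouched so the syscall return value
 * survives, which is why its final stack adjustment differs by one
 * word (OSP_OFFSET-4).
 */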
	.macro	restore_user_regs
	restore_user_regs_first
	lmw.adm	$r0, [$sp], $r25, #0x0
	addi	$sp, $sp, OSP_OFFSET
	restore_user_regs_last
	.endm

	.macro	fast_restore_user_regs
	restore_user_regs_first
	lmw.adm	$r1, [$sp], $r25, #0x0
	addi	$sp, $sp, OSP_OFFSET-4
	restore_user_regs_last
	.endm

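/*
 * preempt_stop: on a non-preemptible kernel, disable interrupts before
 * the return-path checks and alias resume_kernel to no_work_pending.
 * With CONFIG_PREEMPT it is a no-op; the real resume_kernel below
 * handles kernel preemption instead.
 */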
#ifdef CONFIG_PREEMPT
	.macro	preempt_stop
	.endm
#else
	.macro	preempt_stop
	setgie.d
	isb
	.endm
#define	resume_kernel	no_work_pending
#endif

ENTRY(ret_from_exception)
	preempt_stop
ENTRY(ret_from_intr)

/*
 * Decide whether we are returning to kernel or to user mode.
 */
	lwi	$p0, [$sp+(#IPSW_OFFSET)]	! Check if in nested interrupt
	andi	$p0, $p0, #PSW_mskINTL
	bnez	$p0, resume_kernel		! done with iret
	j	resume_userspace


/*
 * This is the fast syscall return path.  We do as little as possible
 * here; in particular, $r0 is written back to the saved register frame
 * only when extra work is pending (see fast_work_pending).
 * Fixed registers: tsk - $r25, syscall # - $r7, syscall table pointer - $r8.
 */
ENTRY(ret_fast_syscall)
	gie_disable
	lwi	$r1, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $r1, #_TIF_WORK_MASK
	bnez	$p1, fast_work_pending
	fast_restore_user_regs			! iret

/*
 * Ok, we need to do extra processing: enter the slow syscall-return
 * path to handle the pending work.
 */
fast_work_pending:
	swi	$r0, [$sp+(#R0_OFFSET)]		! save $r0; this is what differs from ret_from_exception
work_pending:
	andi	$p1, $r1, #_TIF_NEED_RESCHED
	bnez	$p1, work_resched

	andi	$p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME
	beqz	$p1, no_work_pending

	move	$r0, $sp			! 'regs'
	gie_enable
	bal	do_notify_resume
	b	ret_slow_syscall
work_resched:
	bal	schedule			! reschedule, then fall through to the slow return path

/*
 * "slow" syscall return path.
 */
ENTRY(resume_userspace)
ENTRY(ret_slow_syscall)
	gie_disable
	lwi	$p0, [$sp+(#IPSW_OFFSET)]	! Check if in nested interrupt
	andi	$p0, $p0, #PSW_mskINTL
	bnez	$p0, no_work_pending		! done with iret
	lwi	$r1, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $r1, #_TIF_WORK_MASK
	bnez	$p1, work_pending		! handle work_resched, sig_pend

no_work_pending:
#ifdef CONFIG_TRACE_IRQFLAGS
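	/*
	 * Report the interrupt state we are restoring: IPSW.GIE (bit 0)
	 * selects __trace_hardirqs_on when the context we return to has
	 * interrupts enabled, __trace_hardirqs_off otherwise.
	 */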
	lwi	$p0, [$sp+(#IPSW_OFFSET)]
	andi	$p0, $p0, #0x1
	la	$r10, __trace_hardirqs_off
	la	$r9, __trace_hardirqs_on
	cmovz	$r9, $p0, $r10
	jral	$r9
#endif
	restore_user_regs			! return to user mode via iret


/*
 * Preemptible kernel.
 */
#ifdef CONFIG_PREEMPT
resume_kernel:
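	/*
	 * Preempt only when the preempt count is zero, TIF_NEED_RESCHED
	 * is set, and the interrupted kernel context had interrupts
	 * enabled (IPSW.GIE); otherwise branch to no_work_pending.
	 */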
	gie_disable
	lwi	$t0, [tsk+#TSK_TI_PREEMPT]
	bnez	$t0, no_work_pending

	lwi	$t0, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $t0, #_TIF_NEED_RESCHED
	beqz	$p1, no_work_pending

	lwi	$t0, [$sp+(#IPSW_OFFSET)]	! Interrupts off?
	andi	$t0, $t0, #1
	beqz	$t0, no_work_pending

	jal	preempt_schedule_irq
	b	no_work_pending
#endif

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
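	/*
	 * By convention, $r6 holds the kernel-thread function and $r7
	 * its argument, set up when the thread was created; a user-space
	 * fork arrives here with $r6 == 0 and goes straight to the
	 * syscall-exit checks.
	 */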
	bal	schedule_tail
	beqz	$r6, 1f				! r6 stores fn for kernel thread
	move	$r0, $r7			! prepare kernel thread arg
	jral	$r6
1:
	lwi	$r1, [tsk+#TSK_TI_FLAGS]	! check for syscall tracing
	andi	$p1, $r1, #_TIF_WORK_SYSCALL_LEAVE	! are we tracing syscalls?
	beqz	$p1, ret_slow_syscall
	move	$r0, $sp
	bal	syscall_trace_leave
	b	ret_slow_syscall