/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
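	/*
	 * A sketch of the invariant behind this swap (illustrative, not
	 * part of the entry path): while the CPU runs user code the
	 * scratch CSR holds the kernel TP (written on the way out, see
	 * resume_userspace below), and it holds 0 while the CPU runs
	 * kernel code, so one atomic csrrw both fetches the kernel TP and
	 * records, for a recursive trap, that we are already in the kernel:
	 *
	 *	csrw CSR_SCRATCH, tp		# leaving for user mode
	 *	...user code runs, then traps...
	 *	csrrw tp, CSR_SCRATCH, tp	# here: tp <- kernel TP, or 0
	 */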
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	REG_S x6, PT_T1(sp)
	REG_S x7, PT_T2(sp)
	REG_S x8, PT_S0(sp)
	REG_S x9, PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

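	/*
	 * For illustration, a sketch of how the user copy routines are
	 * expected to bracket user accesses with SR_SUM (hypothetical
	 * fragment, not this file's code):
	 *
	 *	li t6, SR_SUM
	 *	csrs CSR_STATUS, t6	# allow S-mode access to user pages
	 *	...copy loop touching user memory...
	 *	csrc CSR_STATUS, t6	# disallow it again
	 *
	 * Clearing SR_SUM above means a stray user-memory access anywhere
	 * else in the kernel faults instead of silently succeeding.
	 */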
	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel.
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop
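	/*
	 * The norelax option matters here: with linker relaxation active,
	 * a reference reachable from __global_pointer$ may itself be
	 * rewritten into a gp-relative form, so the la above could
	 * degenerate into the equivalent of
	 *
	 *	addi gp, gp, 0
	 *
	 * leaving whatever garbage was in gp untouched. Suppressing
	 * relaxation forces a full absolute-address sequence.
	 */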

#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_off
#endif

#ifdef CONFIG_CONTEXT_TRACKING
	/* If previous state is in user mode, call context_tracking_user_exit. */
	li a0, SR_PP
	and a0, s1, a0
	bnez a0, skip_context_tracking
	call context_tracking_user_exit
skip_context_tracking:
#endif

	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
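	/*
	 * Example (RV64, for illustration): a supervisor timer interrupt
	 * reads back as scause = 0x8000000000000005, negative when viewed
	 * as a signed value, while an ecall from U-mode reads back as 8,
	 * positive -- hence the signed bge against zero below.
	 */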
	bge s4, zero, 1f

	la ra, ret_from_exception

	/* Handle interrupts */
	move a0, sp /* pt_regs */
	la a1, handle_arch_irq
	REG_L a1, (a1)
	jr a1
1:
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_on
#endif
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
	andi t0, s1, SR_PIE
	beqz t0, 1f
	csrs CSR_STATUS, SR_IE

1:
	la ra, ret_from_exception
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown
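	/*
	 * Worked example of the dispatch above (RV64, illustrative): a
	 * load page fault has cause 13, so t0 ends up at
	 * excp_vect_table + (13 << 3), the do_page_fault slot; any cause
	 * at or beyond excp_vect_table_end takes the do_trap_unknown
	 * fallthrough instead.
	 */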

handle_syscall:
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
	/* Recover a0 - a7 for system calls */
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
#endif
	/* save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance EPC past the 4-byte scall instruction so we do not
	 * re-execute it when returning from this trap.
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * Syscall number held in a7.
	 * If syscall number is above allowed value, redirect to ni_syscall.
	 */
	bge a7, t0, 1f
	/*
	 * Check if syscall is rejected by tracer, i.e., a7 == -1.
	 * If yes, we pretend it was executed.
	 */
	li t1, -1
	beq a7, t1, ret_from_syscall_rejected
	blt a7, t1, 1f
	/* Call syscall */
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
	jalr s0

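/*
 * For reference, the userspace side of a system call that arrives at
 * handle_syscall looks like this (illustrative fragment, not part of
 * the kernel):
 *
 *	li a7, 93	# __NR_exit on riscv
 *	li a0, 0	# exit status
 *	ecall		# traps with scause == EXC_SYSCALL
 *
 * The handler's return value is written back to the user's a0 below.
 */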
ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * We did not execute the actual syscall.
	 * Seccomp has already set the return value in the current task's
	 * pt_regs (if it was configured with SECCOMP_RET_ERRNO/TRACE).
	 */
ret_from_syscall_rejected:
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_STATUS(sp)
	csrc CSR_STATUS, SR_IE
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_off
#endif
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for andi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, resume_kernel

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

#ifdef CONFIG_CONTEXT_TRACKING
	call context_tracking_user_enter
#endif

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp

restore_all:
#ifdef CONFIG_TRACE_IRQFLAGS
	REG_L s1, PT_STATUS(sp)
	andi t0, s1, SR_PIE
	beqz t0, 1f
	call trace_hardirqs_on
	j 2f
1:
	call trace_hardirqs_off
2:
#endif
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
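	/*
	 * An illustrative CAS loop of the kind described above (a sketch,
	 * not this kernel's actual cmpxchg expansion):
	 *
	 *	0:	lr.w t0, (a0)		# acquire a reservation
	 *		bne t0, a1, 1f		# mismatch: branch over the sc
	 *		sc.w t1, a2, (a0)
	 *		bnez t1, 0b
	 *	1:
	 *
	 * The taken branch to 1: leaves the reservation dangling; the
	 * dummy store-conditional below is what clears it.
	 */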
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	REG_L x6, PT_T1(sp)
	REG_L x7, PT_T2(sp)
	REG_L x8, PT_S0(sp)
	REG_L x9, PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif

#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule

/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	move t0, a0
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	bnez t0, ret_from_syscall_rejected
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 * a0: previous task_struct (must be preserved across the switch)
 * a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
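/*
 * Illustrative call site (a sketch; the real caller is the switch_to()
 * macro in asm/switch_to.h):
 *
 *	move a0, s0		# prev task_struct
 *	move a1, s1		# next task_struct
 *	call __switch_to	# returns on next's kernel stack
 *	# a0 still holds prev, ready for schedule_tail()
 *
 * Only ra, sp and s0-s11 need saving here: every other register is
 * caller-saved under the RISC-V calling convention, so the C caller
 * has already spilled anything it cares about.
 */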
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	ret
ENDPROC(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	/* Exception vector table */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	RISCV_PTR do_trap_insn_fault
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	RISCV_PTR do_page_fault   /* instruction page fault */
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
END(excp_vect_table)

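/*
 * Without an MMU there is no vDSO to host the signal-return trampoline,
 * so the kernel exports this two-instruction sequence for the signal
 * setup code to place where the user's signal handler can return
 * through it ("scall" is simply the older mnemonic for "ecall"):
 */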
#ifndef CONFIG_MMU
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	scall
END(__user_rt_sigreturn)
#endif