1/* SPDX-License-Identifier: GPL-2.0 */
2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3
4#include <linux/linkage.h>
5#include <abi/entry.h>
6#include <abi/pgtable-bits.h>
7#include <asm/errno.h>
8#include <asm/setup.h>
9#include <asm/unistd.h>
10#include <asm/asm-offsets.h>
11#include <linux/threads.h>
12#include <asm/page.h>
13#include <asm/thread_info.h>
14
/*
 * zero_fp - clear the frame-pointer register (r8) on kernel entry.
 *
 * With CONFIG_STACKTRACE the unwinder follows frame pointers; zeroing
 * r8 here terminates the frame chain so a stacktrace taken from this
 * exception context cannot walk into stale frames.  Expands to nothing
 * when stacktrace support is off.
 */
.macro zero_fp
#ifdef CONFIG_STACKTRACE
	movi	r8, 0
#endif
.endm
20
/*
 * context_tracking - report "left user mode" to the context-tracking
 * core on entry from an exception/trap/interrupt.
 *
 * Only active with CONFIG_CONTEXT_TRACKING_USER.  Bit 31 of the saved
 * epsr (PSR.S, supervisor bit) set means the exception was taken from
 * kernel mode, in which case there is no user exit to report (skip to
 * 1f).  user_exit_callable is a C call that clobbers the argument
 * registers, so the syscall/fault arguments saved by SAVE_ALL are
 * reloaded from the pt_regs frame afterwards; ABIv1 additionally
 * carries args 5/6 in r6/r7.
 */
.macro context_tracking
#ifdef CONFIG_CONTEXT_TRACKING_USER
	mfcr	a0, epsr
	btsti	a0, 31			/* came from kernel mode? */
	bt	1f
	jbsr	user_exit_callable
	ldw	a0, (sp, LSAVE_A0)	/* re-materialize clobbered args */
	ldw	a1, (sp, LSAVE_A1)
	ldw	a2, (sp, LSAVE_A2)
	ldw	a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV1__)
	ldw	r6, (sp, LSAVE_A4)
	ldw	r7, (sp, LSAVE_A5)
#endif
1:
#endif
.endm
38
.text
/*
 * Page-fault exception entry: build the full pt_regs frame, re-enable
 * exceptions (ee), and hand pt_regs to the C handler.
 * Returns through the common ret_from_exception path.
 */
ENTRY(csky_pagefault)
	SAVE_ALL 0			/* 0: do not advance the saved epc; the insn is retried */
	zero_fp
	context_tracking
	psrset	ee
	mov	a0, sp			/* a0 = pt_regs */
	jbsr	do_page_fault
	jmpi	ret_from_exception
48
/*
 * System-call entry (trap 0).
 *
 * SAVE_ALL's TRAP0_SIZE operand steps the saved epc past the trap
 * instruction so the syscall is not re-executed on return (compare
 * csky_get_tls, which does the same epc adjustment by hand).  With
 * exceptions and interrupts re-enabled, the handler is looked up in
 * sys_call_table; an out-of-range number or a NULL table entry returns
 * without calling anything.
 */
ENTRY(csky_systemcall)
	SAVE_ALL TRAP0_SIZE
	zero_fp
	context_tracking
	psrset	ee, ie

	lrw	r9, __NR_syscalls
	cmphs	syscallid, r9		/* Check nr of syscall */
	bt	ret_from_exception	/* out of range: no handler is run */

	lrw	r9, sys_call_table
	ixw	r9, syscallid		/* r9 = &sys_call_table[nr] (word-scaled index) */
	ldw	syscallid, (r9)
	cmpnei	syscallid, 0		/* NULL entry: return untouched */
	bf	ret_from_exception

	/*
	 * thread_info sits at the base of the THREAD_SIZE-aligned kernel
	 * stack: mask sp down to reach it, then test the syscall-work
	 * flags (ptrace/seccomp/tracepoint hooks).
	 */
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10
	ldw	r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_SYSCALL_WORK
	and	r10, r9
	cmpnei	r10, 0
	bt	csky_syscall_trace	/* traced: take the slow path */
#if defined(__CSKYABIV2__)
	/*
	 * ABIv2: syscall args 5/6 arrive in r4/r5 but the C handlers take
	 * them on the stack; build a two-word argument area for the call.
	 */
	subi	sp, 8
	stw	r5, (sp, 0x4)
	stw	r4, (sp, 0x0)
	jsr	syscallid			/* Do system call */
	addi	sp, 8
#else
	jsr	syscallid
#endif
	stw	a0, (sp, LSAVE_A0)	/* Save return value */
	jmpi	ret_from_exception
84
/*
 * Traced system-call slow path: report syscall entry to ptrace/seccomp,
 * run the syscall unless the tracer rejected it (non-zero return from
 * syscall_trace_enter), then report syscall exit.
 */
csky_syscall_trace:
	mov	a0, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace_enter
	cmpnei	a0, 0			/* non-zero: tracer/seccomp denied the call */
	bt	1f
	/* Prepare args before do system call */
	ldw	a0, (sp, LSAVE_A0)
	ldw	a1, (sp, LSAVE_A1)
	ldw	a2, (sp, LSAVE_A2)
	ldw	a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
	subi	sp, 8
	/*
	 * NOTE(review): the LSAVE_A4/A5 loads below use sp *after* the
	 * subi, i.e. 8 bytes below the pt_regs base that LSAVE_* offsets
	 * are defined against (the fast path stores r4/r5 instead) --
	 * confirm the offsets account for the moved sp.
	 */
	ldw	r9, (sp, LSAVE_A4)
	stw	r9, (sp, 0x0)		/* stack args 5/6 for the C handler */
	ldw	r9, (sp, LSAVE_A5)
	stw	r9, (sp, 0x4)
	jsr	syscallid			/* Do system call */
	addi	sp, 8
#else
	ldw	r6, (sp, LSAVE_A4)	/* ABIv1: args 5/6 in r6/r7 */
	ldw	r7, (sp, LSAVE_A5)
	jsr	syscallid			/* Do system call */
#endif
	stw	a0, (sp, LSAVE_A0)	/* Save return value */

1:
	mov	a0, sp			/* right now, sp --> pt_regs */
	jbsr	syscall_trace_exit
	br	ret_from_exception
114
/*
 * First schedule into a new kernel thread: finish the context-switch
 * bookkeeping, then call the thread function with its argument.
 * Assumes r9 = function, r10 = argument as set up by copy_thread()
 * (convention not visible in this file -- confirm against copy_thread).
 */
ENTRY(ret_from_kernel_thread)
	jbsr	schedule_tail
	mov	a0, r10			/* a0 = thread argument */
	jsr	r9			/* call the thread function */
	jbsr	ret_from_exception	/* only reached if the function returns */
120
/*
 * Child side of fork/clone returns here after the first switch.  After
 * schedule_tail, report syscall exit to the tracer if any syscall-work
 * flag is set, then fall through into ret_from_exception.
 */
ENTRY(ret_from_fork)
	jbsr	schedule_tail
	mov	r9, sp			/* locate thread_info at stack base */
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10
	ldw	r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_SYSCALL_WORK
	and	r10, r9
	cmpnei	r10, 0
	bf	ret_from_exception	/* not traced: plain return */
	mov	a0, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace_exit
	/* falls through to ret_from_exception */
133
/*
 * Common exception/interrupt/syscall return path.  Expects sp to point
 * at the saved pt_regs frame.
 */
ret_from_exception:
	psrclr	ie			/* irqs off while inspecting work flags */
	ld	r9, (sp, LSAVE_PSR)
	btsti	r9, 31			/* saved PSR.S set => returning to kernel mode */

	bt	1f
	/*
	 * Returning to user space: thread_info sits at the base of the
	 * THREAD_SIZE-aligned kernel stack; mask sp down to reach it and
	 * check for pending work (reschedule, signals, notify-resume).
	 */
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10

	ldw	r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_WORK_MASK
	and	r10, r9
	cmpnei	r10, 0
	bt	exit_work
#ifdef CONFIG_CONTEXT_TRACKING_USER
	jbsr	user_enter_callable	/* report re-entry into user context */
#endif
1:
#ifdef CONFIG_PREEMPTION
	/* Kernel-mode return: preempt only when preempt_count == 0. */
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10

	ldw	r10, (r9, TINFO_PREEMPT)
	cmpnei	r10, 0
	bt	2f
	jbsr	preempt_schedule_irq /* irq en/disable is done inside */
2:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	ld	r10, (sp, LSAVE_PSR)
	btsti	r10, 6			/* saved IE bit: will irqs be on after rte? */
	bf	2f
	jbsr	trace_hardirqs_on
2:
#endif
	RESTORE_ALL
177
/*
 * Pending-work handling on return to user.  lr is pre-loaded with
 * ret_from_exception so both tail targets (schedule /
 * do_notify_resume) return straight into the exit path, which
 * re-checks the work flags.
 */
exit_work:
	lrw	r9, ret_from_exception
	mov	lr, r9			/* callees "return" to ret_from_exception */

	btsti	r10, TIF_NEED_RESCHED
	bt	work_resched

	/* Remaining work bits: signals / notify-resume callbacks. */
	psrset	ie
	mov	a0, sp			/* a0 = pt_regs */
	mov	a1, r10			/* a1 = thread_info flags */
	jmpi	do_notify_resume

work_resched:
	jmpi	schedule
192
/*
 * Generic trap/exception entry: save the full frame and dispatch to
 * the C-level trap handler.
 */
ENTRY(csky_trap)
	SAVE_ALL 0			/* epc unchanged; handler decides restart */
	zero_fp
	context_tracking
	psrset	ee
	mov	a0, sp /* Push Stack pointer arg */
	jbsr	trap_c /* Call C-level trap handler */
	jmpi	ret_from_exception
201
/*
 * Fast "get TLS pointer" trap (trap 3), for ABIv1 where there is no
 * dedicated TLS register.  Returns the thread's TP value in a0 without
 * building a pt_regs frame.
 *
 * Prototype from libc for abiv1:
 * register unsigned int __result asm("a0");
 * asm( "trap 3" :"=r"(__result)::);
 */
ENTRY(csky_get_tls)
	USPTOKSP			/* switch from user sp to kernel sp */

	/*
	 * NOTE(review): read-back/rewrite of MEH looks like a hardware
	 * workaround (refresh of the TLB entry-hi register); the older
	 * version of this handler lacks it -- confirm the erratum.
	 */
	RD_MEH	a0
	WR_MEH	a0

	/* increase epc for continue */
	mfcr	a0, epc
	addi	a0, TRAP0_SIZE		/* skip the trap insn on return */
	mtcr	a0, epc

	/* get current task thread_info with kernel 8K stack */
	bmaski	a0, THREAD_SHIFT
	not	a0			/* a0 = ~(THREAD_SIZE - 1) */
	subi	sp, 1			/* bias sp inside the stack so masking */
	and	a0, sp			/* an exactly-full stack still yields */
	addi	sp, 1			/* the right thread_info base */

	/* get tls */
	ldw	a0, (a0, TINFO_TP_VALUE)

	KSPTOUSP			/* back to the user stack pointer */
	rte
230
/*
 * Hardware interrupt entry: save the full frame and dispatch through
 * the generic IRQ layer.  Interrupts stay disabled (no ie in psrset)
 * until the common return path decides otherwise.
 */
ENTRY(csky_irq)
	SAVE_ALL 0
	zero_fp
	context_tracking
	psrset	ee

#ifdef CONFIG_TRACE_IRQFLAGS
	jbsr	trace_hardirqs_off	/* record that irqs are now off */
#endif


	mov	a0, sp			/* a0 = pt_regs */
	jbsr	generic_handle_arch_irq

	jmpi	ret_from_exception
246
/*
 * Context switch between two tasks.
 *
 * a0 = prev task_struct *
 * a1 = next task_struct *
 * a0 = return next
 */
ENTRY(__switch_to)
	lrw	a3, TASK_THREAD
	addu	a3, a0			/* a3 = &prev->thread */

	SAVE_SWITCH_STACK		/* push prev's callee-saved registers */

	stw	sp, (a3, THREAD_KSP)	/* record prev's kernel stack pointer */

	/* Set up next process to run */
	lrw	a3, TASK_THREAD
	addu	a3, a1			/* a3 = &next->thread */

	ldw	sp, (a3, THREAD_KSP) /* Set next kernel sp */

#if defined(__CSKYABIV2__)
	/* ABIv2 keeps the TLS pointer in a register: reload it for next. */
	addi	a3, a1, TASK_THREAD_INFO
	ldw	tls, (a3, TINFO_TP_VALUE)
#endif

	RESTORE_SWITCH_STACK		/* pop next's callee-saved registers */

	rts
ENDPROC(__switch_to)
1/* SPDX-License-Identifier: GPL-2.0 */
2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3
4#include <linux/linkage.h>
5#include <abi/entry.h>
6#include <abi/pgtable-bits.h>
7#include <asm/errno.h>
8#include <asm/setup.h>
9#include <asm/unistd.h>
10#include <asm/asm-offsets.h>
11#include <linux/threads.h>
12#include <asm/setup.h>
13#include <asm/page.h>
14#include <asm/thread_info.h>
15
16#define PTE_INDX_MSK 0xffc
17#define PTE_INDX_SHIFT 10
18#define _PGDIR_SHIFT 22
19
/*
 * zero_fp - clear the frame-pointer register (r8) so a stacktrace
 * started in this exception context terminates cleanly; no-op without
 * CONFIG_STACKTRACE.
 */
.macro zero_fp
#ifdef CONFIG_STACKTRACE
	movi	r8, 0
#endif
.endm
25
/*
 * context_tracking - report "left user mode" to the context-tracking
 * core (older CONFIG_CONTEXT_TRACKING API).  Saved epsr bit 31 (PSR.S)
 * set means the exception came from kernel mode: skip the hook.  The C
 * call clobbers the argument registers, so reload them from pt_regs;
 * ABIv1 carries args 5/6 in r6/r7.
 */
.macro context_tracking
#ifdef CONFIG_CONTEXT_TRACKING
	mfcr	a0, epsr
	btsti	a0, 31			/* from kernel mode? */
	bt	1f
	jbsr	context_tracking_user_exit
	ldw	a0, (sp, LSAVE_A0)	/* re-materialize clobbered args */
	ldw	a1, (sp, LSAVE_A1)
	ldw	a2, (sp, LSAVE_A2)
	ldw	a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV1__)
	ldw	r6, (sp, LSAVE_A4)
	ldw	r7, (sp, LSAVE_A5)
#endif
1:
#endif
.endm
43
/*
 * tlbop_begin - software TLB fast path, instantiated once per fault
 * type (read-invalid, write-invalid, modified).
 *
 * \name        slow-path label (falls into a full page fault)
 * \val0        _PAGE_* permission bit that must already be set in the PTE
 * \val1, \val2 PTE status bits to set when fixing up on the fast path
 *
 * The fast path runs on only three scratch registers (a2, a3, r6),
 * preserved in the supervisor scratch CRs ss2-ss4, so no stack is
 * touched.  It walks the two-level page table by hand, and either
 * fixes the PTE's valid/accessed/dirty bits and rte's (letting the
 * access retry), or restores the registers and enters the generic
 * page-fault path via SAVE_ALL + tlbop_end.
 */
.macro tlbop_begin name, val0, val1, val2
ENTRY(csky_\name)
	mtcr	a3, ss2			/* stash scratch regs in supervisor CRs */
	mtcr	r6, ss3
	mtcr	a2, ss4

	RD_PGDR	r6			/* r6 = user page-directory base */
	RD_MEH	a3			/* a3 = faulting VA (TLB entry-hi) */
#ifdef CONFIG_CPU_HAS_TLBI
	tlbi.vaas a3			/* drop any stale entry for this VA */
	sync.is

	btsti	a3, 31			/* kernel-half address? */
	bf	1f
	RD_PGDR_K r6			/* then walk the kernel page directory */
1:
#else
	/*
	 * NOTE(review): MCIR command writes for pre-TLBI cores --
	 * presumably TLB probe + invalidate commands (bits 31/25);
	 * confirm against the CPU manual.
	 */
	bgeni	a2, 31
	WR_MCIR	a2
	bgeni	a2, 25
	WR_MCIR	a2
#endif
	bclri	r6, 0			/* clear the PGDR low flag bit */
	lrw	a2, va_pa_offset
	ld.w	a2, (a2, 0)
	subu	r6, a2			/* phys -> kernel virtual address: */
	bseti	r6, 31			/* subtract va_pa_offset, set top bit */

	mov	a2, a3
	lsri	a2, _PGDIR_SHIFT
	lsli	a2, 2			/* a2 = pgd index * 4 */
	addu	r6, a2
	ldw	r6, (r6)		/* r6 = pgd entry (PTE table address) */

	lrw	a2, va_pa_offset
	ld.w	a2, (a2, 0)
	subu	r6, a2			/* PTE table: phys -> kernel virtual */
	bseti	r6, 31

	lsri	a3, PTE_INDX_SHIFT
	lrw	a2, PTE_INDX_MSK
	and	a3, a2			/* a3 = pte index * 4, masked to the table */
	addu	r6, a3
	ldw	a3, (r6)		/* a3 = pte */

	movi	a2, (_PAGE_PRESENT | \val0)
	and	a3, a2
	cmpne	a3, a2			/* present AND permitted? */
	bt	\name			/* no: take the slow path */

	/* First read/write the page, just update the flags */
	ldw	a3, (r6)
	bgeni	a2, PAGE_VALID_BIT
	bseti	a2, PAGE_ACCESSED_BIT
	bseti	a2, \val1
	bseti	a2, \val2
	or	a3, a2
	stw	a3, (r6)		/* write the fixed-up pte back */

	/* Some cpu tlb-hardrefill bypass the cache */
#ifdef CONFIG_CPU_NEED_TLBSYNC
	movi	a2, 0x22
	bseti	a2, 6
	mtcr	r6, cr22
	mtcr	a2, cr17
	sync
#endif

	mfcr	a3, ss2			/* restore scratch; rte retries the access */
	mfcr	r6, ss3
	mfcr	a2, ss4
	rte
\name:
	mfcr	a3, ss2			/* slow path: restore, then full frame save */
	mfcr	r6, ss3
	mfcr	a2, ss4
	SAVE_ALL 0
.endm
/*
 * tlbop_end - slow-path tail shared by the tlbop_begin instances.
 * Runs after SAVE_ALL and hands the fault to the C page-fault handler.
 * \is_write selects do_page_fault's write-access argument.
 */
.macro tlbop_end is_write
	zero_fp
	context_tracking
	RD_MEH	a2			/* a2 = faulting VA, 3rd C argument */
	psrset	ee, ie
	mov	a0, sp			/* a0 = pt_regs */
	movi	a1, \is_write		/* a1 = write-access flag */
	jbsr	do_page_fault
	jmpi	ret_from_exception
.endm
132
.text

/* Read access, PTE not valid in TLB: mark valid + accessed. */
tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
tlbop_end 0

/* Write access, PTE not valid in TLB: mark dirty + modified. */
tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
tlbop_end 1

/* Write to a clean page: set dirty + modified. */
tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
#ifndef CONFIG_CPU_HAS_LDSTEX
/* Cores without ld/st-exclusive: repair an interrupted cmpxchg sequence. */
jbsr csky_cmpxchg_fixup
#endif
tlbop_end 1
146
/*
 * System-call entry (trap 0), older variant with CONFIG_DEBUG_RSEQ
 * hooks.  TRAP0_SIZE steps the saved epc past the trap instruction.
 * An out-of-range syscall number branches to 1f so the rseq debug
 * check still runs before returning.
 */
ENTRY(csky_systemcall)
	SAVE_ALL TRAP0_SIZE
	zero_fp
	context_tracking
	psrset	ee, ie

	lrw	r9, __NR_syscalls
	cmphs	syscallid, r9		/* Check nr of syscall */
	bt	1f			/* out of range: skip to the return path */

	lrw	r9, sys_call_table
	ixw	r9, syscallid		/* r9 = &sys_call_table[nr] */
	ldw	syscallid, (r9)
	cmpnei	syscallid, 0		/* NULL entry: plain return */
	bf	ret_from_exception

	/* thread_info is at the base of the THREAD_SIZE-aligned stack. */
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10
	ldw	r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_SYSCALL_WORK
	and	r10, r9
	cmpnei	r10, 0
	bt	csky_syscall_trace	/* traced: slow path */
#if defined(__CSKYABIV2__)
	/* ABIv2: args 5/6 (r4/r5) are passed to C on the stack. */
	subi	sp, 8
	stw	r5, (sp, 0x4)
	stw	r4, (sp, 0x0)
	jsr	syscallid /* Do system call */
	addi	sp, 8
#else
	jsr	syscallid
#endif
	stw	a0, (sp, LSAVE_A0) /* Save return value */
1:
#ifdef CONFIG_DEBUG_RSEQ
	mov	a0, sp			/* validate rseq state on syscall exit */
	jbsr	rseq_syscall
#endif
	jmpi	ret_from_exception
187
/*
 * Traced system-call slow path: syscall_trace_enter may veto the call
 * (non-zero return); otherwise rebuild the argument registers from
 * pt_regs, run the syscall, then report syscall exit.
 */
csky_syscall_trace:
	mov	a0, sp /* sp = pt_regs pointer */
	jbsr	syscall_trace_enter
	cmpnei	a0, 0			/* tracer/seccomp denied the call? */
	bt	1f
	/* Prepare args before do system call */
	ldw	a0, (sp, LSAVE_A0)
	ldw	a1, (sp, LSAVE_A1)
	ldw	a2, (sp, LSAVE_A2)
	ldw	a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
	subi	sp, 8
	/*
	 * NOTE(review): LSAVE_A4/A5 are read relative to the adjusted sp,
	 * 8 bytes below the pt_regs base -- confirm the offsets account
	 * for this (the untraced path stores r4/r5 directly instead).
	 */
	ldw	r9, (sp, LSAVE_A4)
	stw	r9, (sp, 0x0)		/* stack args 5/6 for the C handler */
	ldw	r9, (sp, LSAVE_A5)
	stw	r9, (sp, 0x4)
	jsr	syscallid /* Do system call */
	addi	sp, 8
#else
	ldw	r6, (sp, LSAVE_A4)	/* ABIv1: args 5/6 in r6/r7 */
	ldw	r7, (sp, LSAVE_A5)
	jsr	syscallid /* Do system call */
#endif
	stw	a0, (sp, LSAVE_A0) /* Save return value */

1:
#ifdef CONFIG_DEBUG_RSEQ
	mov	a0, sp			/* validate rseq state on syscall exit */
	jbsr	rseq_syscall
#endif
	mov	a0, sp /* right now, sp --> pt_regs */
	jbsr	syscall_trace_exit
	br	ret_from_exception
221
/*
 * First schedule into a new kernel thread: finish switch bookkeeping,
 * then call the thread function.  Assumes r9 = function, r10 =
 * argument per the copy_thread() convention (not visible here).
 */
ENTRY(ret_from_kernel_thread)
	jbsr	schedule_tail
	mov	a0, r10			/* a0 = thread argument */
	jsr	r9			/* invoke the thread function */
	jbsr	ret_from_exception	/* only reached if it returns */
227
/*
 * Child side of fork/clone: after schedule_tail, report syscall exit
 * when any syscall-work flag is set, then fall through into
 * ret_from_exception.
 */
ENTRY(ret_from_fork)
	jbsr	schedule_tail
	mov	r9, sp			/* thread_info at stack base */
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10
	ldw	r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_SYSCALL_WORK
	and	r10, r9
	cmpnei	r10, 0
	bf	ret_from_exception	/* not traced */
	mov	a0, sp /* sp = pt_regs pointer */
	jbsr	syscall_trace_exit
	/* falls through to ret_from_exception */
240
/*
 * Common exception/interrupt/syscall return path; sp points at the
 * saved pt_regs frame.
 */
ret_from_exception:
	psrclr	ie			/* irqs off while checking work flags */
	ld	r9, (sp, LSAVE_PSR)
	btsti	r9, 31			/* saved PSR.S set => return to kernel mode */

	bt	1f
	/*
	 * Returning to user space: find thread_info at the base of the
	 * THREAD_SIZE-aligned stack and check pending work (reschedule,
	 * signals, notify-resume).
	 */
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10

	ldw	r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_WORK_MASK
	and	r10, r9
	cmpnei	r10, 0
	bt	exit_work
#ifdef CONFIG_CONTEXT_TRACKING
	jbsr	context_tracking_user_enter	/* report re-entry to user */
#endif
1:
#ifdef CONFIG_PREEMPTION
	/* Kernel-mode return: preempt only when preempt_count == 0. */
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10

	ldw	r10, (r9, TINFO_PREEMPT)
	cmpnei	r10, 0
	bt	2f
	jbsr	preempt_schedule_irq /* irq en/disable is done inside */
2:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	ld	r10, (sp, LSAVE_PSR)
	btsti	r10, 6			/* saved IE bit: irqs on after rte? */
	bf	2f
	jbsr	trace_hardirqs_on
2:
#endif
	RESTORE_ALL
284
/*
 * Pending-work handling on return to user.  lr is pre-loaded so both
 * tail targets return straight into ret_from_exception, which
 * re-checks the flags.
 */
exit_work:
	lrw	r9, ret_from_exception
	mov	lr, r9			/* callees "return" to ret_from_exception */

	btsti	r10, TIF_NEED_RESCHED
	bt	work_resched

	/* Remaining work bits: signals / notify-resume callbacks. */
	psrset	ie
	mov	a0, sp			/* a0 = pt_regs */
	mov	a1, r10			/* a1 = thread_info flags */
	jmpi	do_notify_resume

work_resched:
	jmpi	schedule
299
/*
 * Generic trap/exception entry: full frame save, then dispatch to the
 * C trap handler.
 */
ENTRY(csky_trap)
	SAVE_ALL 0			/* epc unchanged; handler decides restart */
	zero_fp
	context_tracking
	psrset	ee
	mov	a0, sp /* Push Stack pointer arg */
	jbsr	trap_c /* Call C-level trap handler */
	jmpi	ret_from_exception
308
/*
 * Fast "get TLS pointer" trap (trap 3) for ABIv1, which has no TLS
 * register.  Returns the thread's TP value in a0 without building a
 * pt_regs frame.
 *
 * Prototype from libc for abiv1:
 * register unsigned int __result asm("a0");
 * asm( "trap 3" :"=r"(__result)::);
 */
ENTRY(csky_get_tls)
	USPTOKSP			/* user sp -> kernel sp */

	/* increase epc for continue */
	mfcr	a0, epc
	addi	a0, TRAP0_SIZE		/* skip the trap insn on return */
	mtcr	a0, epc

	/* get current task thread_info with kernel 8K stack */
	bmaski	a0, THREAD_SHIFT
	not	a0			/* a0 = ~(THREAD_SIZE - 1) */
	subi	sp, 1			/* bias sp inside the stack so masking */
	and	a0, sp			/* a full stack still yields the right */
	addi	sp, 1			/* thread_info base */

	/* get tls */
	ldw	a0, (a0, TINFO_TP_VALUE)

	KSPTOUSP			/* back to the user stack pointer */
	rte
334
/*
 * Hardware interrupt entry (older variant dispatching to csky_do_IRQ).
 * Interrupts stay disabled (no ie in psrset) until the common return
 * path decides otherwise.
 */
ENTRY(csky_irq)
	SAVE_ALL 0
	zero_fp
	context_tracking
	psrset	ee

#ifdef CONFIG_TRACE_IRQFLAGS
	jbsr	trace_hardirqs_off	/* record that irqs are now off */
#endif


	mov	a0, sp			/* a0 = pt_regs */
	jbsr	csky_do_IRQ

	jmpi	ret_from_exception
350
/*
 * Context switch between two tasks.
 *
 * a0 = prev task_struct *
 * a1 = next task_struct *
 * a0 = return next
 */
ENTRY(__switch_to)
	lrw	a3, TASK_THREAD
	addu	a3, a0			/* a3 = &prev->thread */

	SAVE_SWITCH_STACK		/* push prev's callee-saved registers */

	stw	sp, (a3, THREAD_KSP)	/* record prev's kernel stack pointer */

	/* Set up next process to run */
	lrw	a3, TASK_THREAD
	addu	a3, a1			/* a3 = &next->thread */

	ldw	sp, (a3, THREAD_KSP) /* Set next kernel sp */

#if defined(__CSKYABIV2__)
	/* ABIv2 keeps the TLS pointer in a register: reload it for next. */
	addi	a3, a1, TASK_THREAD_INFO
	ldw	tls, (a3, TINFO_TP_VALUE)
#endif

	RESTORE_SWITCH_STACK		/* pop next's callee-saved registers */

	rts
ENDPROC(__switch_to)