/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/linkage.h>
#include <abi/entry.h>
#include <abi/pgtable-bits.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <linux/threads.h>
#include <asm/page.h>
#include <asm/thread_info.h>

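/*
 * Software page-table walk constants for the two-level page table:
 * _PGDIR_SHIFT selects the PGD entry (4 MiB of address space each), and
 * (vaddr >> PTE_INDX_SHIFT) & PTE_INDX_MSK yields the byte offset of the
 * PTE within the page table (the PTE index scaled by 4, the entry size).
 */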
#define PTE_INDX_MSK	0xffc
#define PTE_INDX_SHIFT	10
#define _PGDIR_SHIFT	22

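/*
 * Clear the frame pointer (r8) on kernel entry so that stack traces
 * started from this exception frame terminate cleanly.
 */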
.macro	zero_fp
#ifdef CONFIG_STACKTRACE
	movi	r8, 0
#endif
.endm

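/*
 * Tell context tracking that we are leaving user mode.  Bit 31 of the
 * saved epsr is the supervisor bit: if it is set we trapped from kernel
 * mode and the call is skipped.  context_tracking_user_exit() clobbers
 * the argument registers, so reload them from the saved frame afterwards.
 */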
.macro	context_tracking
#ifdef CONFIG_CONTEXT_TRACKING
	mfcr	a0, epsr
	btsti	a0, 31
	bt	1f
	jbsr	context_tracking_user_exit
	ldw	a0, (sp, LSAVE_A0)
	ldw	a1, (sp, LSAVE_A1)
	ldw	a2, (sp, LSAVE_A2)
	ldw	a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV1__)
	ldw	r6, (sp, LSAVE_A4)
	ldw	r7, (sp, LSAVE_A5)
#endif
1:
#endif
.endm

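/*
 * TLB exception fast path: stash the scratch registers in ss2-ss4, walk
 * the two-level page table by hand and, if the PTE is present with the
 * required permission (\val0), set the accessed/valid or dirty/modified
 * bits and return with rte.  Otherwise fall through to the \name label,
 * which restores the scratch registers, saves the full register frame
 * and (via tlbop_end) hands the fault to do_page_fault().
 */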
.macro	tlbop_begin name, val0, val1, val2
ENTRY(csky_\name)
	mtcr	a3, ss2
	mtcr	r6, ss3
	mtcr	a2, ss4

	RD_PGDR	r6
	RD_MEH	a3
#ifdef CONFIG_CPU_HAS_TLBI
	tlbi.vaas a3
	sync.is

	btsti	a3, 31
	bf	1f
	RD_PGDR_K r6
1:
#else
	bgeni	a2, 31
	WR_MCIR	a2
	bgeni	a2, 25
	WR_MCIR	a2
#endif
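	/*
	 * r6 = PGD base from the PGDR register: clear the low bit and turn
	 * the physical address into a kernel virtual one by subtracting
	 * va_pa_offset and setting bit 31.
	 */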
	bclri	r6, 0
	lrw	a2, va_pa_offset
	ld.w	a2, (a2, 0)
	subu	r6, a2
	bseti	r6, 31

	mov	a2, a3
	lsri	a2, _PGDIR_SHIFT
	lsli	a2, 2
	addu	r6, a2
	ldw	r6, (r6)

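	/*
	 * r6 now holds the PGD entry, i.e. the physical address of the
	 * PTE table; convert it to a kernel virtual address the same way.
	 */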
	lrw	a2, va_pa_offset
	ld.w	a2, (a2, 0)
	subu	r6, a2
	bseti	r6, 31

	lsri	a3, PTE_INDX_SHIFT
	lrw	a2, PTE_INDX_MSK
	and	a3, a2
	addu	r6, a3
	ldw	a3, (r6)

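	/*
	 * a3 = PTE.  It must be present and allow the access type
	 * (\val0 is _PAGE_READ or _PAGE_WRITE), otherwise take the
	 * slow path and raise a page fault.
	 */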
	movi	a2, (_PAGE_PRESENT | \val0)
	and	a3, a2
	cmpne	a3, a2
	bt	\name

	/* First read/write of the page: just update the PTE flags */
	ldw	a3, (r6)
	bgeni	a2, PAGE_VALID_BIT
	bseti	a2, PAGE_ACCESSED_BIT
	bseti	a2, \val1
	bseti	a2, \val2
	or	a3, a2
	stw	a3, (r6)

	/* On some CPUs the hardware TLB refill bypasses the cache */
#ifdef CONFIG_CPU_NEED_TLBSYNC
	movi	a2, 0x22
	bseti	a2, 6
	mtcr	r6, cr22
	mtcr	a2, cr17
	sync
#endif

	mfcr	a3, ss2
	mfcr	r6, ss3
	mfcr	a2, ss4
	rte
\name:
	mfcr	a3, ss2
	mfcr	r6, ss3
	mfcr	a2, ss4
	SAVE_ALL 0
.endm

.macro	tlbop_end is_write
	zero_fp
	context_tracking
	RD_MEH	a2
	psrset	ee, ie
	mov	a0, sp
	movi	a1, \is_write
	jbsr	do_page_fault
	jmpi	ret_from_exception
.endm

.text

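/*
 * Instantiate the three TLB exception handlers: read miss, write miss and
 * write to a clean (not yet dirty) page.  Without hardware ldex/stex the
 * modified handler also calls csky_cmpxchg_fixup so that a trap-based
 * cmpxchg interrupted by the fault can be restarted.
 */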
tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
tlbop_end 0

tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
tlbop_end 1

tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
#ifndef CONFIG_CPU_HAS_LDSTEX
jbsr csky_cmpxchg_fixup
#endif
tlbop_end 1

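/*
 * System call entry (trap 0): range-check the syscall number, look the
 * handler up in sys_call_table and, if the thread has _TIF_SYSCALL_WORK
 * pending, route the call through csky_syscall_trace instead.
 */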
ENTRY(csky_systemcall)
	SAVE_ALL TRAP0_SIZE
	zero_fp
	context_tracking
	psrset	ee, ie

	lrw	r9, __NR_syscalls
	cmphs	syscallid, r9		/* Check nr of syscall */
	bt	1f

	lrw	r9, sys_call_table
	ixw	r9, syscallid
	ldw	syscallid, (r9)
	cmpnei	syscallid, 0
	bf	ret_from_exception

	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10
	ldw	r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_SYSCALL_WORK
	and	r10, r9
	cmpnei	r10, 0
	bt	csky_syscall_trace
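	/*
	 * ABIv2 passes syscall arguments 5 and 6 in r4/r5; spill them to
	 * the stack so the C syscall body can find them there.
	 */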
#if defined(__CSKYABIV2__)
	subi	sp, 8
	stw	r5, (sp, 0x4)
	stw	r4, (sp, 0x0)
	jsr	syscallid		/* Do system call */
	addi	sp, 8
#else
	jsr	syscallid
#endif
	stw	a0, (sp, LSAVE_A0)	/* Save return value */
1:
#ifdef CONFIG_DEBUG_RSEQ
	mov	a0, sp
	jbsr	rseq_syscall
#endif
	jmpi	ret_from_exception

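/*
 * Traced system call path: report the entry to the ptrace/seccomp hooks,
 * skip the call if syscall_trace_enter() rejects it, otherwise reload the
 * (possibly modified) arguments from pt_regs, perform the call and report
 * the exit before returning to user space.
 */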
csky_syscall_trace:
	mov	a0, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace_enter
	cmpnei	a0, 0
	bt	1f
	/* Prepare args before doing the system call */
	ldw	a0, (sp, LSAVE_A0)
	ldw	a1, (sp, LSAVE_A1)
	ldw	a2, (sp, LSAVE_A2)
	ldw	a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
	/* Load args 5/6 from pt_regs before moving sp, so the LSAVE_*
	 * offsets still point into the saved frame, then spill them. */
	ldw	r9, (sp, LSAVE_A4)
	ldw	r10, (sp, LSAVE_A5)
	subi	sp, 8
	stw	r9, (sp, 0x0)
	stw	r10, (sp, 0x4)
	jsr	syscallid		/* Do system call */
	addi	sp, 8
#else
	ldw	r6, (sp, LSAVE_A4)
	ldw	r7, (sp, LSAVE_A5)
	jsr	syscallid		/* Do system call */
#endif
	stw	a0, (sp, LSAVE_A0)	/* Save return value */

1:
#ifdef CONFIG_DEBUG_RSEQ
	mov	a0, sp
	jbsr	rseq_syscall
#endif
	mov	a0, sp			/* right now, sp --> pt_regs */
	jbsr	syscall_trace_exit
	br	ret_from_exception

ENTRY(ret_from_kernel_thread)
	jbsr	schedule_tail
	mov	a0, r10
	jsr	r9
	jbsr	ret_from_exception

ENTRY(ret_from_fork)
	jbsr	schedule_tail
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10
	ldw	r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_SYSCALL_WORK
	and	r10, r9
	cmpnei	r10, 0
	bf	ret_from_exception
	mov	a0, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace_exit

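/*
 * Common return path.  With interrupts disabled, check whether we return
 * to user mode (bit 31 of the saved PSR clear) and, if so, whether any
 * work in _TIF_WORK_MASK is pending before restoring the register frame.
 */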
ret_from_exception:
	psrclr	ie
	ld	r9, (sp, LSAVE_PSR)
	btsti	r9, 31

	bt	1f
	/*
	 * Load the address of the current thread_info, then check its
	 * flags (need_resched, pending signals, ...) for work to do
	 * before returning to user space.
	 */
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10

	ldw	r10, (r9, TINFO_FLAGS)
	lrw	r9, _TIF_WORK_MASK
	and	r10, r9
	cmpnei	r10, 0
	bt	exit_work
#ifdef CONFIG_CONTEXT_TRACKING
	jbsr	context_tracking_user_enter
#endif
1:
#ifdef CONFIG_PREEMPTION
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10

	ldw	r10, (r9, TINFO_PREEMPT)
	cmpnei	r10, 0
	bt	2f
	jbsr	preempt_schedule_irq	/* irq enable/disable is done inside */
2:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	ld	r10, (sp, LSAVE_PSR)
	btsti	r10, 6
	bf	2f
	jbsr	trace_hardirqs_on
2:
#endif
	RESTORE_ALL

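/*
 * Pending-work slow path: point lr back at ret_from_exception so the C
 * helpers return here, then either reschedule or deliver signals and
 * other notifications through do_notify_resume().
 */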
exit_work:
	lrw	r9, ret_from_exception
	mov	lr, r9

	btsti	r10, TIF_NEED_RESCHED
	bt	work_resched

	psrset	ie
	mov	a0, sp
	mov	a1, r10
	jmpi	do_notify_resume

work_resched:
	jmpi	schedule

ENTRY(csky_trap)
	SAVE_ALL 0
	zero_fp
	context_tracking
	psrset	ee
	mov	a0, sp			/* Pass stack pointer (pt_regs) as arg */
	jbsr	trap_c			/* Call C-level trap handler */
	jmpi	ret_from_exception

/*
 * Prototype from libc for abiv1:
 * register unsigned int __result asm("a0");
 * asm( "trap 3" :"=r"(__result)::);
 */
ENTRY(csky_get_tls)
	USPTOKSP

	/* advance epc so rte returns to the instruction after the trap */
	mfcr	a0, epc
	addi	a0, TRAP0_SIZE
	mtcr	a0, epc

	/* get the current task's thread_info from the kernel (8K) stack */
	bmaski	a0, THREAD_SHIFT
	not	a0
	subi	sp, 1
	and	a0, sp
	addi	sp, 1

	/* get tls */
	ldw	a0, (a0, TINFO_TP_VALUE)

	KSPTOUSP
	rte

ENTRY(csky_irq)
	SAVE_ALL 0
	zero_fp
	context_tracking
	psrset	ee

#ifdef CONFIG_TRACE_IRQFLAGS
	jbsr	trace_hardirqs_off
#endif

	mov	a0, sp
	jbsr	csky_do_IRQ

	jmpi	ret_from_exception

/*
 * a0 = prev task_struct *
 * a1 = next task_struct *
 * a0 = return next
 */
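/*
 * Save prev's callee-saved registers and kernel stack pointer in its
 * thread struct, switch to next's kernel stack (and, on ABIv2, its TLS
 * value), then restore next's callee-saved registers and return.
 */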
ENTRY(__switch_to)
	lrw	a3, TASK_THREAD
	addu	a3, a0

	SAVE_SWITCH_STACK

	stw	sp, (a3, THREAD_KSP)

	/* Set up next process to run */
	lrw	a3, TASK_THREAD
	addu	a3, a1

	ldw	sp, (a3, THREAD_KSP)	/* Set next kernel sp */

#if defined(__CSKYABIV2__)
	addi	a3, a1, TASK_THREAD_INFO
	ldw	tls, (a3, TINFO_TP_VALUE)
#endif

	RESTORE_SWITCH_STACK

	rts
ENDPROC(__switch_to)