/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/linkage.h>
#include <abi/entry.h>
#include <abi/pgtable-bits.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <linux/threads.h>
#include <asm/setup.h>  /* NOTE(review): duplicate of the include above */
#include <asm/page.h>
#include <asm/thread_info.h>

#define PTE_INDX_MSK    0xffc
#define PTE_INDX_SHIFT  10
#define _PGDIR_SHIFT    22
19
/*
 * zero_fp: clear the frame-pointer register (r8) on kernel entry so a
 * stack walk started from this frame terminates cleanly.  Only needed
 * when stacktrace support is configured in.
 */
.macro zero_fp
#ifdef CONFIG_STACKTRACE
        movi    r8, 0
#endif
.endm
25
/*
 * context_tracking: notify the context-tracking core that we left user
 * mode.  epsr bit 31 is set for entries from kernel mode (same test as
 * ret_from_exception uses on the saved PSR), so the user-exit call is
 * made only for entries from user space.  The C call clobbers the
 * argument registers, so the saved syscall arguments are reloaded from
 * pt_regs afterwards; ABIv1 additionally passes args 4/5 in r6/r7.
 */
.macro context_tracking
#ifdef CONFIG_CONTEXT_TRACKING
        mfcr    a0, epsr
        btsti   a0, 31                  /* set => entered from kernel mode */
        bt      1f
        jbsr    context_tracking_user_exit
        ldw     a0, (sp, LSAVE_A0)      /* re-load clobbered syscall args */
        ldw     a1, (sp, LSAVE_A1)
        ldw     a2, (sp, LSAVE_A2)
        ldw     a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV1__)
        ldw     r6, (sp, LSAVE_A4)
        ldw     r7, (sp, LSAVE_A5)
#endif
1:
#endif
.endm
43
/*
 * tlbop_begin name, val0, val1, val2
 *
 * Emit the fast-path TLB exception handler ENTRY(csky_\name).  It runs
 * with only a2/a3/r6 as scratch (originals parked in supervisor scratch
 * registers ss2-ss4) and walks the two-level page table by hand:
 *
 *   \val0        extra _PAGE_* permission bit that must be present
 *   \val1,\val2  PTE flag bits to set on success (valid/accessed or
 *                dirty/modified, chosen by the instantiation)
 *
 * If the PTE is present with the required permission, the flag bits are
 * or-ed in and the handler returns with rte.  Otherwise control falls
 * to label \name: scratch registers are restored, full pt_regs saved,
 * and tlbop_end routes to do_page_fault().
 */
.macro tlbop_begin name, val0, val1, val2
ENTRY(csky_\name)
        mtcr    a3, ss2                 /* park scratch regs in ss2-ss4 */
        mtcr    r6, ss3
        mtcr    a2, ss4

        RD_PGDR r6                      /* r6 = pgd base */
        RD_MEH  a3                      /* a3 = faulting virtual address */
#ifdef CONFIG_CPU_HAS_TLBI
        tlbi.vaas a3                    /* drop any stale entry for addr */
        sync.is

        btsti   a3, 31                  /* kernel address => kernel pgd */
        bf      1f
        RD_PGDR_K r6
1:
#else
        /* no TLBI: invalidate through the MCIR control register */
        bgeni   a2, 31
        WR_MCIR a2
        bgeni   a2, 25
        WR_MCIR a2
#endif
        bclri   r6, 0
        lrw     a2, va_pa_offset        /* pgd phys -> kernel virtual */
        ld.w    a2, (a2, 0)
        subu    r6, a2
        bseti   r6, 31

        mov     a2, a3
        lsri    a2, _PGDIR_SHIFT        /* pgd index from the vaddr */
        lsli    a2, 2
        addu    r6, a2
        ldw     r6, (r6)                /* r6 = pte table pointer (phys) */

        lrw     a2, va_pa_offset        /* pte table phys -> kernel virtual */
        ld.w    a2, (a2, 0)
        subu    r6, a2
        bseti   r6, 31

        lsri    a3, PTE_INDX_SHIFT      /* pte index from the vaddr */
        lrw     a2, PTE_INDX_MSK
        and     a3, a2
        addu    r6, a3
        ldw     a3, (r6)                /* a3 = pte */

        movi    a2, (_PAGE_PRESENT | \val0)
        and     a3, a2
        cmpne   a3, a2                  /* present + permission bit both set? */
        bt      \name                   /* no -> slow path (do_page_fault) */

        /* First read/write the page, just update the flags */
        ldw     a3, (r6)
        bgeni   a2, PAGE_VALID_BIT
        bseti   a2, PAGE_ACCESSED_BIT
        bseti   a2, \val1
        bseti   a2, \val2
        or      a3, a2
        stw     a3, (r6)

        /* Some cpu tlb-hardrefill bypass the cache */
#ifdef CONFIG_CPU_NEED_TLBSYNC
        movi    a2, 0x22
        bseti   a2, 6
        mtcr    r6, cr22
        mtcr    a2, cr17
        sync
#endif

        mfcr    a3, ss2                 /* fast path: restore and return */
        mfcr    r6, ss3
        mfcr    a2, ss4
        rte
\name:
        mfcr    a3, ss2                 /* slow path: restore scratch ... */
        mfcr    r6, ss3
        mfcr    a2, ss4
        SAVE_ALL 0                      /* ... then save full pt_regs */
.endm
/*
 * tlbop_end is_write: slow-path tail shared by the tlbop handlers.
 * Re-enables exceptions and interrupts, then calls
 * do_page_fault(regs, \is_write, ...) — a2 is loaded from MEH (the
 * faulting address) immediately before the call, presumably as the
 * third argument; confirm against do_page_fault's signature.
 */
.macro tlbop_end is_write
        zero_fp
        context_tracking
        RD_MEH  a2                      /* a2 = faulting address (MEH) */
        psrset  ee, ie
        mov     a0, sp                  /* a0 = pt_regs */
        movi    a1, \is_write
        jbsr    do_page_fault
        jmpi    ret_from_exception
.endm
132
.text

/* Read fault: require _PAGE_READ; mark the PTE valid + accessed. */
tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
tlbop_end 0

/* Write fault on a missing entry: require _PAGE_WRITE; mark dirty + modified. */
tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
tlbop_end 1

/*
 * Write to a clean (non-dirty) page: as above, plus a cmpxchg restart
 * fixup on cores without load/store-exclusive instructions.
 */
tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
#ifndef CONFIG_CPU_HAS_LDSTEX
jbsr csky_cmpxchg_fixup
#endif
tlbop_end 1
146
/*
 * System-call entry (trap 0).  The syscallid register holds the call
 * number; arguments arrive in a0-a3 (plus r4/r5 copied to the stack
 * for ABIv2).  Out-of-range numbers and NULL table slots fall back to
 * ret_from_exception; threads with any _TIF_SYSCALL_WORK flag take the
 * csky_syscall_trace slow path instead.
 */
ENTRY(csky_systemcall)
        SAVE_ALL TRAP0_SIZE
        zero_fp
        context_tracking
        psrset  ee, ie                  /* re-enable exceptions + irqs */

        lrw     r9, __NR_syscalls
        cmphs   syscallid, r9           /* Check nr of syscall */
        bt      1f                      /* out of range -> plain return */

        lrw     r9, sys_call_table
        ixw     r9, syscallid           /* r9 = &sys_call_table[nr] */
        ldw     syscallid, (r9)         /* syscallid now = handler pointer */
        cmpnei  syscallid, 0
        bf      ret_from_exception      /* hole in the table */

        mov     r9, sp                  /* thread_info = sp & ~(THREAD_SIZE-1) */
        bmaski  r10, THREAD_SHIFT
        andn    r9, r10
        ldw     r10, (r9, TINFO_FLAGS)
        lrw     r9, _TIF_SYSCALL_WORK
        and     r10, r9
        cmpnei  r10, 0
        bt      csky_syscall_trace      /* tracing/audit/etc. active */
#if defined(__CSKYABIV2__)
        subi    sp, 8                   /* args 5/6 passed on stack (ABIv2) */
        stw     r5, (sp, 0x4)
        stw     r4, (sp, 0x0)
        jsr     syscallid               /* Do system call */
        addi    sp, 8
#else
        jsr     syscallid
#endif
        stw     a0, (sp, LSAVE_A0)      /* Save return value */
1:
#ifdef CONFIG_DEBUG_RSEQ
        mov     a0, sp                  /* rseq consistency check on exit */
        jbsr    rseq_syscall
#endif
        jmpi    ret_from_exception
187
/*
 * Traced system-call path: report entry to the tracer, re-load the
 * (possibly tracer-modified) arguments from pt_regs, invoke the
 * handler, then report exit.  A non-zero return from
 * syscall_trace_enter() means the tracer denied the call.
 *
 * Fix: in the ABIv2 branch, sp is lowered by 8 before args 5/6 are
 * copied out of pt_regs, so the pt_regs fields now sit at
 * 0x8 + LSAVE_* relative to the new sp.  The previous code read
 * (sp, LSAVE_A4)/(sp, LSAVE_A5) through the moved sp and therefore
 * copied the wrong words (compare the non-trace path, which stores
 * r4/r5 before touching pt_regs offsets).
 */
csky_syscall_trace:
        mov     a0, sp                  /* sp = pt_regs pointer */
        jbsr    syscall_trace_enter
        cmpnei  a0, 0                   /* non-zero => syscall denied */
        bt      1f
        /* Prepare args before do system call */
        ldw     a0, (sp, LSAVE_A0)
        ldw     a1, (sp, LSAVE_A1)
        ldw     a2, (sp, LSAVE_A2)
        ldw     a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
        subi    sp, 8
        /* pt_regs offsets shifted by the subi: compensate with +0x8 */
        ldw     r9, (sp, 0x8 + LSAVE_A4)
        stw     r9, (sp, 0x0)
        ldw     r9, (sp, 0x8 + LSAVE_A5)
        stw     r9, (sp, 0x4)
        jsr     syscallid               /* Do system call */
        addi    sp, 8
#else
        ldw     r6, (sp, LSAVE_A4)
        ldw     r7, (sp, LSAVE_A5)
        jsr     syscallid               /* Do system call */
#endif
        stw     a0, (sp, LSAVE_A0)      /* Save return value */

1:
#ifdef CONFIG_DEBUG_RSEQ
        mov     a0, sp
        jbsr    rseq_syscall
#endif
        mov     a0, sp                  /* right now, sp --> pt_regs */
        jbsr    syscall_trace_exit
        br      ret_from_exception
221
/*
 * First schedule of a kernel thread: r9 holds the thread function and
 * r10 its argument (presumably placed there by copy_thread at fork
 * time — confirm against the arch fork code).
 */
ENTRY(ret_from_kernel_thread)
        jbsr    schedule_tail
        mov     a0, r10                 /* a0 = fn argument */
        jsr     r9                      /* call fn(arg) */
        jbsr    ret_from_exception
227
/*
 * First return to user space of a forked child.  If any syscall-work
 * flag is set for this thread, report syscall exit to the tracer, then
 * fall through into ret_from_exception.
 */
ENTRY(ret_from_fork)
        jbsr    schedule_tail
        mov     r9, sp                  /* thread_info = sp & ~(THREAD_SIZE-1) */
        bmaski  r10, THREAD_SHIFT
        andn    r9, r10
        ldw     r10, (r9, TINFO_FLAGS)
        lrw     r9, _TIF_SYSCALL_WORK
        and     r10, r9
        cmpnei  r10, 0
        bf      ret_from_exception
        mov     a0, sp                  /* sp = pt_regs pointer */
        jbsr    syscall_trace_exit
        /* falls through to ret_from_exception */
240
/*
 * Common exception/irq/syscall exit.  For returns to user space
 * (saved PSR bit 31 clear) handle pending work (signals, reschedule)
 * and re-enter user context tracking; for returns to kernel space give
 * kernel preemption a chance.  Runs with interrupts disabled while
 * the flag word is inspected.
 */
ret_from_exception:
        psrclr  ie                      /* no irqs while checking flags */
        ld      r9, (sp, LSAVE_PSR)
        btsti   r9, 31                  /* set => returning to kernel mode */

        bt      1f
        /*
         * Load address of current->thread_info, Then get address of task_struct
         * Get task_needreshed in task_struct
         */
        mov     r9, sp
        bmaski  r10, THREAD_SHIFT
        andn    r9, r10

        ldw     r10, (r9, TINFO_FLAGS)
        lrw     r9, _TIF_WORK_MASK
        and     r10, r9
        cmpnei  r10, 0
        bt      exit_work               /* pending signal / resched / ... */
#ifdef CONFIG_CONTEXT_TRACKING
        jbsr    context_tracking_user_enter
#endif
1:
#ifdef CONFIG_PREEMPTION
        /* kernel preemption: reschedule only if preempt_count == 0 */
        mov     r9, sp
        bmaski  r10, THREAD_SHIFT
        andn    r9, r10

        ldw     r10, (r9, TINFO_PREEMPT)
        cmpnei  r10, 0
        bt      2f
        jbsr    preempt_schedule_irq /* irq en/disable is done inside */
2:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
        ld      r10, (sp, LSAVE_PSR)
        btsti   r10, 6                  /* will rte re-enable irqs? */
        bf      2f
        jbsr    trace_hardirqs_on
2:
#endif
        RESTORE_ALL
284
/*
 * Pending-work path on user return: r10 still holds the masked
 * thread_info flags.  lr is pointed at ret_from_exception so both
 * schedule() and do_notify_resume() return there and the exit checks
 * rerun.
 */
exit_work:
        lrw     r9, ret_from_exception
        mov     lr, r9                  /* callees "return" to exit path */

        btsti   r10, TIF_NEED_RESCHED
        bt      work_resched
        /* not a reschedule: deliver signals / notify-resume callbacks */
        psrset  ie
        mov     a0, sp                  /* a0 = pt_regs */
        mov     a1, r10                 /* a1 = thread flags */
        jmpi    do_notify_resume

work_resched:
        jmpi    schedule
299
/*
 * Generic trap/exception entry: save full state and hand off to the
 * C-level trap_c() dispatcher.
 */
ENTRY(csky_trap)
        SAVE_ALL 0
        zero_fp
        context_tracking
        psrset  ee                      /* exceptions on; irqs stay off */
        mov     a0, sp                  /* Push Stack pointer arg */
        jbsr    trap_c                  /* Call C-level trap handler */
        jmpi    ret_from_exception
308
/*
 * Fast "trap 3" service returning the TLS pointer in a0 (used by
 * ABIv1 libc, which has no TLS register).
 *
 * Prototype from libc for abiv1:
 * register unsigned int __result asm("a0");
 * asm( "trap 3" :"=r"(__result)::);
 */
ENTRY(csky_get_tls)
        USPTOKSP                        /* switch to the kernel stack */

        /* increase epc for continue */
        mfcr    a0, epc
        addi    a0, TRAP0_SIZE
        mtcr    a0, epc

        /* get current task thread_info with kernel 8K stack */
        bmaski  a0, THREAD_SHIFT
        not     a0
        subi    sp, 1                   /* presumably biases sp in case it
                                         * sits exactly on the stack
                                         * boundary — confirm */
        and     a0, sp
        addi    sp, 1

        /* get tls */
        ldw     a0, (a0, TINFO_TP_VALUE)

        KSPTOUSP
        rte
334
/*
 * Hardware interrupt entry: save full state and dispatch to
 * csky_do_IRQ(pt_regs).
 */
ENTRY(csky_irq)
        SAVE_ALL 0
        zero_fp
        context_tracking
        psrset  ee                      /* exceptions on; irqs stay masked */

#ifdef CONFIG_TRACE_IRQFLAGS
        jbsr    trace_hardirqs_off
#endif

        mov     a0, sp                  /* a0 = pt_regs */
        jbsr    csky_do_IRQ

        jmpi    ret_from_exception
350
/*
 * Context switch: spill prev's callee-saved state, swap kernel stack
 * pointers, reload next's state.
 *
 * a0 = prev task_struct *
 * a1 = next task_struct *
 * a0 = return next
 */
ENTRY(__switch_to)
        lrw     a3, TASK_THREAD
        addu    a3, a0                  /* a3 = &prev->thread */

        SAVE_SWITCH_STACK               /* spill callee-saved regs */

        stw     sp, (a3, THREAD_KSP)    /* record prev's kernel sp */

        /* Set up next process to run */
        lrw     a3, TASK_THREAD
        addu    a3, a1                  /* a3 = &next->thread */

        ldw     sp, (a3, THREAD_KSP)    /* Set next kernel sp */

#if defined(__CSKYABIV2__)
        /* ABIv2 keeps TLS in a dedicated register: reload it for next */
        addi    a3, a1, TASK_THREAD_INFO
        ldw     tls, (a3, TINFO_TP_VALUE)
#endif

        RESTORE_SWITCH_STACK            /* pop next's callee-saved regs */

        rts
ENDPROC(__switch_to)
/*
 * NOTE(review): a second, older revision of this same file begins here
 * (duplicated SPDX header, includes, macros and ENTRY() symbols).  The
 * two copies cannot be assembled into one translation unit; this older
 * half should be removed once the revisions have been reconciled.
 */
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/linkage.h>
#include <abi/entry.h>
#include <abi/pgtable-bits.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <linux/threads.h>
#include <asm/setup.h>  /* NOTE(review): duplicate include */
#include <asm/page.h>
#include <asm/thread_info.h>

#define PTE_INDX_MSK    0xffc
#define PTE_INDX_SHIFT  10
#define _PGDIR_SHIFT    22
/*
 * NOTE(review): duplicate of the zero_fp macro defined earlier in this
 * file (older concatenated revision) — redefining a .macro is an
 * assembler error.
 */
/* zero_fp: clear frame pointer (r8) so stack traces terminate. */
.macro zero_fp
#ifdef CONFIG_STACKTRACE
        movi    r8, 0
#endif
.endm
25
/*
 * NOTE(review): duplicate (older revision) of the tlbop_begin macro
 * defined earlier in this file.
 *
 * tlbop_begin name, val0, val1, val2: fast-path TLB handler that walks
 * the two-level page table with a2/a3/r6 (originals in ss2-ss4).  On a
 * present PTE carrying the \val0 permission bit it sets the
 * valid/accessed/\val1/\val2 flags and rte's; otherwise it falls to
 * label \name, restores scratch and saves pt_regs for do_page_fault.
 */
.macro tlbop_begin name, val0, val1, val2
ENTRY(csky_\name)
        mtcr    a3, ss2                 /* park scratch regs */
        mtcr    r6, ss3
        mtcr    a2, ss4

        RD_PGDR r6                      /* r6 = pgd base */
        RD_MEH  a3                      /* a3 = faulting virtual address */
#ifdef CONFIG_CPU_HAS_TLBI
        tlbi.vaas a3                    /* drop stale entry for addr */
        sync.is

        btsti   a3, 31                  /* kernel address => kernel pgd */
        bf      1f
        RD_PGDR_K r6
1:
#else
        /* no TLBI: invalidate through MCIR */
        bgeni   a2, 31
        WR_MCIR a2
        bgeni   a2, 25
        WR_MCIR a2
#endif
        bclri   r6, 0
        lrw     a2, va_pa_offset        /* pgd phys -> kernel virtual */
        ld.w    a2, (a2, 0)
        subu    r6, a2
        bseti   r6, 31

        mov     a2, a3
        lsri    a2, _PGDIR_SHIFT        /* pgd index */
        lsli    a2, 2
        addu    r6, a2
        ldw     r6, (r6)                /* r6 = pte table (phys) */

        lrw     a2, va_pa_offset        /* pte table phys -> kernel virtual */
        ld.w    a2, (a2, 0)
        subu    r6, a2
        bseti   r6, 31

        lsri    a3, PTE_INDX_SHIFT      /* pte index */
        lrw     a2, PTE_INDX_MSK
        and     a3, a2
        addu    r6, a3
        ldw     a3, (r6)                /* a3 = pte */

        movi    a2, (_PAGE_PRESENT | \val0)
        and     a3, a2
        cmpne   a3, a2                  /* present + permission both set? */
        bt      \name                   /* no -> slow path */

        /* First read/write the page, just update the flags */
        ldw     a3, (r6)
        bgeni   a2, PAGE_VALID_BIT
        bseti   a2, PAGE_ACCESSED_BIT
        bseti   a2, \val1
        bseti   a2, \val2
        or      a3, a2
        stw     a3, (r6)

        /* Some cpu tlb-hardrefill bypass the cache */
#ifdef CONFIG_CPU_NEED_TLBSYNC
        movi    a2, 0x22
        bseti   a2, 6
        mtcr    r6, cr22
        mtcr    a2, cr17
        sync
#endif

        mfcr    a3, ss2                 /* fast path: restore + return */
        mfcr    r6, ss3
        mfcr    a2, ss4
        rte
\name:
        mfcr    a3, ss2                 /* slow path: restore, save all */
        mfcr    r6, ss3
        mfcr    a2, ss4
        SAVE_ALL 0
.endm
/*
 * NOTE(review): duplicate (older revision) of tlbop_end above; this
 * variant predates the context_tracking hook.
 * Slow-path tail: call do_page_fault(regs, \is_write, ...) with a2
 * re-loaded from MEH (faulting address).
 */
.macro tlbop_end is_write
        zero_fp
        RD_MEH  a2                      /* a2 = faulting address (MEH) */
        psrset  ee, ie
        mov     a0, sp                  /* a0 = pt_regs */
        movi    a1, \is_write
        jbsr    do_page_fault
        jmpi    ret_from_exception
.endm
113
.text

/* NOTE(review): duplicate instantiations (older revision) — these
 * re-emit the csky_tlbinvalidl/s and csky_tlbmodified symbols. */

/* Read fault: require _PAGE_READ; set valid + accessed. */
tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
tlbop_end 0

/* Write fault on missing entry: require _PAGE_WRITE; set dirty + modified. */
tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
tlbop_end 1

/* Write to a clean page, with cmpxchg fixup on cores without ld/st-ex. */
tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
#ifndef CONFIG_CPU_HAS_LDSTEX
jbsr csky_cmpxchg_fixup
#endif
tlbop_end 1
127
/*
 * NOTE(review): duplicate (older revision) of csky_systemcall above.
 * System-call entry: bounds-check syscallid, fetch the handler into
 * r11 from sys_call_table, and divert to csky_syscall_trace when any
 * syscall trace/audit flag is set.
 */
ENTRY(csky_systemcall)
        SAVE_ALL TRAP0_SIZE
        zero_fp

        psrset  ee, ie                  /* re-enable exceptions + irqs */

        lrw     r11, __NR_syscalls
        cmphs   syscallid, r11          /* Check nr of syscall */
        bt      ret_from_exception      /* out of range */

        lrw     r13, sys_call_table
        ixw     r13, syscallid          /* r13 = &sys_call_table[nr] */
        ldw     r11, (r13)              /* r11 = handler pointer */
        cmpnei  r11, 0
        bf      ret_from_exception      /* hole in the table */

        mov     r9, sp                  /* thread_info = sp & ~(THREAD_SIZE-1) */
        bmaski  r10, THREAD_SHIFT
        andn    r9, r10
        ldw     r12, (r9, TINFO_FLAGS)
        ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
        cmpnei  r12, 0
        bt      csky_syscall_trace
#if defined(__CSKYABIV2__)
        subi    sp, 8                   /* args 5/6 on the stack (ABIv2) */
        stw     r5, (sp, 0x4)
        stw     r4, (sp, 0x0)
        jsr     r11                     /* Do system call */
        addi    sp, 8
#else
        jsr     r11
#endif
        stw     a0, (sp, LSAVE_A0)      /* Save return value */
        jmpi    ret_from_exception
162
/*
 * NOTE(review): duplicate (older revision) of csky_syscall_trace
 * above.  Traced syscall path: report entry, re-load args from
 * pt_regs, call the handler (pointer in r11), report exit.  Unlike the
 * newer copy, the tracer's return value is not checked here.
 */
csky_syscall_trace:
        mov     a0, sp                  /* sp = pt_regs pointer */
        jbsr    syscall_trace_enter
        /* Prepare args before do system call */
        ldw     a0, (sp, LSAVE_A0)
        ldw     a1, (sp, LSAVE_A1)
        ldw     a2, (sp, LSAVE_A2)
        ldw     a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
        subi    sp, 8                   /* push r4/r5 (args 5/6, ABIv2) */
        stw     r5, (sp, 0x4)
        stw     r4, (sp, 0x0)
#else
        ldw     r6, (sp, LSAVE_A4)
        ldw     r7, (sp, LSAVE_A5)
#endif
        jsr     r11                     /* Do system call */
#if defined(__CSKYABIV2__)
        addi    sp, 8
#endif
        stw     a0, (sp, LSAVE_A0)      /* Save return value */

        mov     a0, sp                  /* right now, sp --> pt_regs */
        jbsr    syscall_trace_exit
        br      ret_from_exception
188
/*
 * NOTE(review): duplicates (older revision) of ret_from_kernel_thread
 * and ret_from_fork defined earlier in this file.
 */
/* Kernel-thread first schedule: r9 = fn, r10 = arg (set at fork time). */
ENTRY(ret_from_kernel_thread)
        jbsr    schedule_tail
        mov     a0, r10                 /* a0 = fn argument */
        jsr     r9                      /* call fn(arg) */
        jbsr    ret_from_exception

/* Forked child's first user return; report syscall exit if traced. */
ENTRY(ret_from_fork)
        jbsr    schedule_tail
        mov     r9, sp                  /* thread_info from stack mask */
        bmaski  r10, THREAD_SHIFT
        andn    r9, r10
        ldw     r12, (r9, TINFO_FLAGS)
        ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
        cmpnei  r12, 0
        bf      ret_from_exception
        mov     a0, sp                  /* sp = pt_regs pointer */
        jbsr    syscall_trace_exit
        /* falls through to ret_from_exception */
206
/*
 * NOTE(review): duplicate (older revision) of the exit path above.
 * User-return work check: restore immediately for kernel returns
 * (saved PSR bit 31 set), otherwise handle signal/resched work.
 */
ret_from_exception:
        ld      syscallid, (sp, LSAVE_PSR)
        btsti   syscallid, 31           /* set => returning to kernel */
        bt      1f

        /*
         * Load address of current->thread_info, Then get address of task_struct
         * Get task_needreshed in task_struct
         */
        mov     r9, sp
        bmaski  r10, THREAD_SHIFT
        andn    r9, r10

        ldw     r12, (r9, TINFO_FLAGS)
        andi    r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
        cmpnei  r12, 0
        bt      exit_work
1:
        RESTORE_ALL

/* Pending-work path: r12 holds the masked thread flags. */
exit_work:
        lrw     syscallid, ret_from_exception
        mov     lr, syscallid           /* callees return to the exit path */

        btsti   r12, TIF_NEED_RESCHED
        bt      work_resched

        mov     a0, sp                  /* a0 = pt_regs, a1 = flags */
        mov     a1, r12
        jmpi    do_notify_resume

work_resched:
        jmpi    schedule
240
/*
 * NOTE(review): duplicates (older revision) of csky_trap and
 * csky_get_tls defined earlier in this file.
 */
/* Generic trap entry: save state, dispatch to C-level trap_c(). */
ENTRY(csky_trap)
        SAVE_ALL 0
        zero_fp
        psrset  ee                      /* exceptions on; irqs stay off */
        mov     a0, sp                  /* Push Stack pointer arg */
        jbsr    trap_c                  /* Call C-level trap handler */
        jmpi    ret_from_exception

/*
 * "trap 3" service: return the TLS pointer in a0 for ABIv1 libc.
 *
 * Prototype from libc for abiv1:
 * register unsigned int __result asm("a0");
 * asm( "trap 3" :"=r"(__result)::);
 */
ENTRY(csky_get_tls)
        USPTOKSP                        /* switch to kernel stack */

        /* increase epc for continue */
        mfcr    a0, epc
        addi    a0, TRAP0_SIZE
        mtcr    a0, epc

        /* get current task thread_info with kernel 8K stack */
        bmaski  a0, THREAD_SHIFT
        not     a0
        subi    sp, 1                   /* presumably biases sp off an exact
                                         * stack boundary — confirm */
        and     a0, sp
        addi    sp, 1

        /* get tls */
        ldw     a0, (a0, TINFO_TP_VALUE)

        KSPTOUSP
        rte
274
/*
 * NOTE(review): duplicate (older revision) of csky_irq above.  This
 * variant bumps/drops preempt_count by hand around the handler and
 * calls preempt_schedule_irq when the count reaches zero with
 * TIF_NEED_RESCHED set.
 *
 * NOTE(review): r9/r12 are used after the csky_do_IRQ call without
 * being reloaded — this assumes they survive the C call; confirm they
 * are callee-saved in this ABI.
 */
ENTRY(csky_irq)
        SAVE_ALL 0
        zero_fp
        psrset  ee                      /* exceptions on; irqs masked */

#ifdef CONFIG_PREEMPT
        mov     r9, sp                  /* Get current stack pointer */
        bmaski  r10, THREAD_SHIFT
        andn    r9, r10                 /* Get thread_info */

        /*
         * Get task_struct->stack.preempt_count for current,
         * and increase 1.
         */
        ldw     r12, (r9, TINFO_PREEMPT)
        addi    r12, 1
        stw     r12, (r9, TINFO_PREEMPT)
#endif

        mov     a0, sp                  /* a0 = pt_regs */
        jbsr    csky_do_IRQ

#ifdef CONFIG_PREEMPT
        subi    r12, 1                  /* drop the preempt count again */
        stw     r12, (r9, TINFO_PREEMPT)
        cmpnei  r12, 0
        bt      2f
        ldw     r12, (r9, TINFO_FLAGS)
        btsti   r12, TIF_NEED_RESCHED
        bf      2f
        jbsr    preempt_schedule_irq    /* irq en/disable is done inside */
#endif
2:
        jmpi    ret_from_exception
309
/*
 * NOTE(review): duplicate (older revision) of __switch_to above; this
 * variant additionally saves/restores PSR and disables interrupts for
 * the duration of the switch.
 *
 * a0 = prev task_struct *
 * a1 = next task_struct *
 * a0 = return next
 */
ENTRY(__switch_to)
        lrw     a3, TASK_THREAD
        addu    a3, a0                  /* a3 = &prev->thread */

        mfcr    a2, psr                 /* Save PSR value */
        stw     a2, (a3, THREAD_SR)     /* Save PSR in task struct */
        bclri   a2, 6                   /* Disable interrupts */
        mtcr    a2, psr

        SAVE_SWITCH_STACK               /* spill callee-saved regs */

        stw     sp, (a3, THREAD_KSP)    /* record prev's kernel sp */

        /* Set up next process to run */
        lrw     a3, TASK_THREAD
        addu    a3, a1                  /* a3 = &next->thread */

        ldw     sp, (a3, THREAD_KSP)    /* Set next kernel sp */

        ldw     a2, (a3, THREAD_SR)     /* Set next PSR */
        mtcr    a2, psr

#if defined(__CSKYABIV2__)
        addi    r7, a1, TASK_THREAD_INFO
        ldw     tls, (r7, TINFO_TP_VALUE) /* reload TLS register for next */
#endif

        RESTORE_SWITCH_STACK            /* pop next's callee-saved regs */

        rts
ENDPROC(__switch_to)