/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/mm/proc-v7m.S
 *
 * Copyright (C) 2008 ARM Ltd.
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This is the "shell" of the ARMv7-M processor support.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include <asm/v7m.h>
#include "proc-macros.S"

ENTRY(cpu_v7m_proc_init)
        ret lr
ENDPROC(cpu_v7m_proc_init)

ENTRY(cpu_v7m_proc_fin)
        ret lr
ENDPROC(cpu_v7m_proc_fin)

/*
 * cpu_v7m_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * - loc - location to jump to for soft reset
 */
        .align 5
ENTRY(cpu_v7m_reset)
        ret r0
ENDPROC(cpu_v7m_reset)

/*
 * cpu_v7m_do_idle()
 *
 * Idle the processor (eg, wait for interrupt).
 *
 * IRQs are already disabled.
 */
ENTRY(cpu_v7m_do_idle)
        wfi
        ret lr
ENDPROC(cpu_v7m_do_idle)

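/*
 * The baseline ARMv7-M support assumes a core without caches, so there
 * is nothing to clean here; see cpu_cm7_dcache_clean_area below for the
 * cache-aware Cortex-M7 version.
 */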
ENTRY(cpu_v7m_dcache_clean_area)
        ret lr
ENDPROC(cpu_v7m_dcache_clean_area)

/*
 * There is no MMU, so here is nothing to do.
 */
ENTRY(cpu_v7m_switch_mm)
        ret lr
ENDPROC(cpu_v7m_switch_mm)

.globl cpu_v7m_suspend_size
.equ   cpu_v7m_suspend_size, 0

#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v7m_do_suspend)
        ret lr
ENDPROC(cpu_v7m_do_suspend)

ENTRY(cpu_v7m_do_resume)
        ret lr
ENDPROC(cpu_v7m_do_resume)
#endif

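/*
 * cpu_cm7_dcache_clean_area(addr, size)
 *
 * Cortex-M7 may implement a data cache: clean each line in the given
 * range by writing its address to the SCB DCCMVAC register (D-cache
 * clean by address to the Point of Coherency).
 */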
ENTRY(cpu_cm7_dcache_clean_area)
        dcache_line_size r2, r3
        movw r3, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC
        movt r3, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC

1:      str r0, [r3]            @ clean D entry
        add r0, r0, r2
        subs r1, r1, r2
        bhi 1b
        dsb
        ret lr
ENDPROC(cpu_cm7_dcache_clean_area)

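/*
 * cpu_cm7_proc_fin()
 *
 * Disable the Cortex-M7 caches by clearing the DC and IC enable bits
 * in the Configuration and Control Register (CCR).
 */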
ENTRY(cpu_cm7_proc_fin)
        movw r2, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
        movt r2, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
        ldr r0, [r2]
        bic r0, r0, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC)
        str r0, [r2]
        ret lr
ENDPROC(cpu_cm7_proc_fin)

        .section ".init.text", "ax"

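/*
 * Cortex-M7 entry point: r8 carries the CCR bits (D-cache, I-cache and
 * branch predictor enables) that __v7m_setup_cont later merges into the
 * CCR value it returns; the generic __v7m_setup clears r8 instead, for
 * cores without caches.
 */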
__v7m_cm7_setup:
        mov r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC | V7M_SCB_CCR_BP)
        b __v7m_setup_cont
/*
 * __v7m_setup
 *
 * This should be able to cover all ARMv7-M cores.
 */
__v7m_setup:
        mov r8, 0

__v7m_setup_cont:
        @ Configure the vector table base address
        ldr r0, =BASEADDR_V7M_SCB
        ldr r12, =vector_table
        str r12, [r0, V7M_SCB_VTOR]

        @ enable UsageFault, BusFault and MemManage fault.
        ldr r5, [r0, #V7M_SCB_SHCSR]
        orr r5, #(V7M_SCB_SHCSR_USGFAULTENA | V7M_SCB_SHCSR_BUSFAULTENA | V7M_SCB_SHCSR_MEMFAULTENA)
        str r5, [r0, #V7M_SCB_SHCSR]

        @ Lower the priority of the SVC and PendSV exceptions: SVCall
        @ priority sits in SHPR2[31:24] and PendSV priority in SHPR3[23:16];
        @ 0x80 in those byte lanes drops both below the default priority of 0
        mov r5, #0x80000000
        str r5, [r0, V7M_SCB_SHPR2]     @ set SVC priority
        mov r5, #0x00800000
        str r5, [r0, V7M_SCB_SHPR3]     @ set PendSV priority

        @ SVC to switch to handler mode. Notice that this requires sp to
        @ point to writeable memory because the processor saves
        @ some registers to the stack.
        badr r1, 1f
        ldr r5, [r12, #11 * 4]          @ read the SVC vector entry
        str r1, [r12, #11 * 4]          @ write the temporary SVC vector entry
        dsb
        mov r6, lr                      @ save LR
        ldr sp, =init_thread_union + THREAD_START_SP
        cpsie i
        svc #0
1:      cpsid i
        /*
         * Calculate exc_ret: lr now holds the EXC_RETURN value for the SVC
         * we just took; set the "return to Thread mode, use the process
         * stack" bits and keep the result in r10 for the caller.
         */
        orr r10, lr, #EXC_RET_THREADMODE_PROCESSSTACK
        ldmia sp, {r0-r3, r12}
        str r5, [r12, #11 * 4]          @ restore the original SVC vector entry
        mov lr, r6                      @ restore LR

        @ Special-purpose CONTROL register: set nPRIV so that Thread mode
        @ (userspace) executes unprivileged
        mov r1, #1
        msr control, r1                 @ Thread mode has unprivileged access

        @ Configure caches (if implemented): a non-zero r8 means the core
        @ has caches, so invalidate the L1 D-cache before it gets enabled
        teq r8, #0
        stmiane sp, {r0-r6, lr}         @ v7m_invalidate_l1 touches r0-r6
        blne v7m_invalidate_l1
        teq r8, #0                      @ re-evaluate condition
        ldmiane sp, {r0-r6, lr}

        @ Configure the System Control Register to ensure 8-byte stack alignment
        @ Note the STKALIGN bit is either RW or RAO.
        ldr r0, [r0, V7M_SCB_CCR]       @ system control register
        orr r0, #V7M_SCB_CCR_STKALIGN
        orr r0, r0, r8
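        @ The merged CCR value is handed back in r0; the caller is expected
        @ to write it to the CCR, mirroring how the MMU setup code returns
        @ the SCTLR value.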

        ret lr
ENDPROC(__v7m_setup)

/*
 * Cortex-M7 processor functions. Only proc_fin and dcache_clean_area
 * need cache-aware implementations (defined above); the remaining entry
 * points alias the generic ARMv7-M ones.
 */
        globl_equ cpu_cm7_proc_init,    cpu_v7m_proc_init
        globl_equ cpu_cm7_reset,        cpu_v7m_reset
        globl_equ cpu_cm7_do_idle,      cpu_v7m_do_idle
        globl_equ cpu_cm7_switch_mm,    cpu_v7m_switch_mm

        define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
        define_processor_functions cm7, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1

        .section ".rodata"
        string cpu_arch_name, "armv7m"
        string cpu_elf_name, "v7m"
        string cpu_v7m_name, "ARMv7-M"

        .section ".proc.info.init", "a"

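/*
 * __v7m_proc fills in the tail of a struct proc_info_list entry (see
 * <asm/procinfo.h>); each user first emits the CPU ID value and mask
 * that the boot code matches against the MIDR.
 */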
.macro __v7m_proc name, initfunc, cache_fns = nop_cache_fns, hwcaps = 0, proc_fns = v7m_processor_functions
        .long 0                 /* proc_info_list.__cpu_mm_mmu_flags */
        .long 0                 /* proc_info_list.__cpu_io_mmu_flags */
        initfn \initfunc, \name
        .long cpu_arch_name
        .long cpu_elf_name
        .long HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \hwcaps
        .long cpu_v7m_name
        .long \proc_fns
        .long 0                 /* proc_info_list.tlb */
        .long 0                 /* proc_info_list.user */
        .long \cache_fns
.endm

        /*
         * Match ARM Cortex-M55 processor.
         */
        .type __v7m_cm55_proc_info, #object
__v7m_cm55_proc_info:
        .long 0x410fd220        /* ARM Cortex-M55 0xD22 */
        .long 0xff0ffff0        /* Mask off revision, patch release */
        __v7m_proc __v7m_cm55_proc_info, __v7m_cm7_setup, hwcaps = HWCAP_EDSP, cache_fns = v7m_cache_fns, proc_fns = cm7_processor_functions
        .size __v7m_cm55_proc_info, . - __v7m_cm55_proc_info

        /*
         * Match ARM Cortex-M33 processor.
         */
        .type __v7m_cm33_proc_info, #object
__v7m_cm33_proc_info:
        .long 0x410fd210        /* ARM Cortex-M33 0xD21 */
        .long 0xff0ffff0        /* Mask off revision, patch release */
        __v7m_proc __v7m_cm33_proc_info, __v7m_setup, hwcaps = HWCAP_EDSP
        .size __v7m_cm33_proc_info, . - __v7m_cm33_proc_info

        /*
         * Match ARM Cortex-M7 processor.
         */
        .type __v7m_cm7_proc_info, #object
__v7m_cm7_proc_info:
        .long 0x410fc270        /* ARM Cortex-M7 0xC27 */
        .long 0xff0ffff0        /* Mask off revision, patch release */
        __v7m_proc __v7m_cm7_proc_info, __v7m_cm7_setup, hwcaps = HWCAP_EDSP, cache_fns = v7m_cache_fns, proc_fns = cm7_processor_functions
        .size __v7m_cm7_proc_info, . - __v7m_cm7_proc_info

        /*
         * Match ARM Cortex-M4 processor.
         */
        .type __v7m_cm4_proc_info, #object
__v7m_cm4_proc_info:
        .long 0x410fc240        /* ARM Cortex-M4 0xC24 */
        .long 0xff0ffff0        /* Mask off revision, patch release */
        __v7m_proc __v7m_cm4_proc_info, __v7m_setup, hwcaps = HWCAP_EDSP
        .size __v7m_cm4_proc_info, . - __v7m_cm4_proc_info

        /*
         * Match ARM Cortex-M3 processor.
         */
        .type __v7m_cm3_proc_info, #object
__v7m_cm3_proc_info:
        .long 0x410fc230        /* ARM Cortex-M3 0xC23 */
        .long 0xff0ffff0        /* Mask off revision, patch release */
        __v7m_proc __v7m_cm3_proc_info, __v7m_setup
        .size __v7m_cm3_proc_info, . - __v7m_cm3_proc_info

        /*
         * Match any ARMv7-M processor core.
         */
        .type __v7m_proc_info, #object
__v7m_proc_info:
        .long 0x000f0000        @ Required ID value
        .long 0x000f0000        @ Mask for ID
        __v7m_proc __v7m_proc_info, __v7m_setup
        .size __v7m_proc_info, . - __v7m_proc_info