/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/mm/cache-v7m.S
 *
 * Based on linux/arch/arm/mm/cache-v7.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2005 ARM Ltd.
 *
 * This is the "shell" of the ARMv7M processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
#include <asm/v7m.h>

#include "proc-macros.S"

.arch armv7-m

/* Generic V7M read/write macros for memory mapped cache operations */
.macro v7m_cache_read, rt, reg
	movw	\rt, #:lower16:BASEADDR_V7M_SCB + \reg
	movt	\rt, #:upper16:BASEADDR_V7M_SCB + \reg
	ldr	\rt, [\rt]
.endm

.macro v7m_cacheop, rt, tmp, op, c = al
	movw\c	\tmp, #:lower16:BASEADDR_V7M_SCB + \op
	movt\c	\tmp, #:upper16:BASEADDR_V7M_SCB + \op
	str\c	\rt, [\tmp]
.endm
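
/*
 * ARMv7-M has no cp15 coprocessor: cache maintenance is performed by
 * writing the address (or set/way operand) to memory mapped SCB
 * registers. The movw/movt pair builds the 32-bit register address,
 * and the optional condition \c predicates all three instructions.
 */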


.macro read_ccsidr, rt
	v7m_cache_read \rt, V7M_SCB_CCSIDR
.endm

.macro read_clidr, rt
	v7m_cache_read \rt, V7M_SCB_CLIDR
.endm

.macro write_csselr, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_CSSELR
.endm

/*
 * dcisw: Invalidate data cache by set/way
 */
.macro dcisw, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCISW
.endm

/*
 * dccisw: Clean and invalidate data cache by set/way
 */
.macro dccisw, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCISW
.endm

/*
 * dccimvac: Clean and invalidate data cache line by MVA to PoC
 */
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dccimvac\c, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCIMVAC, \c
.endm
.endr
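
/*
 * The .irp above stamps out one variant of the macro per condition
 * suffix (e.g. dccimvacne, dccimvaclo), so callers can predicate the
 * cache operation like a native conditional instruction.
 */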

/*
 * dcimvac: Invalidate data cache line by MVA to PoC
 */
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dcimvac\c, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
.endm
.endr

/*
 * dccmvau: Clean data cache line by MVA to PoU
 */
.macro dccmvau, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAU
.endm

/*
 * dccmvac: Clean data cache line by MVA to PoC
 */
.macro dccmvac, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAC
.endm

/*
 * icimvau: Invalidate instruction caches by MVA to PoU
 */
.macro icimvau, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_ICIMVAU
.endm

/*
 * Invalidate the icache, inner shareable if SMP, invalidate BTB for UP.
 * rt data is ignored by ICIALLU(IS), so it can be used for the address.
 */
.macro invalidate_icache, rt
	v7m_cacheop \rt, \rt, V7M_SCB_ICIALLU
	mov \rt, #0
.endm

/*
 * Invalidate the BTB, inner shareable if SMP.
 * rt data is ignored by BPIALL, so it can be used for the address.
 */
.macro invalidate_bp, rt
	v7m_cacheop \rt, \rt, V7M_SCB_BPIALL
	mov \rt, #0
.endm

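/*
 * CCSIDR layout assumed below (as on v7-A/R): bits [2:0] hold
 * log2(words per line) - 2, bits [12:3] the associativity - 1, and
 * bits [27:13] the number of sets - 1. Hence SetShift = LineSize + 4
 * (log2 of the line size in bytes) and WayShift = clz(NumWays - 1).
 */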
ENTRY(v7m_invalidate_l1)
	mov	r0, #0

	write_csselr r0, r1
	read_ccsidr r0

	movw	r1, #0x7fff
	and	r2, r1, r0, lsr #13

	movw	r1, #0x3ff

	and	r3, r1, r0, lsr #3	@ NumWays - 1
	add	r2, r2, #1		@ NumSets

	and	r0, r0, #0x7
	add	r0, r0, #4		@ SetShift

	clz	r1, r3			@ WayShift
	add	r4, r3, #1		@ NumWays
1:	sub	r2, r2, #1		@ NumSets--
	mov	r3, r4			@ Temp = NumWays
2:	subs	r3, r3, #1		@ Temp--
	mov	r5, r3, lsl r1
	mov	r6, r2, lsl r0
	orr	r5, r5, r6		@ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
	dcisw	r5, r6
	bgt	2b
	cmp	r2, #0
	bgt	1b
	dsb	st
	isb
	ret	lr
ENDPROC(v7m_invalidate_l1)

/*
 * v7m_flush_icache_all()
 *
 * Flush the whole I-cache.
 *
 * Registers:
 *	r0 - set to 0
 */
ENTRY(v7m_flush_icache_all)
	invalidate_icache r0
	ret	lr
ENDPROC(v7m_flush_icache_all)

/*
 * v7m_flush_dcache_all()
 *
 * Flush the whole D-cache.
 *
 * Corrupted registers: r0-r7, r9-r11
 */
ENTRY(v7m_flush_dcache_all)
	dmb					@ ensure ordering with previous memory accesses
	read_clidr r0
	mov	r3, r0, lsr #23			@ move LoC into position
	ands	r3, r3, #7 << 1			@ extract LoC*2 from clidr
	beq	finished			@ if LoC is 0, then no need to clean
start_flush_levels:
	mov	r10, #0				@ start clean at cache level 0
flush_levels:
	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
	and	r1, r1, #7			@ mask off the bits for current cache only
	cmp	r1, #2				@ see what cache we have at this level
	blt	skip				@ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPTION
	save_and_disable_irqs_notrace r9	@ make cssr&csidr read atomic
#endif
	write_csselr r10, r1			@ set current cache level
	isb					@ isb to sync the new cssr&csidr
	read_ccsidr r1				@ read the new csidr
#ifdef CONFIG_PREEMPTION
	restore_irqs_notrace r9
#endif
	and	r2, r1, #7			@ extract the length of the cache lines
	add	r2, r2, #4			@ add 4 (line length offset)
	movw	r4, #0x3ff
	ands	r4, r4, r1, lsr #3		@ maximum way number (NumWays - 1)
	clz	r5, r4				@ find bit position of way size increment
	movw	r7, #0x7fff
	ands	r7, r7, r1, lsr #13		@ maximum set index (NumSets - 1)
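	/*
	 * Set/way operand written to DCCISW below: way index in the top
	 * bits (shifted left by clz(NumWays - 1)), set index at the
	 * line-size shift, and the level (r10, already doubled) in
	 * bits [3:1].
	 */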
loop1:
	mov	r9, r7				@ create working copy of max index
loop2:
	lsl	r6, r4, r5
	orr	r11, r10, r6			@ factor way and cache number into r11
	lsl	r6, r9, r2
	orr	r11, r11, r6			@ factor index number into r11
	dccisw	r11, r6				@ clean/invalidate by set/way
	subs	r9, r9, #1			@ decrement the index
	bge	loop2
	subs	r4, r4, #1			@ decrement the way
	bge	loop1
skip:
	add	r10, r10, #2			@ increment cache number
	cmp	r3, r10
	bgt	flush_levels
finished:
	mov	r10, #0				@ switch back to cache level 0
	write_csselr r10, r3			@ select current cache level in cssr
	dsb	st
	isb
	ret	lr
ENDPROC(v7m_flush_dcache_all)

/*
 * v7m_flush_kern_cache_all()
 *
 * Flush the entire cache system.
 * The data cache flush is now achieved using atomic clean / invalidates
 * working outwards from L1 cache. This is done using Set/Way based cache
 * maintenance instructions.
 * The instruction cache can still be invalidated back to the point of
 * unification in a single instruction.
 */
ENTRY(v7m_flush_kern_cache_all)
	stmfd	sp!, {r4-r7, r9-r11, lr}
	bl	v7m_flush_dcache_all
	invalidate_icache r0
	ldmfd	sp!, {r4-r7, r9-r11, lr}
	ret	lr
ENDPROC(v7m_flush_kern_cache_all)

/*
 * v7m_flush_user_cache_all()
 *
 * Flush all cache entries in a particular address space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v7m_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 * v7m_flush_user_cache_range(start, end, flags)
 *
 * Flush a range of cache entries in the specified address space.
 *
 *	- start	- start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 *
 * It is assumed that:
 * - we have a VIPT cache.
 */
ENTRY(v7m_flush_user_cache_range)
	ret	lr
ENDPROC(v7m_flush_user_cache_all)
ENDPROC(v7m_flush_user_cache_range)

/*
 * v7m_coherent_kern_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region. This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v7m_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * v7m_coherent_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region. This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
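/*
 * The code below first cleans the D-cache by MVA to the Point of
 * Unification so the new instructions are visible to instruction
 * fetches, then invalidates the I-cache and branch predictor over the
 * same range, with barriers ordering the two passes.
 */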
ENTRY(v7m_coherent_user_range)
 UNWIND(.fnstart)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3
1:
/*
 * We use an open coded version of dccmvau; otherwise USER() would
 * point at the movw instruction.
 */
	dccmvau	r12, r3
	add	r12, r12, r2
	cmp	r12, r1
	blo	1b
	dsb	ishst
	icache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3
2:
	icimvau	r12, r3
	add	r12, r12, r2
	cmp	r12, r1
	blo	2b
	invalidate_bp r0
	dsb	ishst
	isb
	ret	lr
 UNWIND(.fnend)
ENDPROC(v7m_coherent_kern_range)
ENDPROC(v7m_coherent_user_range)

/*
 * v7m_flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure that the data held in the page kaddr is written back
 * to the page in question.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v7m_flush_kern_dcache_area)
	dcache_line_size r2, r3
	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	dccimvac r0, r3			@ clean & invalidate D line / unified line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_flush_kern_dcache_area)

/*
 * v7m_dma_inv_range(start,end)
 *
 * Invalidate the data cache within the specified region; we will
 * be performing a DMA operation in this region and we want to
 * purge old data in the cache.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
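/*
 * Partial lines at either end of the range are cleaned as well as
 * invalidated (dccimvac) so that unrelated data sharing those lines is
 * not lost; lines fully inside the range are simply invalidated.
 */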
v7m_dma_inv_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	tst	r0, r3
	bic	r0, r0, r3
	dccimvacne r0, r3
	addne	r0, r0, r2
	subne	r3, r2, #1	@ restore r3, corrupted by v7m's dccimvac
	tst	r1, r3
	bic	r1, r1, r3
	dccimvacne r1, r3
	cmp	r0, r1
1:
	dcimvaclo r0, r3
	addlo	r0, r0, r2
	cmplo	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_inv_range)

/*
 * v7m_dma_clean_range(start,end)
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
v7m_dma_clean_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	dccmvac	r0, r3			@ clean D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_clean_range)

/*
 * v7m_dma_flush_range(start,end)
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
ENTRY(v7m_dma_flush_range)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	dccimvac r0, r3			@ clean & invalidate D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_flush_range)

/*
 * v7m_dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
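/*
 * Mapping for DMA_FROM_DEVICE invalidates the range (the device is
 * about to write it); any other direction cleans to the PoC so the
 * device observes up-to-date data.
 */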
ENTRY(v7m_dma_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_FROM_DEVICE
	beq	v7m_dma_inv_range
	b	v7m_dma_clean_range
ENDPROC(v7m_dma_map_area)

/*
 * v7m_dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
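/*
 * On unmap, only DMA_TO_DEVICE needs no maintenance; every other
 * direction invalidates so the CPU sees what the device wrote rather
 * than stale cached lines.
 */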
ENTRY(v7m_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v7m_dma_inv_range
	ret	lr
ENDPROC(v7m_dma_unmap_area)

	.globl	v7m_flush_kern_cache_louis
	.equ	v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all
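
	@ Flushing to the Level of Unification (louis) is simply aliased
	@ to the full flush here rather than stopping at LoUIS.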

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v7m
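
	@ define_cache_functions (proc-macros.S) expands to a cpu_cache_fns
	@ structure named v7m_cache_fns whose slots point at the v7m_*
	@ routines above.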