/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/mm/cache-v4wb.S
 *
 * Copyright (C) 1997-2002 Russell King
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The total size of the data cache.
 */
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE	16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE	8192
#else
# error Unknown cache size
#endif

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 *  Size    Clean (ticks)   Dirty (ticks)
 *   4096    21  20  21      53  55  54
 *   8192    40  41  40     106 100 102
 *  16384    77  77  76     140 140 138
 *  32768   150 149 150     214 216 212 <---
 *  65536   296 297 296     351 358 361
 * 131072   591 591 591     656 657 651
 *  Whole   132 136 132     221 217 207 <---
 */
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)
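
/*
 * Note: CACHE_DLIMIT = 4 * CACHE_DSIZE works out to 64KB on SA-110 and
 * 32KB on SA1100; per the timings above, cleaning the whole cache is
 * at least as cheap as line-by-line maintenance from about that size
 * upward.
 */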

	.data
	.align	2
flush_base:
	.long	FLUSH_BASE
	.text

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wb_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(v4wb_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular address
 * space.
 */
ENTRY(v4wb_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(v4wb_flush_kern_cache_all)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
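/*
 * StrongARM has no "clean entire D cache" operation, so the D cache is
 * cleaned by reading CACHE_DSIZE bytes from the reserved FLUSH_BASE
 * mapping, displacing every dirty line.  The EOR below alternates
 * between the two halves of the flush window so that back-to-back
 * flushes always read addresses that are not already resident.  On
 * SA1100, the 512-byte minicache is flushed the same way through
 * FLUSH_BASE_MINICACHE.
 */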
__flush_whole_cache:
	ldr	r3, =flush_base
	ldr	r1, [r3, #0]
	eor	r1, r1, #CACHE_DSIZE
	str	r1, [r3, #0]
	add	r2, r1, #CACHE_DSIZE
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#ifdef FLUSH_BASE_MINICACHE
	add	r2, r2, #FLUSH_BASE_MINICACHE - FLUSH_BASE
	sub	r1, r2, #512			@ only 512 bytes
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive, page aligned)
 * - end - end address (exclusive, page aligned)
 * - flags - vm_area_struct flags describing address space
 */
ENTRY(v4wb_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	tst	r2, #VM_EXEC			@ executable region?
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache

	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ flush whole D cache

1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(v4wb_flush_kern_dcache_area)
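	@ turn (addr, size) into the (start, end) pair expected below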
	add	r1, r0, r1
	/* fall through */

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(v4wb_coherent_kern_range)
	/* fall through */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(v4wb_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
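	@ r0 = 0 below also serves as the return value (success)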
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr


/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
v4wb_dma_inv_range:
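	@ clean any partially-covered line at either end first, so the
	@ invalidate cannot discard unrelated data sharing those lines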
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 * dma_clean_range(start, end)
 *
 * Clean (write back) the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
v4wb_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * This is actually the same as v4wb_coherent_kern_range()
 */
	.globl	v4wb_dma_flush_range
	.set	v4wb_dma_flush_range, v4wb_coherent_kern_range

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(v4wb_dma_map_area)
	add	r1, r1, r0
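	@ dma_data_direction: DMA_BIDIRECTIONAL=0, DMA_TO_DEVICE=1,
	@ DMA_FROM_DEVICE=2.  Equal: clean only; higher (carry set):
	@ invalidate only; lower: fall through and clean+invalidate.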
	cmp	r2, #DMA_TO_DEVICE
	beq	v4wb_dma_clean_range
	bcs	v4wb_dma_inv_range
	b	v4wb_dma_flush_range
ENDPROC(v4wb_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(v4wb_dma_unmap_area)
	ret	lr
ENDPROC(v4wb_dma_unmap_area)

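/*
 * With a single cache level, flushing to the Level of Unification
 * Inner Shareable (LoUIS) is the same as flushing the whole cache.
 */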
	.globl	v4wb_flush_kern_cache_louis
	.equ	v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4wb