/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  ARMv4 write-through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384
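
/*
 * Note that CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE
 * = 8 * 64 * 32 = 16384 bytes, i.e. the limit matches the full
 * size of the data cache described by the constants above.
 */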

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(v4wt_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
SYM_FUNC_END(v4wt_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
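/*
 * The ARMv4 write-through cache is virtually indexed and offers no
 * way to restrict maintenance to a single address space, so the user
 * variant is simply an alias for the full flush below.
 */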
SYM_FUNC_ALIAS(v4wt_flush_user_cache_all, v4wt_flush_kern_cache_all)

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
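	@ r2 holds the vma flags here: flush_kern_cache_all forces VM_EXEC
	@ above, while v4wt_flush_user_cache_range branches in with the
	@ caller's flags, so the I cache is only invalidated for
	@ executable regions.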
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	ret	lr
SYM_FUNC_END(v4wt_flush_kern_cache_all)

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
SYM_TYPED_FUNC_START(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr
SYM_FUNC_END(v4wt_flush_user_cache_range)

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
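/*
 * With CONFIG_CFI_CLANG the type hash is emitted immediately before a
 * function's entry point, so we cannot simply fall through into
 * v4wt_coherent_user_range; an explicit branch is required.
 */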
SYM_TYPED_FUNC_START(v4wt_coherent_kern_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	v4wt_coherent_user_range
#endif
SYM_FUNC_END(v4wt_coherent_kern_range)

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
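/*
 * Because the D cache is write-through, memory already holds the
 * latest data; making the caches coherent only requires discarding
 * any stale I cache lines covering the range.
 */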
SYM_TYPED_FUNC_START(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	ret	lr
SYM_FUNC_END(v4wt_coherent_user_range)

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
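/*
 * The "add r1, r0, r1" below converts the (addr, size) arguments into
 * the (start, end) pair that v4wt_dma_inv_range expects; invalidation
 * alone suffices since a write-through cache never holds dirty lines.
 */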
SYM_TYPED_FUNC_START(v4wt_flush_kern_dcache_area)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, r1
	b	v4wt_dma_inv_range
SYM_FUNC_END(v4wt_flush_kern_dcache_area)

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
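/*
 * No write-backs are actually performed here: on a write-through
 * cache no line is ever dirty, so the partially covered lines at an
 * unaligned 'start' or 'end' are already in memory and a plain
 * invalidate satisfies the contract above.
 */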
v4wt_dma_inv_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
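/*
 * Cleaning is a no-op on a write-through cache, so the flush
 * degenerates to the invalidate routine above.
 */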
SYM_TYPED_FUNC_START(v4wt_dma_flush_range)
	b	v4wt_dma_inv_range
SYM_FUNC_END(v4wt_dma_flush_range)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
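/*
 * Only DMA_FROM_DEVICE and DMA_BIDIRECTIONAL mappings need work here:
 * lines covering the buffer are discarded so the CPU sees the data
 * written by the device.  DMA_TO_DEVICE leaves the cache valid.
 */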
SYM_TYPED_FUNC_START(v4wt_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v4wt_dma_inv_range
	ret	lr
SYM_FUNC_END(v4wt_dma_unmap_area)

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
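/*
 * Nothing to do when mapping: a write-through cache cannot hold dirty
 * data, so memory is always up to date before the device reads it.
 */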
SYM_TYPED_FUNC_START(v4wt_dma_map_area)
	ret	lr
SYM_FUNC_END(v4wt_dma_map_area)
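
/*
 * The v4wt_* entry points above are gathered into a struct
 * cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) and
 * selected at boot for CPUs with this cache type.
 */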