/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/asm-uaccess.h>

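/*
 * Editor's note on terminology used throughout this file:
 *
 * - PoU (Point of Unification): the point at which the instruction and
 *   data caches of a PE are guaranteed to see the same copy of a line.
 * - PoC (Point of Coherency): the point at which all observers in the
 *   system (CPUs and DMA masters) see the same copy of a line.
 * - PoP (Point of Persistence): the point at which writes survive a
 *   power failure (ARMv8.2 DC CVAP).
 *
 * The ARM64_HAS_CACHE_IDC/DIC alternatives correspond to the CTR_EL0.IDC
 * and CTR_EL0.DIC bits: when set, D-cache clean to PoU (respectively
 * I-cache invalidation) is not required for instruction-to-data
 * coherence, so the line-by-line loops below can be skipped.
 */
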
/*
 * flush_icache_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region.  This is typically used when code has been written to a
 * memory region, and will be executed.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
ENTRY(flush_icache_range)
	/* FALLTHROUGH */

/*
 * __flush_cache_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region.  This is typically used when code has been written to a
 * memory region, and will be executed.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
ENTRY(__flush_cache_user_range)
	uaccess_ttbr0_enable x2, x3, x4
alternative_if ARM64_HAS_CACHE_IDC
	dsb ishst
	b 7f
alternative_else_nop_endif
	dcache_line_size x2, x3
	sub x3, x2, #1
	bic x4, x0, x3
1:
user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
	add x4, x4, x2
	cmp x4, x1
	b.lo 1b
	dsb ish

7:
alternative_if ARM64_HAS_CACHE_DIC
	isb
	b 8f
alternative_else_nop_endif
	invalidate_icache_by_line x0, x1, x2, x3, 9f
8:	mov x0, #0
1:
	uaccess_ttbr0_disable x1, x2
	ret
9:
	mov x0, #-EFAULT
	b 1b
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)

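/*
 * Usage note (editor's sketch, not part of the original file): a typical
 * caller writes instructions through the D-side and then makes them
 * visible to the I-side.  The helper below is hypothetical; only
 * flush_icache_range() itself is defined here.
 *
 *	#include <linux/types.h>
 *	#include <asm/cacheflush.h>
 *
 *	static void patch_insn(u32 *slot, u32 insn)
 *	{
 *		*slot = insn;			// store opcode via D-cache
 *		flush_icache_range((unsigned long)slot,
 *				   (unsigned long)(slot + 1));
 *						// clean D to PoU, inval I
 *	}
 */
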
/*
 * invalidate_icache_range(start,end)
 *
 * Ensure that the I cache is invalid within the specified region.
 *
 * - start - virtual start address of region
 * - end - virtual end address of region
 */
ENTRY(invalidate_icache_range)
alternative_if ARM64_HAS_CACHE_DIC
	mov x0, xzr
	isb
	ret
alternative_else_nop_endif

	uaccess_ttbr0_enable x2, x3, x4

	invalidate_icache_by_line x0, x1, x2, x3, 2f
	mov x0, xzr
1:
	uaccess_ttbr0_disable x1, x2
	ret
2:
	mov x0, #-EFAULT
	b 1b
ENDPROC(invalidate_icache_range)

/*
 * __flush_dcache_area(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned and invalidated to the PoC.
 *
 * - kaddr - kernel address
 * - size - size of region in bytes
 */
ENTRY(__flush_dcache_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__flush_dcache_area)

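/*
 * Usage note (editor's sketch): __flush_dcache_area() pushes dirty lines
 * out to the PoC, e.g. before memory is handed to an observer that does
 * not snoop the CPU caches.  The buffer and caller below are
 * hypothetical.
 *
 *	#include <linux/types.h>
 *	#include <asm/cacheflush.h>
 *
 *	static u64 boot_args[4];	// shared with a non-coherent agent
 *
 *	static void publish_boot_args(void)
 *	{
 *		boot_args[0] = 1;
 *		// make the data visible beyond the caches
 *		__flush_dcache_area(boot_args, sizeof(boot_args));
 *	}
 */
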
/*
 * __clean_dcache_area_pou(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned to the PoU.
 *
 * - kaddr - kernel address
 * - size - size of region in bytes
 */
ENTRY(__clean_dcache_area_pou)
alternative_if ARM64_HAS_CACHE_IDC
	dsb ishst
	ret
alternative_else_nop_endif
	dcache_by_line_op cvau, ish, x0, x1, x2, x3
	ret
ENDPROC(__clean_dcache_area_pou)

/*
 * __inval_dcache_area(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are invalidated.  Any partial lines at the ends of the interval are
 * also cleaned to PoC to prevent data loss.
 *
 * - kaddr - kernel address
 * - size - size of region in bytes
 */
ENTRY(__inval_dcache_area)
	/* FALLTHROUGH */

/*
 * __dma_inv_area(start, size)
 * - start - virtual start address of region
 * - size - size of region in bytes
 */
__dma_inv_area:
	add x1, x1, x0
	dcache_line_size x2, x3
	sub x3, x2, #1
	tst x1, x3			// end cache line aligned?
	bic x1, x1, x3
	b.eq 1f
	dc civac, x1			// clean & invalidate D / U line
1:	tst x0, x3			// start cache line aligned?
	bic x0, x0, x3
	b.eq 2f
	dc civac, x0			// clean & invalidate D / U line
	b 3f
2:	dc ivac, x0			// invalidate D / U line
3:	add x0, x0, x2
	cmp x0, x1
	b.lo 2b
	dsb sy
	ret
ENDPIPROC(__inval_dcache_area)
ENDPROC(__dma_inv_area)

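/*
 * Worked example (editor's note, assuming 64-byte cache lines): for
 * kaddr = 0x1008 and size = 0x100, the interval [0x1008, 0x1108) only
 * partially covers the lines at 0x1000 and 0x1100.  Those two edge
 * lines are cleaned & invalidated (dc civac) so that unrelated dirty
 * data sharing them is not discarded, while the fully covered lines at
 * 0x1040, 0x1080 and 0x10c0 are simply invalidated (dc ivac).
 */
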
/*
 * __clean_dcache_area_poc(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned to the PoC.
 *
 * - kaddr - kernel address
 * - size - size of region in bytes
 */
ENTRY(__clean_dcache_area_poc)
	/* FALLTHROUGH */

/*
 * __dma_clean_area(start, size)
 * - start - virtual start address of region
 * - size - size of region in bytes
 */
__dma_clean_area:
	dcache_by_line_op cvac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__clean_dcache_area_poc)
ENDPROC(__dma_clean_area)

/*
 * __clean_dcache_area_pop(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned to the PoP.
 *
 * - kaddr - kernel address
 * - size - size of region in bytes
 */
ENTRY(__clean_dcache_area_pop)
	dcache_by_line_op cvap, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__clean_dcache_area_pop)

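/*
 * Editor's note: DC CVAP (clean to PoP) is an ARMv8.2-DCPoP instruction;
 * on CPUs without it, the dcache_by_line_op alternative falls back to a
 * clean to PoC.  A typical caller is the persistent-memory write-back
 * helper; a minimal sketch, assuming the arch_wb_cache_pmem() hook:
 *
 *	void arch_wb_cache_pmem(void *addr, size_t size)
 *	{
 *		// push stores out far enough to survive power loss
 *		__clean_dcache_area_pop(addr, size);
 *	}
 */
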
/*
 * __dma_flush_area(start, size)
 *
 * Clean & invalidate D / U lines in the region.
 *
 * - start - virtual start address of region
 * - size - size of region in bytes
 */
ENTRY(__dma_flush_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__dma_flush_area)

/*
 * __dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(__dma_map_area)
	cmp w2, #DMA_FROM_DEVICE
	b.eq __dma_inv_area
	b __dma_clean_area
ENDPIPROC(__dma_map_area)

/*
 * __dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(__dma_unmap_area)
	cmp w2, #DMA_TO_DEVICE
	b.ne __dma_inv_area
	ret
ENDPIPROC(__dma_unmap_area)
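
/*
 * Usage note (editor's sketch): these two entry points sit behind the
 * streaming DMA API for non-coherent devices.  In C terms the direction
 * logic above is roughly:
 *
 *	// map (CPU -> device ownership)
 *	if (dir == DMA_FROM_DEVICE)
 *		__dma_inv_area(start, size);	// device will write: discard
 *	else
 *		__dma_clean_area(start, size);	// device will read: clean
 *
 *	// unmap (device -> CPU ownership)
 *	if (dir != DMA_TO_DEVICE)
 *		__dma_inv_area(start, size);	// drop stale CPU lines
 */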