/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

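/*
 * Map the page at 'pfn' into the kernel flush window at the same cache
 * colour as the user address 'vaddr', then clean+invalidate that alias:
 * on a VIPT D-cache this hits the same cache lines as the user mapping.
 */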
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

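	/*
	 * CP15 c7/c14 MCRR: clean+invalidate the D-cache over the range
	 * [to, to + PAGE_SIZE), then c7, c10, 4: drain the write buffer.
	 */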
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}

static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

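	/*
	 * An aliasing VIPT cache cannot be selectively flushed by mm,
	 * so clean+invalidate the entire D-cache (c7, c14, 0) and
	 * drain the write buffer (c7, c10, 4).
	 */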
	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

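	/* as in flush_cache_mm(): clean+invalidate the whole D-cache */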
	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

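/* IPI handler: invalidate the local I-cache on each CPU that receives the call */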
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr, unsigned long len)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
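		/* cores that don't broadcast cache ops in hardware need an IPI */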
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		size_t page_size = PAGE_SIZE << compound_order(page);
		__cpuc_flush_dcache_area(page_address(page), page_size);
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_atomic(page + i);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_atomic(addr);
			}
		} else {
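			/*
			 * With an aliasing cache we can only flush through an
			 * already-established permanent kmap: kmap_high_get()
			 * pins and returns it, or NULL if the page currently
			 * has no kernel mapping.
			 */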
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_high_get(page + i);
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(page + i);
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
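	/* page->index is in PAGE_CACHE_SIZE units; convert to PAGE_SIZE units */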
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping(page);
	else
		mapping = NULL;

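	/*
	 * PG_dcache_clean is cleared by flush_dcache_page() when the
	 * kernel may have left dirty D-cache lines; flush lazily here
	 * on first use and mark the page clean.
	 */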
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent so nothing required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !page_mapped(page))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Ensure cache coherency for the kernel mapping of this page.  We can
 * assume that the page is pinned via kmap.
 *
 * If the page only exists in the page cache and there are no user
 * space mappings, this is a no-op since the page was already marked
 * dirty at creation.  Otherwise, we need to flush the dirty kernel
 * cache lines directly.
 */
void flush_kernel_dcache_page(struct page *page)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
		struct address_space *mapping;

		mapping = page_mapping(page);

		if (!mapping || mapping_mapped(mapping)) {
			void *addr;

			addr = page_address(page);
			/*
			 * kmap_atomic() doesn't set the page virtual
			 * address for highmem pages, and
			 * kunmap_atomic() takes care of cache
			 * flushing already.
			 */
			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		}
	}
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}