// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)
		outer_cache.sync();
#endif
	if (soc_mb)
		soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif
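
/*
 * Platform code opts in to the heavy barrier by assigning soc_mb from an
 * early init hook.  A minimal sketch, using a hypothetical platform and
 * register (not taken from any real SoC):
 *
 *	static void __iomem *my_sync_reg;	// hypothetical MMIO register
 *
 *	static void my_soc_mb(void)
 *	{
 *		readl_relaxed(my_sync_reg);	// assumed to drain the bus write buffer
 *	}
 *
 *	static int __init my_soc_mb_init(void)
 *	{
 *		my_sync_reg = ioremap(MY_SYNC_PA, SZ_4);	// MY_SYNC_PA is hypothetical
 *		if (my_sync_reg)
 *			soc_mb = my_soc_mb;
 *		return 0;
 *	}
 *	early_initcall(my_soc_mb_init);
 */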

#ifdef CONFIG_CPU_CACHE_VIPT

static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
	    : "cc");
}
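
/*
 * Worked example of the colour arithmetic above (cache geometry assumed
 * for illustration): with a 32KB two-way set-associative VIPT cache and
 * 4KB pages, each way spans 16KB, so virtual address bits [13:12] index
 * the cache and there are four page colours.  CACHE_COLOUR(vaddr) picks
 * out those bits, and the alias window at FLUSH_ALIAS_START is therefore
 * mapped at the colour-congruent slot so that it hits the same cache
 * lines as the user's mapping of the page.
 */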

static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_pages(vma, user_addr, pfn, nr);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

#define FLAG_PA_IS_EXEC			1
#define FLAG_PA_CORE_IN_MM		2

static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
			   unsigned long len, unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	unsigned int flags = 0;
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		flags |= FLAG_PA_CORE_IN_MM;
	if (vma->vm_flags & VM_EXEC)
		flags |= FLAG_PA_IS_EXEC;
	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len)
{
	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
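
/*
 * Caller sketch for copy_to_user_page(), loosely following the generic
 * access_process_vm()/ptrace path (an illustrative assumption, not a
 * quote of that code):
 *
 *	if (get_user_pages_remote(mm, addr, 1, FOLL_WRITE, &page, NULL) == 1) {
 *		void *kaddr = kmap(page);
 *		copy_to_user_page(vma, page, addr,
 *				  kaddr + offset_in_page(addr), buf, len);
 *		kunmap(page);
 *		put_page(page);
 *	}
 *
 * The flush_ptrace_access() call above is what keeps the I-cache coherent
 * when a debugger writes a breakpoint into another task's text page
 * through the kernel mapping.
 */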

void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!folio_test_highmem(folio)) {
		__cpuc_flush_dcache_area(folio_address(folio),
					 folio_size(folio));
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < folio_nr_pages(folio); i++) {
				void *addr = kmap_local_folio(folio,
							      i * PAGE_SIZE);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_local(addr);
			}
		} else {
			for (i = 0; i < folio_nr_pages(folio); i++) {
				void *addr = kmap_high_get(folio_page(folio, i));
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(folio_page(folio, i));
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with folio->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(folio_pfn(folio), folio_pos(folio));
}

static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;
	pgoff_t pgoff, pgoff_end;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = folio->index;
	pgoff_end = pgoff + folio_nr_pages(folio) - 1;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {
		unsigned long start, offset, pfn;
		unsigned int nr;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (vma->vm_mm != mm)
			continue;
		if (!(vma->vm_flags & VM_MAYSHARE))
			continue;

		start = vma->vm_start;
		pfn = folio_pfn(folio);
		nr = folio_nr_pages(folio);
		offset = pgoff - vma->vm_pgoff;
		if (offset > -nr) {
			pfn -= offset;
			nr += offset;
		} else {
			start += offset * PAGE_SIZE;
		}
		if (start + nr * PAGE_SIZE > vma->vm_end)
			nr = (vma->vm_end - start) / PAGE_SIZE;

		flush_cache_pages(vma, start, pfn, nr);
	}
	flush_dcache_mmap_unlock(mapping);
}
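
/*
 * Worked example for the unsigned offset arithmetic above (values chosen
 * for illustration): a 4-page folio at pgoff 8 overlapping a VMA with
 * vm_pgoff 10 gives offset = -2, which as an unsigned value is greater
 * than -nr (-4), so the first branch trims the flush to the overlap:
 * pfn -= -2 advances the pfn by two pages and nr += -2 shrinks the count
 * to two, while start stays at vma->vm_start.  A folio at pgoff 12 takes
 * the else branch instead, moving start forward by (12 - 10) pages and
 * leaving the vm_end check to clamp nr.
 */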

#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct folio *folio;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));
	if (folio_test_reserved(folio))
		return;

	if (cache_is_vipt_aliasing())
		mapping = folio_flush_mapping(folio);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
		__flush_dcache_folio(mapping, folio);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif
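
/*
 * Sketch of the PG_dcache_clean handshake used above (a summary of the
 * code in this file, not a new mechanism): flush_dcache_folio() below
 * clears the bit to defer the flush when a page cache folio is dirtied
 * through the kernel mapping while it has no user mappings;
 * __sync_icache_dcache() then performs the deferred flush and sets the
 * bit again when a user PTE is established, so each dirty/map cycle
 * flushes at most once.
 */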

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent so nothing required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (is_zero_pfn(folio_pfn(folio)))
		return;

	if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
		if (test_bit(PG_dcache_clean, &folio->flags))
			clear_bit(PG_dcache_clean, &folio->flags);
		return;
	}

	mapping = folio_flush_mapping(folio);

	if (!cache_ops_need_broadcast() &&
	    mapping && !folio_mapped(folio))
		clear_bit(PG_dcache_clean, &folio->flags);
	else {
		__flush_dcache_folio(mapping, folio);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, folio);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &folio->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_folio);
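
/*
 * Usage sketch (illustrative; not lifted from a specific driver): code
 * that dirties a page cache folio through its kernel mapping must call
 * flush_dcache_folio() before userspace can observe the data:
 *
 *	addr = kmap_local_folio(folio, 0);
 *	memcpy(addr, src, len);			// dirty the kernel alias
 *	kunmap_local(addr);
 *	flush_dcache_folio(folio);		// push it to the user alias
 */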

void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}