arch/arm/mm/flush.c (v3.1)
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START	0xffff4000

static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);

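	/*
	 * CP15 range operation: clean and invalidate the D-cache lines
	 * covering the alias mapping (mcrr p15, 0, <end>, <start>, c14),
	 * then drain the write buffer (mcr p15, 0, 0, c7, c10, 4) so the
	 * maintenance completes before the mapping is reused.
	 */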
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}

static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long colour = CACHE_COLOUR(vaddr);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
	to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset;
	flush_tlb_kernel_page(to);
	flush_icache_range(to, to + len);
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
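		/*
		 * An aliasing VIPT D-cache has no per-mm maintenance op,
		 * so clean and invalidate the whole D-cache (c7, c14, 0)
		 * and drain the write buffer (c7, c10, 4).
		 */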
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

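/*
 * Write back and invalidate caches after the kernel has written to a
 * user page through its kernel mapping (kaddr), making the user view
 * (uaddr) and, for executable mappings, the I-cache coherent again.
 */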
static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
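	/*
	 * Stay on this CPU across the copy and the flush: the cache
	 * maintenance below must run on the CPU that dirtied the data.
	 */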
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
	} else {
		void *addr = kmap_high_get(page);
		if (addr) {
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_high(page);
		} else if (cache_is_vipt()) {
			/* unmapped pages might still be cached */
			addr = kmap_atomic(page);
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_atomic(addr);
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
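		/*
		 * Translate the file offset (pgoff) into the user virtual
		 * address of this page within the VMA.
		 */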
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
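/*
 * Called when a user PTE is being established: bring the I-cache into
 * sync with the D-cache for executable pages, performing any D-cache
 * writeback that was deferred via PG_dcache_clean.
 */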
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (!pte_present_user(pteval))
		return;
	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
arch/arm/mm/flush.c (v4.17)
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

#ifdef CONFIG_ARM_HEAVY_MB
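/*
 * Heavy memory barrier support: synchronise the outer cache and run
 * the SoC-specific barrier hook (soc_mb) if the platform set one.
 */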
void (*soc_mb)(void);

void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)
		outer_cache.sync();
#endif
	if (soc_mb)
		soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
	    : "cc");
}

static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

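/*
 * Flag bits for __flush_ptrace_access(): the page is mapped
 * executable, and the current CPU is in the target mm's cpumask.
 */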
#define FLAG_PA_IS_EXEC 1
#define FLAG_PA_CORE_IN_MM 2

static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
			   unsigned long len, unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	unsigned int flags = 0;
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		flags |= FLAG_PA_CORE_IN_MM;
	if (vma->vm_flags & VM_EXEC)
		flags |= FLAG_PA_IS_EXEC;
	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

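/*
 * Flush a uprobes execute-out-of-line (XOL) slot after the kernel has
 * copied an instruction into it: treat it as executable and in use on
 * this CPU.
 */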
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len)
{
	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
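		/* may be a compound (huge) page: flush all of it at once */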
		size_t page_size = PAGE_SIZE << compound_order(page);
		__cpuc_flush_dcache_area(page_address(page), page_size);
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_atomic(page + i);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_atomic(addr);
			}
		} else {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_high_get(page + i);
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(page + i);
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping_file(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

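	/*
	 * With a non-aliasing VIPT cache and local (non-broadcast) cache
	 * ops there are no user-space aliases to flush here: just mark
	 * the page dirty so __sync_icache_dcache() syncs the I-cache if
	 * the page is ever mapped executable.
	 */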
	if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
		if (test_bit(PG_dcache_clean, &page->flags))
			clear_bit(PG_dcache_clean, &page->flags);
		return;
	}

	mapping = page_mapping_file(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !page_mapcount(page))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Ensure cache coherency for the kernel mapping of this page. We can
 * assume that the page is pinned via kmap.
 *
 * If the page only exists in the page cache and there are no user
 * space mappings, this is a no-op since the page was already marked
 * dirty at creation.  Otherwise, we need to flush the dirty kernel
 * cache lines directly.
 */
void flush_kernel_dcache_page(struct page *page)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
		struct address_space *mapping;

		mapping = page_mapping_file(page);

		if (!mapping || mapping_mapped(mapping)) {
			void *addr;

			addr = page_address(page);
			/*
			 * kmap_atomic() doesn't set the page virtual
			 * address for highmem pages, and
			 * kunmap_atomic() takes care of cache
			 * flushing already.
			 */
			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		}
	}
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}