arch/arm/mm/flush.c (v5.4)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  linux/arch/arm/mm/flush.c
  4 *
  5 *  Copyright (C) 1995-2002 Russell King
  6 */
  7#include <linux/module.h>
  8#include <linux/mm.h>
  9#include <linux/pagemap.h>
 10#include <linux/highmem.h>
 11
 12#include <asm/cacheflush.h>
 13#include <asm/cachetype.h>
 14#include <asm/highmem.h>
 15#include <asm/smp_plat.h>
 16#include <asm/tlbflush.h>
 17#include <linux/hugetlb.h>
 18
 19#include "mm.h"
 20
 21#ifdef CONFIG_ARM_HEAVY_MB
 22void (*soc_mb)(void);
 23
 24void arm_heavy_mb(void)
 25{
 26#ifdef CONFIG_OUTER_CACHE_SYNC
 27	if (outer_cache.sync)
 28		outer_cache.sync();
 29#endif
 30	if (soc_mb)
 31		soc_mb();
 32}
 33EXPORT_SYMBOL(arm_heavy_mb);
 34#endif
 35
 36#ifdef CONFIG_CPU_CACHE_VIPT
 37
 38static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 39{
 40	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 41	const int zero = 0;
 42
 43	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
 44
 45	asm(	"mcrr	p15, 0, %1, %0, c14\n"
 46	"	mcr	p15, 0, %2, c7, c10, 4"
 47	    :
 48	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
 49	    : "cc");
 50}
 51
 52static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
 53{
 54	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 55	unsigned long offset = vaddr & (PAGE_SIZE - 1);
 56	unsigned long to;
 57
 58	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
 59	to = va + offset;
 60	flush_icache_range(to, to + len);
 61}
 62
 63void flush_cache_mm(struct mm_struct *mm)
 64{
 65	if (cache_is_vivt()) {
 66		vivt_flush_cache_mm(mm);
 67		return;
 68	}
 69
 70	if (cache_is_vipt_aliasing()) {
 71		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
 72		"	mcr	p15, 0, %0, c7, c10, 4"
 73		    :
 74		    : "r" (0)
 75		    : "cc");
 76	}
 77}
 78
 79void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 80{
 81	if (cache_is_vivt()) {
 82		vivt_flush_cache_range(vma, start, end);
 83		return;
 84	}
 85
 86	if (cache_is_vipt_aliasing()) {
 87		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
 88		"	mcr	p15, 0, %0, c7, c10, 4"
 89		    :
 90		    : "r" (0)
 91		    : "cc");
 92	}
 93
 94	if (vma->vm_flags & VM_EXEC)
 95		__flush_icache_all();
 96}
 97
 98void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 99{
100	if (cache_is_vivt()) {
101		vivt_flush_cache_page(vma, user_addr, pfn);
102		return;
103	}
104
105	if (cache_is_vipt_aliasing()) {
106		flush_pfn_alias(pfn, user_addr);
107		__flush_icache_all();
108	}
109
110	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
111		__flush_icache_all();
112}
113
114#else
115#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
116#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
117#endif
118
119#define FLAG_PA_IS_EXEC 1
120#define FLAG_PA_CORE_IN_MM 2
121
122static void flush_ptrace_access_other(void *args)
123{
124	__flush_icache_all();
125}
126
127static inline
128void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
129			   unsigned long len, unsigned int flags)
130{
131	if (cache_is_vivt()) {
132		if (flags & FLAG_PA_CORE_IN_MM) {
133			unsigned long addr = (unsigned long)kaddr;
134			__cpuc_coherent_kern_range(addr, addr + len);
135		}
136		return;
137	}
138
139	if (cache_is_vipt_aliasing()) {
140		flush_pfn_alias(page_to_pfn(page), uaddr);
141		__flush_icache_all();
142		return;
143	}
144
145	/* VIPT non-aliasing D-cache */
146	if (flags & FLAG_PA_IS_EXEC) {
147		unsigned long addr = (unsigned long)kaddr;
148		if (icache_is_vipt_aliasing())
149			flush_icache_alias(page_to_pfn(page), uaddr, len);
150		else
151			__cpuc_coherent_kern_range(addr, addr + len);
152		if (cache_ops_need_broadcast())
153			smp_call_function(flush_ptrace_access_other,
154					  NULL, 1);
155	}
156}
157
158static
159void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
160			 unsigned long uaddr, void *kaddr, unsigned long len)
161{
162	unsigned int flags = 0;
163	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
164		flags |= FLAG_PA_CORE_IN_MM;
165	if (vma->vm_flags & VM_EXEC)
166		flags |= FLAG_PA_IS_EXEC;
167	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
168}
169
170void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
171			     void *kaddr, unsigned long len)
172{
173	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;
174
175	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
176}
177
178/*
179 * Copy user data from/to a page which is mapped into a different
 180 * process's address space.  Really, we want to allow our "user
181 * space" model to handle this.
182 *
183 * Note that this code needs to run on the current CPU.
184 */
185void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
186		       unsigned long uaddr, void *dst, const void *src,
187		       unsigned long len)
188{
189#ifdef CONFIG_SMP
190	preempt_disable();
191#endif
192	memcpy(dst, src, len);
193	flush_ptrace_access(vma, page, uaddr, dst, len);
194#ifdef CONFIG_SMP
195	preempt_enable();
196#endif
197}
198
199void __flush_dcache_page(struct address_space *mapping, struct page *page)
200{
201	/*
202	 * Writeback any data associated with the kernel mapping of this
203	 * page.  This ensures that data in the physical page is mutually
 204	 * coherent with the kernel's mapping.
205	 */
206	if (!PageHighMem(page)) {
 207		__cpuc_flush_dcache_area(page_address(page), page_size(page));
 208	} else {
209		unsigned long i;
210		if (cache_is_vipt_nonaliasing()) {
211			for (i = 0; i < compound_nr(page); i++) {
212				void *addr = kmap_atomic(page + i);
213				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
214				kunmap_atomic(addr);
215			}
216		} else {
217			for (i = 0; i < compound_nr(page); i++) {
218				void *addr = kmap_high_get(page + i);
219				if (addr) {
220					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
221					kunmap_high(page + i);
222				}
223			}
224		}
225	}
226
227	/*
228	 * If this is a page cache page, and we have an aliasing VIPT cache,
229	 * we only need to do one flush - which would be at the relevant
230	 * userspace colour, which is congruent with page->index.
231	 */
232	if (mapping && cache_is_vipt_aliasing())
233		flush_pfn_alias(page_to_pfn(page),
234				page->index << PAGE_SHIFT);
235}
236
237static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
238{
239	struct mm_struct *mm = current->active_mm;
240	struct vm_area_struct *mpnt;
241	pgoff_t pgoff;
242
243	/*
244	 * There are possible user space mappings of this page:
245	 * - VIVT cache: we need to also write back and invalidate all user
246	 *   data in the current VM view associated with this page.
247	 * - aliasing VIPT: we only need to find one mapping of this page.
248	 */
249	pgoff = page->index;
250
251	flush_dcache_mmap_lock(mapping);
252	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
253		unsigned long offset;
254
255		/*
256		 * If this VMA is not in our MM, we can ignore it.
257		 */
258		if (mpnt->vm_mm != mm)
259			continue;
260		if (!(mpnt->vm_flags & VM_MAYSHARE))
261			continue;
262		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
263		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
264	}
265	flush_dcache_mmap_unlock(mapping);
266}
267
268#if __LINUX_ARM_ARCH__ >= 6
269void __sync_icache_dcache(pte_t pteval)
270{
271	unsigned long pfn;
272	struct page *page;
273	struct address_space *mapping;
274
275	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
276		/* only flush non-aliasing VIPT caches for exec mappings */
277		return;
278	pfn = pte_pfn(pteval);
279	if (!pfn_valid(pfn))
280		return;
281
282	page = pfn_to_page(pfn);
283	if (cache_is_vipt_aliasing())
284		mapping = page_mapping_file(page);
285	else
286		mapping = NULL;
287
288	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
289		__flush_dcache_page(mapping, page);
290
291	if (pte_exec(pteval))
292		__flush_icache_all();
293}
294#endif
295
296/*
297 * Ensure cache coherency between kernel mapping and userspace mapping
298 * of this page.
299 *
300 * We have three cases to consider:
301 *  - VIPT non-aliasing cache: fully coherent so nothing required.
302 *  - VIVT: fully aliasing, so we need to handle every alias in our
303 *          current VM view.
304 *  - VIPT aliasing: need to handle one alias in our current VM view.
305 *
306 * If we need to handle aliasing:
307 *  If the page only exists in the page cache and there are no user
308 *  space mappings, we can be lazy and remember that we may have dirty
309 *  kernel cache lines for later.  Otherwise, we assume we have
310 *  aliasing mappings.
311 *
312 * Note that we disable the lazy flush for SMP configurations where
313 * the cache maintenance operations are not automatically broadcasted.
314 */
315void flush_dcache_page(struct page *page)
316{
317	struct address_space *mapping;
318
319	/*
320	 * The zero page is never written to, so never has any dirty
321	 * cache lines, and therefore never needs to be flushed.
322	 */
323	if (page == ZERO_PAGE(0))
324		return;
325
326	if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
327		if (test_bit(PG_dcache_clean, &page->flags))
328			clear_bit(PG_dcache_clean, &page->flags);
329		return;
330	}
331
332	mapping = page_mapping_file(page);
333
334	if (!cache_ops_need_broadcast() &&
335	    mapping && !page_mapcount(page))
336		clear_bit(PG_dcache_clean, &page->flags);
337	else {
338		__flush_dcache_page(mapping, page);
339		if (mapping && cache_is_vivt())
340			__flush_dcache_aliases(mapping, page);
341		else if (mapping)
342			__flush_icache_all();
343		set_bit(PG_dcache_clean, &page->flags);
344	}
345}
346EXPORT_SYMBOL(flush_dcache_page);
347
348/*
349 * Ensure cache coherency for the kernel mapping of this page. We can
350 * assume that the page is pinned via kmap.
351 *
352 * If the page only exists in the page cache and there are no user
353 * space mappings, this is a no-op since the page was already marked
354 * dirty at creation.  Otherwise, we need to flush the dirty kernel
355 * cache lines directly.
356 */
357void flush_kernel_dcache_page(struct page *page)
358{
359	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
360		struct address_space *mapping;
361
362		mapping = page_mapping_file(page);
363
364		if (!mapping || mapping_mapped(mapping)) {
365			void *addr;
366
367			addr = page_address(page);
368			/*
369			 * kmap_atomic() doesn't set the page virtual
370			 * address for highmem pages, and
371			 * kunmap_atomic() takes care of cache
372			 * flushing already.
373			 */
374			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
375				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
376		}
377	}
378}
379EXPORT_SYMBOL(flush_kernel_dcache_page);
380
381/*
382 * Flush an anonymous page so that users of get_user_pages()
383 * can safely access the data.  The expected sequence is:
384 *
385 *  get_user_pages()
386 *    -> flush_anon_page
387 *  memcpy() to/from page
388 *  if written to page, flush_dcache_page()
389 */
390void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
391{
392	unsigned long pfn;
393
394	/* VIPT non-aliasing caches need do nothing */
395	if (cache_is_vipt_nonaliasing())
396		return;
397
398	/*
399	 * Write back and invalidate userspace mapping.
400	 */
401	pfn = page_to_pfn(page);
402	if (cache_is_vivt()) {
403		flush_cache_page(vma, vmaddr, pfn);
404	} else {
405		/*
406		 * For aliasing VIPT, we can flush an alias of the
407		 * userspace address only.
408		 */
409		flush_pfn_alias(pfn, vmaddr);
410		__flush_icache_all();
411	}
412
413	/*
414	 * Invalidate kernel mapping.  No data should be contained
415	 * in this mapping of the page.  FIXME: this is overkill
416	 * since we actually ask for a write-back and invalidate.
417	 */
418	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
419}
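
The block comment above __flush_anon_page() spells out the sequence that callers of get_user_pages() are expected to follow. As an illustration only, here is a minimal sketch of such a caller written against the v5.4 API shown above (mmap_sem, get_user_pages(), flush_anon_page(), flush_dcache_page()); the function name, the single-page assumption and the error handling are mine, not part of flush.c.

/*
 * Illustrative sketch only (not part of flush.c): a caller following the
 * sequence documented above __flush_anon_page() in the v5.4 listing.
 */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/string.h>

/* Write @len bytes from @buf into current's mapping at @uaddr
 * (assumes the range does not cross a page boundary). */
static int write_user_page_sketch(unsigned long uaddr, const void *buf,
				  size_t len)
{
	struct vm_area_struct *vma;
	struct page *page;
	void *kaddr;
	long ret;

	down_read(&current->mm->mmap_sem);
	/* Step 1: get_user_pages() pins the backing page. */
	ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, &page, &vma);
	if (ret == 1)
		/* Step 2: flush_anon_page() -> __flush_anon_page() writes back
		 * and invalidates the user alias (no-op for non-aliasing VIPT)
		 * so the kernel view is not shadowed by stale user lines. */
		flush_anon_page(vma, page, uaddr);
	up_read(&current->mm->mmap_sem);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* Step 3: memcpy to the page through the kernel mapping. */
	kaddr = kmap(page);
	memcpy(kaddr + (uaddr & ~PAGE_MASK), buf, len);
	kunmap(page);

	/* Step 4: we wrote to the page, so flush_dcache_page() makes the new
	 * data visible to the user mapping (and, if executed, the I-cache). */
	flush_dcache_page(page);

	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}

On non-aliasing VIPT hardware both flush calls in the sketch reduce to the cheap early-return paths visible in the listing.
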
arch/arm/mm/flush.c (v4.10.11)
  1/*
  2 *  linux/arch/arm/mm/flush.c
  3 *
  4 *  Copyright (C) 1995-2002 Russell King
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License version 2 as
  8 * published by the Free Software Foundation.
  9 */
 10#include <linux/module.h>
 11#include <linux/mm.h>
 12#include <linux/pagemap.h>
 13#include <linux/highmem.h>
 14
 15#include <asm/cacheflush.h>
 16#include <asm/cachetype.h>
 17#include <asm/highmem.h>
 18#include <asm/smp_plat.h>
 19#include <asm/tlbflush.h>
 20#include <linux/hugetlb.h>
 21
 22#include "mm.h"
 23
 24#ifdef CONFIG_ARM_HEAVY_MB
 25void (*soc_mb)(void);
 26
 27void arm_heavy_mb(void)
 28{
 29#ifdef CONFIG_OUTER_CACHE_SYNC
 30	if (outer_cache.sync)
 31		outer_cache.sync();
 32#endif
 33	if (soc_mb)
 34		soc_mb();
 35}
 36EXPORT_SYMBOL(arm_heavy_mb);
 37#endif
 38
 39#ifdef CONFIG_CPU_CACHE_VIPT
 40
 41static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 42{
 43	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 44	const int zero = 0;
 45
 46	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
 47
 48	asm(	"mcrr	p15, 0, %1, %0, c14\n"
 49	"	mcr	p15, 0, %2, c7, c10, 4"
 50	    :
 51	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
 52	    : "cc");
 53}
 54
 55static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
 56{
 57	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 58	unsigned long offset = vaddr & (PAGE_SIZE - 1);
 59	unsigned long to;
 60
 61	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
 62	to = va + offset;
 63	flush_icache_range(to, to + len);
 64}
 65
 66void flush_cache_mm(struct mm_struct *mm)
 67{
 68	if (cache_is_vivt()) {
 69		vivt_flush_cache_mm(mm);
 70		return;
 71	}
 72
 73	if (cache_is_vipt_aliasing()) {
 74		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
 75		"	mcr	p15, 0, %0, c7, c10, 4"
 76		    :
 77		    : "r" (0)
 78		    : "cc");
 79	}
 80}
 81
 82void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 83{
 84	if (cache_is_vivt()) {
 85		vivt_flush_cache_range(vma, start, end);
 86		return;
 87	}
 88
 89	if (cache_is_vipt_aliasing()) {
 90		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
 91		"	mcr	p15, 0, %0, c7, c10, 4"
 92		    :
 93		    : "r" (0)
 94		    : "cc");
 95	}
 96
 97	if (vma->vm_flags & VM_EXEC)
 98		__flush_icache_all();
 99}
100
101void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
102{
103	if (cache_is_vivt()) {
104		vivt_flush_cache_page(vma, user_addr, pfn);
105		return;
106	}
107
108	if (cache_is_vipt_aliasing()) {
109		flush_pfn_alias(pfn, user_addr);
110		__flush_icache_all();
111	}
112
113	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
114		__flush_icache_all();
115}
116
117#else
118#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
119#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
120#endif
121
122#define FLAG_PA_IS_EXEC 1
123#define FLAG_PA_CORE_IN_MM 2
124
125static void flush_ptrace_access_other(void *args)
126{
127	__flush_icache_all();
128}
129
130static inline
131void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
132			   unsigned long len, unsigned int flags)
133{
134	if (cache_is_vivt()) {
135		if (flags & FLAG_PA_CORE_IN_MM) {
136			unsigned long addr = (unsigned long)kaddr;
137			__cpuc_coherent_kern_range(addr, addr + len);
138		}
139		return;
140	}
141
142	if (cache_is_vipt_aliasing()) {
143		flush_pfn_alias(page_to_pfn(page), uaddr);
144		__flush_icache_all();
145		return;
146	}
147
148	/* VIPT non-aliasing D-cache */
149	if (flags & FLAG_PA_IS_EXEC) {
150		unsigned long addr = (unsigned long)kaddr;
151		if (icache_is_vipt_aliasing())
152			flush_icache_alias(page_to_pfn(page), uaddr, len);
153		else
154			__cpuc_coherent_kern_range(addr, addr + len);
155		if (cache_ops_need_broadcast())
156			smp_call_function(flush_ptrace_access_other,
157					  NULL, 1);
158	}
159}
160
161static
162void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
163			 unsigned long uaddr, void *kaddr, unsigned long len)
164{
165	unsigned int flags = 0;
166	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
167		flags |= FLAG_PA_CORE_IN_MM;
168	if (vma->vm_flags & VM_EXEC)
169		flags |= FLAG_PA_IS_EXEC;
170	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
171}
172
173void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
174			     void *kaddr, unsigned long len)
175{
176	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;
177
178	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
179}
180
181/*
182 * Copy user data from/to a page which is mapped into a different
 183 * process's address space.  Really, we want to allow our "user
184 * space" model to handle this.
185 *
186 * Note that this code needs to run on the current CPU.
187 */
188void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
189		       unsigned long uaddr, void *dst, const void *src,
190		       unsigned long len)
191{
192#ifdef CONFIG_SMP
193	preempt_disable();
194#endif
195	memcpy(dst, src, len);
196	flush_ptrace_access(vma, page, uaddr, dst, len);
197#ifdef CONFIG_SMP
198	preempt_enable();
199#endif
200}
201
202void __flush_dcache_page(struct address_space *mapping, struct page *page)
203{
204	/*
205	 * Writeback any data associated with the kernel mapping of this
206	 * page.  This ensures that data in the physical page is mutually
 207	 * coherent with the kernel's mapping.
208	 */
209	if (!PageHighMem(page)) {
210		size_t page_size = PAGE_SIZE << compound_order(page);
211		__cpuc_flush_dcache_area(page_address(page), page_size);
212	} else {
213		unsigned long i;
214		if (cache_is_vipt_nonaliasing()) {
215			for (i = 0; i < (1 << compound_order(page)); i++) {
216				void *addr = kmap_atomic(page + i);
217				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
218				kunmap_atomic(addr);
219			}
220		} else {
221			for (i = 0; i < (1 << compound_order(page)); i++) {
222				void *addr = kmap_high_get(page + i);
223				if (addr) {
224					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
225					kunmap_high(page + i);
226				}
227			}
228		}
229	}
230
231	/*
232	 * If this is a page cache page, and we have an aliasing VIPT cache,
233	 * we only need to do one flush - which would be at the relevant
234	 * userspace colour, which is congruent with page->index.
235	 */
236	if (mapping && cache_is_vipt_aliasing())
237		flush_pfn_alias(page_to_pfn(page),
238				page->index << PAGE_SHIFT);
239}
240
241static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
242{
243	struct mm_struct *mm = current->active_mm;
244	struct vm_area_struct *mpnt;
245	pgoff_t pgoff;
246
247	/*
248	 * There are possible user space mappings of this page:
249	 * - VIVT cache: we need to also write back and invalidate all user
250	 *   data in the current VM view associated with this page.
251	 * - aliasing VIPT: we only need to find one mapping of this page.
252	 */
253	pgoff = page->index;
254
255	flush_dcache_mmap_lock(mapping);
256	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
257		unsigned long offset;
258
259		/*
260		 * If this VMA is not in our MM, we can ignore it.
261		 */
262		if (mpnt->vm_mm != mm)
263			continue;
264		if (!(mpnt->vm_flags & VM_MAYSHARE))
265			continue;
266		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
267		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
268	}
269	flush_dcache_mmap_unlock(mapping);
270}
271
272#if __LINUX_ARM_ARCH__ >= 6
273void __sync_icache_dcache(pte_t pteval)
274{
275	unsigned long pfn;
276	struct page *page;
277	struct address_space *mapping;
278
279	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
280		/* only flush non-aliasing VIPT caches for exec mappings */
281		return;
282	pfn = pte_pfn(pteval);
283	if (!pfn_valid(pfn))
284		return;
285
286	page = pfn_to_page(pfn);
287	if (cache_is_vipt_aliasing())
288		mapping = page_mapping(page);
289	else
290		mapping = NULL;
291
292	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
293		__flush_dcache_page(mapping, page);
294
295	if (pte_exec(pteval))
296		__flush_icache_all();
297}
298#endif
299
300/*
301 * Ensure cache coherency between kernel mapping and userspace mapping
302 * of this page.
303 *
304 * We have three cases to consider:
305 *  - VIPT non-aliasing cache: fully coherent so nothing required.
306 *  - VIVT: fully aliasing, so we need to handle every alias in our
307 *          current VM view.
308 *  - VIPT aliasing: need to handle one alias in our current VM view.
309 *
310 * If we need to handle aliasing:
311 *  If the page only exists in the page cache and there are no user
312 *  space mappings, we can be lazy and remember that we may have dirty
313 *  kernel cache lines for later.  Otherwise, we assume we have
314 *  aliasing mappings.
315 *
316 * Note that we disable the lazy flush for SMP configurations where
317 * the cache maintenance operations are not automatically broadcasted.
318 */
319void flush_dcache_page(struct page *page)
320{
321	struct address_space *mapping;
322
323	/*
324	 * The zero page is never written to, so never has any dirty
325	 * cache lines, and therefore never needs to be flushed.
326	 */
327	if (page == ZERO_PAGE(0))
328		return;
329
 330	mapping = page_mapping(page);
 331
332	if (!cache_ops_need_broadcast() &&
333	    mapping && !page_mapcount(page))
334		clear_bit(PG_dcache_clean, &page->flags);
335	else {
336		__flush_dcache_page(mapping, page);
337		if (mapping && cache_is_vivt())
338			__flush_dcache_aliases(mapping, page);
339		else if (mapping)
340			__flush_icache_all();
341		set_bit(PG_dcache_clean, &page->flags);
342	}
343}
344EXPORT_SYMBOL(flush_dcache_page);
345
346/*
347 * Ensure cache coherency for the kernel mapping of this page. We can
348 * assume that the page is pinned via kmap.
349 *
350 * If the page only exists in the page cache and there are no user
351 * space mappings, this is a no-op since the page was already marked
352 * dirty at creation.  Otherwise, we need to flush the dirty kernel
353 * cache lines directly.
354 */
355void flush_kernel_dcache_page(struct page *page)
356{
357	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
358		struct address_space *mapping;
359
360		mapping = page_mapping(page);
361
362		if (!mapping || mapping_mapped(mapping)) {
363			void *addr;
364
365			addr = page_address(page);
366			/*
367			 * kmap_atomic() doesn't set the page virtual
368			 * address for highmem pages, and
369			 * kunmap_atomic() takes care of cache
370			 * flushing already.
371			 */
372			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
373				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
374		}
375	}
376}
377EXPORT_SYMBOL(flush_kernel_dcache_page);
378
379/*
380 * Flush an anonymous page so that users of get_user_pages()
381 * can safely access the data.  The expected sequence is:
382 *
383 *  get_user_pages()
384 *    -> flush_anon_page
385 *  memcpy() to/from page
386 *  if written to page, flush_dcache_page()
387 */
388void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
389{
390	unsigned long pfn;
391
392	/* VIPT non-aliasing caches need do nothing */
393	if (cache_is_vipt_nonaliasing())
394		return;
395
396	/*
397	 * Write back and invalidate userspace mapping.
398	 */
399	pfn = page_to_pfn(page);
400	if (cache_is_vivt()) {
401		flush_cache_page(vma, vmaddr, pfn);
402	} else {
403		/*
404		 * For aliasing VIPT, we can flush an alias of the
405		 * userspace address only.
406		 */
407		flush_pfn_alias(pfn, vmaddr);
408		__flush_icache_all();
409	}
410
411	/*
412	 * Invalidate kernel mapping.  No data should be contained
413	 * in this mapping of the page.  FIXME: this is overkill
414	 * since we actually ask for a write-back and invalidate.
415	 */
416	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
417}
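
copy_to_user_page() above is the hook used when the kernel writes into a page that belongs to another process, for example when ptrace or uprobes patches instructions. A minimal sketch of such a caller follows; it assumes the page has already been pinned from the target vma (for instance with get_user_pages_remote()), and the helper name is hypothetical.

#include <linux/mm.h>
#include <linux/highmem.h>

/* Hypothetical helper: @page is assumed to be already pinned from @vma's mm;
 * dropping that reference is left to the caller. */
static void poke_remote_page_sketch(struct vm_area_struct *vma,
				    struct page *page, unsigned long uaddr,
				    const void *src, size_t len)
{
	void *kaddr = kmap(page);

	/*
	 * copy_to_user_page() (defined in flush.c above) does the memcpy and
	 * then flush_ptrace_access(), which cleans the D-cache and, for
	 * VM_EXEC mappings, the I-cache, so the target task sees and can
	 * execute the new bytes.
	 */
	copy_to_user_page(vma, page, uaddr, kaddr + (uaddr & ~PAGE_MASK),
			  src, len);

	kunmap(page);
	set_page_dirty_lock(page);
}

Because flush_ptrace_access() checks mm_cpumask() and VM_EXEC itself, the caller only has to supply the vma and the kernel-mapped destination.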
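
For reference, the address arithmetic in flush_pfn_alias() picks a scratch kernel page whose cache colour matches the user address, so that cleaning that single alias hits the same VIPT cache sets the user mapping dirtied. The self-contained userspace sketch below walks the computation for one example address; the CACHE_COLOUR/SHMLBA definitions and the FLUSH_ALIAS_START value are restated here as assumptions for illustration (ARM uses SHMLBA of 4 * PAGE_SIZE when the D-cache can alias).

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
/* Assumption for illustration: four possible page colours, i.e. a VIPT
 * D-cache way size of 16 KiB, matching SHMLBA of 4 * PAGE_SIZE. */
#define SHMLBA		(4 * PAGE_SIZE)
#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)
/* Stand-in value; the real FLUSH_ALIAS_START is an arch-reserved window. */
#define FLUSH_ALIAS_START	0xffff4000UL

int main(void)
{
	unsigned long user_vaddr = 0x0040b000UL;	/* arbitrary example */
	unsigned long colour = CACHE_COLOUR(user_vaddr);
	unsigned long to = FLUSH_ALIAS_START + (colour << PAGE_SHIFT);

	/* Bits [13:12] of 0x0040b000 are 0b11, so the colour is 3 and the
	 * scratch mapping lands at FLUSH_ALIAS_START + 3 * PAGE_SIZE. */
	printf("colour=%lu alias=0x%lx\n", colour, to);
	return 0;
}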