v3.15
 
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}
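
/*
 * Illustration (not part of the kernel source): how flush_pfn_alias()
 * picks its temporary mapping address.  The constants below are
 * assumptions for the example only -- a 16KB aliasing VIPT D-cache with
 * 4KB pages has four cache colours, so the colour is bits [13:12] of
 * the virtual address.  Buildable as a plain userspace program:
 */
#if 0	/* example only, kept out of the kernel build */
#include <stdio.h>

#define EX_PAGE_SHIFT	12
#define EX_SHMLBA	(4UL << EX_PAGE_SHIFT)		/* 4 colours */
#define EX_COLOUR(va)	(((va) & (EX_SHMLBA - 1)) >> EX_PAGE_SHIFT)
#define EX_ALIAS_START	0xffef0000UL			/* assumed window */

int main(void)
{
	unsigned long uaddr = 0x0040a000UL;		/* a user address */
	unsigned long to = EX_ALIAS_START +
			   (EX_COLOUR(uaddr) << EX_PAGE_SHIFT);

	/* 'to' has the same cache colour as 'uaddr', so cache ops on the
	 * kernel alias hit the same sets the user mapping occupies. */
	printf("colour %lu -> alias at %#lx\n", EX_COLOUR(uaddr), to);
	return 0;
}
#endif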

static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
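
/*
 * Hedged usage sketch (not part of this file): roughly how the generic
 * access_process_vm()/ptrace path in mm/memory.c drives
 * copy_to_user_page() when it pokes another task's memory.  Local names
 * here are illustrative.
 */
#if 0	/* example only */
static void example_poke_page(struct vm_area_struct *vma, struct page *page,
			      unsigned long addr, const void *buf, int len)
{
	void *maddr = kmap(page);

	/* write through the kernel mapping, then let the arch make the
	 * user-visible mapping coherent (D-cache clean + I-cache fixup) */
	copy_to_user_page(vma, page, addr,
			  maddr + (addr & (PAGE_SIZE - 1)), buf, len);
	set_page_dirty_lock(page);
	kunmap(page);
}
#endif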

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		size_t page_size = PAGE_SIZE << compound_order(page);
		__cpuc_flush_dcache_area(page_address(page), page_size);
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_atomic(page + i);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_atomic(addr);
			}
		} else {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_high_get(page + i);
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(page + i);
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcasted.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !page_mapped(page))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
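
/*
 * Hedged usage sketch (not part of this file): a driver or filesystem
 * that writes a page-cache page through its kernel mapping is expected
 * to call flush_dcache_page() afterwards, so dirty lines reach the
 * colour a user mapping will read from.  Illustrative names only.
 */
#if 0	/* example only */
static void example_fill_page(struct page *page, const void *src, size_t len)
{
	void *dst = kmap_atomic(page);

	memcpy(dst, src, len);
	kunmap_atomic(dst);
	flush_dcache_page(page);	/* make the data visible to user mappings */
}
#endif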

/*
 * Ensure cache coherency for the kernel mapping of this page. We can
 * assume that the page is pinned via kmap.
 *
 * If the page only exists in the page cache and there are no user
 * space mappings, this is a no-op since the page was already marked
 * dirty at creation.  Otherwise, we need to flush the dirty kernel
 * cache lines directly.
 */
void flush_kernel_dcache_page(struct page *page)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
		struct address_space *mapping;

		mapping = page_mapping(page);

		if (!mapping || mapping_mapped(mapping)) {
			void *addr;

			addr = page_address(page);
			/*
			 * kmap_atomic() doesn't set the page virtual
			 * address for highmem pages, and
			 * kunmap_atomic() takes care of cache
			 * flushing already.
			 */
			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		}
	}
}
EXPORT_SYMBOL(flush_kernel_dcache_page);
341/*
342 * Flush an anonymous page so that users of get_user_pages()
343 * can safely access the data.  The expected sequence is:
344 *
345 *  get_user_pages()
346 *    -> flush_anon_page
347 *  memcpy() to/from page
348 *  if written to page, flush_dcache_page()
349 */
 
350void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
351{
352	unsigned long pfn;
353
354	/* VIPT non-aliasing caches need do nothing */
355	if (cache_is_vipt_nonaliasing())
356		return;
357
358	/*
359	 * Write back and invalidate userspace mapping.
360	 */
361	pfn = page_to_pfn(page);
362	if (cache_is_vivt()) {
363		flush_cache_page(vma, vmaddr, pfn);
364	} else {
365		/*
366		 * For aliasing VIPT, we can flush an alias of the
367		 * userspace address only.
368		 */
369		flush_pfn_alias(pfn, vmaddr);
370		__flush_icache_all();
371	}
372
373	/*
374	 * Invalidate kernel mapping.  No data should be contained
375	 * in this mapping of the page.  FIXME: this is overkill
376	 * since we actually ask for a write-back and invalidate.
377	 */
378	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
379}
v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)
		outer_cache.sync();
#endif
	if (soc_mb)
		soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif
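
/*
 * Hedged sketch (not part of this file): a platform with posted bus
 * writes can install a heavy-barrier hook at init time; arm_heavy_mb()
 * then calls it after the outer-cache sync.  Function names below are
 * illustrative.
 */
#if 0	/* example only */
static void example_soc_barrier(void)
{
	/* e.g. read back a bus register to flush posted writes */
}

static int __init example_soc_init(void)
{
	soc_mb = example_soc_barrier;
	return 0;
}
early_initcall(example_soc_init);
#endif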

#ifdef CONFIG_CPU_CACHE_VIPT

static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
	    : "cc");
}

static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_pages(vma, user_addr, pfn, nr);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

#define FLAG_PA_IS_EXEC 1
#define FLAG_PA_CORE_IN_MM 2

static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
			   unsigned long len, unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	unsigned int flags = 0;
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		flags |= FLAG_PA_CORE_IN_MM;
	if (vma->vm_flags & VM_EXEC)
		flags |= FLAG_PA_IS_EXEC;
	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

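/*
 * uprobes single-steps out of line from an XOL slot that the kernel has
 * just written, so the flush below is always performed as if the
 * current core were in the mm and the mapping were executable.
 */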
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len)
{
	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!folio_test_highmem(folio)) {
		__cpuc_flush_dcache_area(folio_address(folio),
					folio_size(folio));
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < folio_nr_pages(folio); i++) {
				void *addr = kmap_local_folio(folio,
								i * PAGE_SIZE);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_local(addr);
			}
		} else {
			for (i = 0; i < folio_nr_pages(folio); i++) {
				void *addr = kmap_high_get(folio_page(folio, i));
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(folio_page(folio, i));
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(folio_pfn(folio), folio_pos(folio));
}

static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;
	pgoff_t pgoff, pgoff_end;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = folio->index;
	pgoff_end = pgoff + folio_nr_pages(folio) - 1;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {
		unsigned long start, offset, pfn;
		unsigned int nr;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (vma->vm_mm != mm)
			continue;
		if (!(vma->vm_flags & VM_MAYSHARE))
			continue;

		start = vma->vm_start;
		pfn = folio_pfn(folio);
		nr = folio_nr_pages(folio);
		offset = pgoff - vma->vm_pgoff;
		if (offset > -nr) {
			pfn -= offset;
			nr += offset;
		} else {
			start += offset * PAGE_SIZE;
		}
		if (start + nr * PAGE_SIZE > vma->vm_end)
			nr = (vma->vm_end - start) / PAGE_SIZE;

		flush_cache_pages(vma, start, pfn, nr);
	}
	flush_dcache_mmap_unlock(mapping);
}
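
/*
 * Worked example for the clamping arithmetic above (illustrative
 * numbers): a 4-page folio at index 8 against a VMA with vm_pgoff 10.
 * offset = 8 - 10 wraps to a huge unsigned value that compares greater
 * than (unsigned long)-nr (= -4), so the first branch fires: pfn is
 * advanced by 2 and nr shrinks to 2, i.e. only the two folio pages
 * that actually fall inside the VMA are flushed, starting at
 * vma->vm_start.  Had the folio instead started inside the VMA (index
 * 12), offset = 2 would take the else branch and simply move start
 * forward by two pages.
 */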

#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct folio *folio;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));
	if (cache_is_vipt_aliasing())
		mapping = folio_flush_mapping(folio);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
		__flush_dcache_folio(mapping, folio);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcasted.
 */
void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (is_zero_pfn(folio_pfn(folio)))
		return;

	if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
		if (test_bit(PG_dcache_clean, &folio->flags))
			clear_bit(PG_dcache_clean, &folio->flags);
		return;
	}
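	/*
	 * On non-aliasing VIPT caches the only remaining concern is
	 * D/I-cache coherency: the early return above just clears
	 * PG_dcache_clean, deferring the flush to __sync_icache_dcache()
	 * when an executable mapping is established.
	 */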

	mapping = folio_flush_mapping(folio);

	if (!cache_ops_need_broadcast() &&
	    mapping && !folio_mapped(folio))
		clear_bit(PG_dcache_clean, &folio->flags);
	else {
		__flush_dcache_folio(mapping, folio);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, folio);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &folio->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_folio);

void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
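
/*
 * Hedged sketch (not part of this file) of the sequence described in
 * the comment above __flush_anon_page(), as seen by a get_user_pages()
 * user; on current kernels the flush_anon_page()/flush_dcache_page()
 * calls are issued from the core GUP and remote-access paths.  The
 * helper name is illustrative, the get_user_pages() signature assumed
 * here is the post-v6.5 one, and the caller must hold
 * mmap_read_lock(current->mm).
 */
#if 0	/* example only */
static int example_read_own_page(unsigned long addr, void *buf, int len)
{
	struct page *page;
	void *kaddr;
	long got;

	got = get_user_pages(addr & PAGE_MASK, 1, 0, &page);
	if (got != 1)
		return -EFAULT;

	kaddr = kmap_local_page(page);			/* memcpy() from page */
	memcpy(buf, kaddr + (addr & (PAGE_SIZE - 1)), len);
	kunmap_local(kaddr);
	put_page(page);
	return 0;
}
#endif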