v4.17

/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"

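/*
 * Default memory type for aliased user mappings: uncacheable, but with
 * the write buffer left on.  If check_writebuffer_bugs() below finds
 * that the write buffer cannot keep physically-aliased writes
 * coherent, this is downgraded to fully uncached at boot.
 */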
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

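/*
 * Pre-ARMv6 cores have virtually-indexed, virtually-tagged (VIVT)
 * data caches, so two virtual mappings of one physical page can hold
 * inconsistent cache lines.  Everything up to the matching #endif
 * deals with that case; ARMv6+ caches do not alias in this way.
 */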
#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already setup to
	 * fault (ie, is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}

#if USE_SPLIT_PTE_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the page
 * lock here.  Otherwise we are using shared mm->page_table_lock
 * which is already locked, thus cannot take it.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
	/*
	 * Use nested version here to indicate that we are already
	 * holding one similar spinlock.
	 */
	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTE_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

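/*
 * Walk vma->vm_mm's page tables down to the PTE that maps @address,
 * then apply do_adjust_pte() to it under the PTE lock.  Returns
 * nonzero if a present PTE was found there.
 */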
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map(pmd, address);
	do_pte_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	do_pte_unlock(ptl);
	pte_unmap(pte);

	return ret;
}

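/*
 * Fix up cache aliases for a page that is mapped shared more than once
 * in the same mm: visit every other shared mapping of the same object,
 * switch any alias of this page to the memory type in shared_pte_mask,
 * and finally adjust the faulting PTE itself if aliases were found.
 */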
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping_file(page);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}
#endif	/* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
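/*
 * p1 and p2 are two virtual aliases of the same physical word.  Write
 * 1 through one alias and 0 through the other, then read back through
 * the first: on coherent hardware the read must observe the 0; seeing
 * the stale 1 means writes through distinct aliases can overtake each
 * other in the write buffer, so the work-around is needed.
 */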
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	pr_info("CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page";
	}

	if (v) {
		pr_cont("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		pr_cont("ok\n");
	}
}

v6.8
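
The same file follows as of v6.8. Relative to v4.17, the visible changes are: the GPL notice is replaced by an SPDX-License-Identifier line; <asm/pgtable.h> is no longer included; adjust_pte() walks an extra p4d level and obtains the PTE with pte_offset_map_nolock(), which may fail and return NULL; the zero-page check becomes is_zero_pfn(); and update_mmu_cache() becomes update_mmu_cache_range(), operating on folios via page_folio(), folio_flush_mapping() and __flush_dcache_folio().
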
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include "mm.h"

static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already setup to
	 * fault (ie, is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}

#if USE_SPLIT_PTE_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the page
 * lock here.  Otherwise we are using shared mm->page_table_lock
 * which is already locked, thus cannot take it.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
	/*
	 * Use nested version here to indicate that we are already
	 * holding one similar spinlock.
	 */
	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTE_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none_or_clear_bad(p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	pte = pte_offset_map_nolock(vma->vm_mm, pmd, address, &ptl);
	if (!pte)
		return 0;

	do_pte_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	do_pte_unlock(ptl);
	pte_unmap(pte);

	return ret;
}

static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep, unsigned int nr)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct folio *folio;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (is_zero_pfn(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));
	mapping = folio_flush_mapping(folio);
	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
		__flush_dcache_folio(mapping, folio);
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}
#endif	/* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	pr_info("CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page";
	}

	if (v) {
		pr_cont("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		pr_cont("ok\n");
	}
}
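
For illustration, here is a minimal user-space sketch (not part of the kernel source) of the situation make_coherent() exists to handle: one process mapping the same file page MAP_SHARED at two different virtual addresses. On a VIVT-cache ARM core the two addresses index different cache lines for the same physical data, which is why adjust_pte() switches such PTEs to the memory type in shared_pte_mask. The file name alias-test.tmp is arbitrary.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	int fd = open("alias-test.tmp", O_RDWR | O_CREAT | O_TRUNC, 0600);

	if (fd < 0 || ftruncate(fd, pagesize) != 0)
		return 1;

	/*
	 * Two MAP_SHARED mappings of the same file page: two virtual
	 * aliases of one physical page, exactly the case the interval
	 * tree walk in make_coherent() searches for.
	 */
	char *a = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);
	char *b = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);
	if (a == MAP_FAILED || b == MAP_FAILED)
		return 1;

	/*
	 * A write through one alias must be readable through the other.
	 * On a VIVT D-cache this only holds because the kernel marks
	 * both PTEs with shared_pte_mask (bufferable or uncached).
	 */
	strcpy(a, "hello");
	printf("read back through second mapping: %s\n", b);

	munmap(a, pagesize);
	munmap(b, pagesize);
	close(fd);
	unlink("alias-test.tmp");
	return 0;
}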