// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mm/fault-armv.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Modifications for ARM processor (c) 1995-2002 Russell King
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"

static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already set up to
	 * fault (ie, is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
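		/*
		 * A cacheable alias is live: write back and invalidate
		 * any lines for this page in the inner and outer caches,
		 * switch the PTE's memory type to the shared (uncached
		 * but bufferable) type, and drop the now-stale TLB entry.
		 */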
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}

#if USE_SPLIT_PTE_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the page
 * table lock here.  Otherwise we are using the shared
 * mm->page_table_lock, which is already held, so we must not take
 * it again.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
	/*
	 * Use the nested version here to indicate that we are already
	 * holding one similar spinlock.
	 */
	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTE_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map(pmd, address);
	do_pte_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	do_pte_unlock(ptl);
	pte_unmap(pte);

	return ret;
}

static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

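	/*
	 * Convert the faulting address into an offset (in pages) within
	 * the backing object, so we can find every other VMA that maps
	 * the same page of that object.
	 */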
	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
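		/*
		 * Translate the object offset back into a user virtual
		 * address within this other mapping of the same page.
		 */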
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

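	/*
	 * page_mapping_file() returns NULL for anonymous and swap-cache
	 * pages; only file-backed pages can have the extra shared user
	 * mappings that make_coherent() deals with below.
	 */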
	mapping = page_mapping_file(page);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);
	if (mapping) {
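		/*
		 * A VIVT cache can alias between any two virtual
		 * mappings of the same physical page, so fix up all
		 * mappings.  Otherwise only the I-cache needs attention
		 * here, and only for executable mappings.
		 */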
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}
#endif	/* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

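	/*
	 * p1 and p2 are two virtual aliases of the same physical word.
	 * Write 1 through one alias, then 0 through the other, and read
	 * the first back.  If the write buffer does not handle the two
	 * virtual aliases of the same physical address correctly, the
	 * read returns the stale value 1 and the test reports a bug.
	 */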
	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	pr_info("CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

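		/*
		 * Map the same page at two different virtual addresses
		 * with the bufferable memory type, giving us the aliased
		 * pair that check_writebuffer() needs.
		 */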
		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page";
	}

	if (v) {
		pr_cont("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		pr_cont("ok\n");
	}
}