// SPDX-License-Identifier: GPL-2.0
/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in linux/pgtable.h
 *
 * Copyright (C) 2010 Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <asm/tlb.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none. Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stubbed out just as for p4d/pud
 * above: pmd folding is special, and the pmd_* macros typically refer to the
 * upper level even when folded.
 */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

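/*
 * For reference (a sketch, not part of this file): the
 * p?d_none_or_clear_bad() helpers in <linux/pgtable.h> call the
 * functions above roughly like this:
 *
 *	static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 *	{
 *		if (pmd_none(*pmd))
 *			return 1;
 *		if (unlikely(pmd_bad(*pmd))) {
 *			pmd_clear_bad(pmd);
 *			return 1;
 *		}
 *		return 0;
 *	}
 */
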
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache. This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif
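
/*
 * Caller-side sketch (simplified from the pte fault path in
 * mm/memory.c): the return value tells the caller whether to call
 * update_mmu_cache():
 *
 *	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
 *		update_mmu_cache(vma, address, ptep);
 */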

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif
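
/*
 * Usage sketch (modeled on the rmap aging path): reclaim clears the
 * young bit to track references, flushing the TLB so the hardware must
 * set the bit again on the next access:
 *
 *	if (ptep_clear_flush_young(vma, address, ptep))
 *		referenced++;
 */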

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif
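
/*
 * Note: the flush is skipped when pte_accessible() says the old pte
 * could never have been cached in the TLB (e.g. it was not present),
 * and the caller gets back the faithful old pte either way. A typical
 * caller pattern (hypothetical sketch):
 *
 *	pte_t old = ptep_clear_flush(vma, address, ptep);
 *	if (pte_dirty(old))
 *		set_page_dirty(page);
 */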

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif
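
/*
 * As with the pte variant, the caller uses the return value to decide
 * whether to call update_mmu_cache_pmd() (sketch, simplified from the
 * THP fault path in mm/huge_memory.c):
 *
 *	if (pmdp_set_access_flags(vma, haddr, pmdp, entry, dirty))
 *		update_mmu_cache_pmd(vma, address, pmdp);
 */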

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		  !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#endif /* __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH */
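
/*
 * Sketch of the intended pattern (hypothetical caller): atomically
 * fetch and clear a huge pmd, with the TLB flushed before the old
 * entry is reused, e.g. when tearing down or replacing a huge mapping:
 *
 *	pmd_t old = pmdp_huge_clear_flush(vma, haddr, pmdp);
 *	... inspect pmd_dirty(old) / pmd_young(old), then repopulate ...
 */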

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif
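
/*
 * Why deposit? When a huge pmd is installed, a preallocated pte page
 * table is stashed away so that a later split can repopulate the range
 * without having to allocate (sketch, assuming the pmd lock is held):
 *
 *	pgtable_t pgtable = pte_alloc_one(mm);
 *	...
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *	set_pmd_at(mm, haddr, pmdp, entry);
 */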

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page coloring of some archs */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif
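
/*
 * Counterpart sketch (simplified from the huge-pmd split path): the
 * deposited table is withdrawn and used to repopulate the pmd:
 *
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
 *	pmd_populate(mm, &_pmd, pgtable);
 */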

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif
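
/*
 * The returned old pmd carries the final dirty/young state, which the
 * split path relies on (sketch, simplified from __split_huge_pmd_locked):
 *
 *	pmd_t old = pmdp_invalidate(vma, haddr, pmdp);
 *	bool dirty = pmd_dirty(old);
 *	bool young = pmd_young(old);
 */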

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and huge page pte formats are the same, so we can
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down the ptes, not the pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
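
/*
 * Usage sketch (modeled on the khugepaged collapse path): the pmd that
 * still points at a pte table is cleared and flushed before the ptes
 * are collapsed into a huge page:
 *
 *	_pmd = pmdp_collapse_flush(vma, address, pmd);
 */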
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */