// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none.  Usually (but
 * very seldom) called from the p?d_none_or_clear_bad helpers.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}
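
/*
 * Illustrative sketch (not part of the original file): this is roughly
 * how a generic page-table walker consumes the helpers above, via the
 * p?d_none_or_clear_bad() wrappers from asm-generic/pgtable.h.  A bad
 * entry is reported and reset by pmd_clear_bad(), then skipped.  The
 * function name is made up for illustration; compiled out with #if 0.
 */
#if 0
static void example_walk_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);
		/* pmd_none_or_clear_bad() may call pmd_clear_bad() */
		if (pmd_none_or_clear_bad(pmd))
			continue;
		/* ... descend to the PTE level for [addr, next) ... */
	} while (pmd++, addr = next, addr != end);
}
#endif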

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission.  Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this.  We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache().  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif
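
/*
 * Illustrative sketch (not part of the original file): a fault handler
 * uses the return value to decide whether update_mmu_cache() is needed,
 * much as mm/memory.c does when marking a PTE young.  The function name
 * is made up for illustration; compiled out with #if 0.
 */
#if 0
static void example_mark_accessed(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep)
{
	pte_t entry = pte_mkyoung(*ptep);

	/* Only touch the TLB/MMU caches if the PTE actually changed. */
	if (ptep_set_access_flags(vma, address, ptep, entry, 0))
		update_mmu_cache(vma, address, ptep);
}
#endif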

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif
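
/*
 * Illustrative sketch (not part of the original file): rmap unmapping
 * (try_to_unmap_one() in mm/rmap.c) clears PTEs this way; the returned
 * old PTE value is what lets the caller preserve the hardware dirty
 * bit.  The pte_accessible() check above lets architectures skip the
 * flush when the old PTE can never be live in any TLB.  Made-up
 * function name, compiled out with #if 0.
 */
#if 0
static void example_unmap_one(struct vm_area_struct *vma,
			      unsigned long address, pte_t *ptep)
{
	pte_t old = ptep_clear_flush(vma, address, ptep);

	/* Transfer the hardware dirty bit before the mapping is gone. */
	if (pte_dirty(old))
		set_page_dirty(pte_page(old));
}
#endif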

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		   !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys page coloring on some architectures */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif
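
/*
 * Illustrative sketch (not part of the original file): deposit and
 * withdraw are used as a pair under the PMD lock.  When a huge PMD is
 * installed, the now-unused PTE page table is deposited; when the huge
 * PMD is later split or zapped, the table is withdrawn again, so the
 * split path never has to allocate (and thus cannot fail).  Made-up
 * function name, compiled out with #if 0.
 */
#if 0
static void example_deposit_withdraw(struct mm_struct *mm, pmd_t *pmdp,
				     pgtable_t pgtable)
{
	spinlock_t *ptl = pmd_lock(mm, pmdp);

	pgtable_trans_huge_deposit(mm, pmdp, pgtable);	/* at collapse */
	/* ... the huge PMD is in use here ... */
	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);	/* at split */
	spin_unlock(ptl);
}
#endif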

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mknotpresent(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif
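
/*
 * Illustrative sketch (not part of the original file): the THP split
 * path (see __split_huge_pmd_locked() in mm/huge_memory.c) relies on
 * the old PMD value returned above, so dirty/accessed bits set by
 * hardware while the entry was being invalidated are not lost.
 * Made-up function name, compiled out with #if 0.
 */
#if 0
static void example_invalidate(struct vm_area_struct *vma,
			       unsigned long haddr, pmd_t *pmdp)
{
	pmd_t old = pmdp_invalidate(vma, haddr, pmdp);

	/* Propagate a racing hardware dirty bit to the page. */
	if (pmd_dirty(old))
		set_page_dirty(pmd_page(old));
}
#endif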

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and hugepage pte formats are the same, so we can use
	 * the same function for both.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down the ptes, not the pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */