// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * HugeTLB Support Matrix
 *
 * ---------------------------------------------------
 * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
 * ---------------------------------------------------
 * |     4K    |   64K    |   2M  |    32M   |   1G  |
 * |    16K    |    2M    |  32M  |     1G   |       |
 * |    64K    |    2M    |  512M |    16G   |       |
 * ---------------------------------------------------
 */
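
/*
 * The contiguous sizes above follow from how many entries may carry
 * the contiguous bit at each level. A worked example for the 4K
 * granule (a sketch, assuming the usual ARMv8 values CONT_PTES == 16
 * and CONT_PMDS == 16):
 *
 *      CONT_PTE_SIZE = CONT_PTES * PAGE_SIZE = 16 * 4K = 64K
 *      PMD_SIZE      = 512 * PAGE_SIZE       = 2M
 *      CONT_PMD_SIZE = CONT_PMDS * PMD_SIZE  = 16 * 2M = 32M
 *      PUD_SIZE      = 512 * PMD_SIZE        = 1G
 */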

/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages can still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
        int order;

        if (pud_sect_supported())
                order = PUD_SHIFT - PAGE_SHIFT;
        else
                order = CONT_PMD_SHIFT - PAGE_SHIFT;

        /*
         * HugeTLB CMA reservation is required for gigantic
         * huge pages which cannot be allocated via the page
         * allocator. Just warn if there is any change
         * breaking this assumption.
         */
        WARN_ON(order <= MAX_ORDER);
        hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */
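
/*
 * Worked example (a sketch, assuming a 4K granule with PUD_SHIFT == 30
 * and PAGE_SHIFT == 12): order = 30 - 12 = 18, so the CMA area is
 * reserved in 1G units. On a 64K granule, which has no section-mapped
 * PUDs, CONT_PMD_SHIFT - PAGE_SHIFT = 34 - 16 = 18, i.e. 16G units.
 */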

static bool __hugetlb_valid_size(unsigned long size)
{
        switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SIZE:
                return pud_sect_supported();
#endif
        case CONT_PMD_SIZE:
        case PMD_SIZE:
        case CONT_PTE_SIZE:
                return true;
        }

        return false;
}

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
        size_t pagesize = huge_page_size(h);

        if (!__hugetlb_valid_size(pagesize)) {
                pr_warn("%s: unrecognized huge page size 0x%lx\n",
                        __func__, pagesize);
                return false;
        }
        return true;
}
#endif

int pmd_huge(pmd_t pmd)
{
        return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}

int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
        return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
        return 0;
#endif
}

static int find_num_contig(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, size_t *pgsize)
{
        pgd_t *pgdp = pgd_offset(mm, addr);
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;

        *pgsize = PAGE_SIZE;
        p4dp = p4d_offset(pgdp, addr);
        pudp = pud_offset(p4dp, addr);
        pmdp = pmd_offset(pudp, addr);
        if ((pte_t *)pmdp == ptep) {
                *pgsize = PMD_SIZE;
                return CONT_PMDS;
        }
        return CONT_PTES;
}
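
/*
 * find_num_contig() only has to tell the two contiguous cases apart:
 * it re-walks the page tables and compares the PMD slot it lands on
 * with the ptep it was handed. If they are the same pointer, ptep
 * names a block entry at PMD level, so the set spans CONT_PMDS entries
 * of PMD_SIZE each; otherwise ptep must sit in a last-level table and
 * the set spans CONT_PTES entries of PAGE_SIZE. Callers only reach
 * this helper for ptes that have the contiguous bit set.
 */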

static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
        int contig_ptes = 0;

        *pgsize = size;

        switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SIZE:
                if (pud_sect_supported())
                        contig_ptes = 1;
                break;
#endif
        case PMD_SIZE:
                contig_ptes = 1;
                break;
        case CONT_PMD_SIZE:
                *pgsize = PMD_SIZE;
                contig_ptes = CONT_PMDS;
                break;
        case CONT_PTE_SIZE:
                *pgsize = PAGE_SIZE;
                contig_ptes = CONT_PTES;
                break;
        }

        return contig_ptes;
}
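
/*
 * Worked example (assuming a 4K granule, so CONT_PTES == 16 and
 * CONT_PMDS == 16): num_contig_ptes(CONT_PTE_SIZE, &pgsize) sets
 * pgsize to 4K and returns 16, while num_contig_ptes(PMD_SIZE, &pgsize)
 * sets pgsize to 2M and returns 1. An unsupported size returns 0,
 * which turns any caller's per-entry loop into a no-op.
 */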

pte_t huge_ptep_get(pte_t *ptep)
{
        int ncontig, i;
        size_t pgsize;
        pte_t orig_pte = ptep_get(ptep);

        if (!pte_present(orig_pte) || !pte_cont(orig_pte))
                return orig_pte;

        ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
        for (i = 0; i < ncontig; i++, ptep++) {
                pte_t pte = ptep_get(ptep);

                if (pte_dirty(pte))
                        orig_pte = pte_mkdirty(orig_pte);

                if (pte_young(pte))
                        orig_pte = pte_mkyoung(orig_pte);
        }
        return orig_pte;
}
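
/*
 * With hardware access/dirty bit management (HW_AFDBM) the CPU may set
 * the dirty or young bit on any single entry of a contiguous set, so a
 * faithful "get" has to OR those bits across the whole set. For
 * example, if only ptep[3] of a 16-entry CONT_PTE set was written to,
 * the value returned for ptep[0] must still read back as dirty.
 */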

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_contig(struct mm_struct *mm,
                              unsigned long addr,
                              pte_t *ptep,
                              unsigned long pgsize,
                              unsigned long ncontig)
{
        pte_t orig_pte = ptep_get(ptep);
        unsigned long i;

        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
                pte_t pte = ptep_get_and_clear(mm, addr, ptep);

                /*
                 * If HW_AFDBM is enabled, then the HW could turn on
                 * the dirty or accessed bit for any page in the set,
                 * so check them all.
                 */
                if (pte_dirty(pte))
                        orig_pte = pte_mkdirty(orig_pte);

                if (pte_young(pte))
                        orig_pte = pte_mkyoung(orig_pte);
        }
        return orig_pte;
}

static pte_t get_clear_contig_flush(struct mm_struct *mm,
                                    unsigned long addr,
                                    pte_t *ptep,
                                    unsigned long pgsize,
                                    unsigned long ncontig)
{
        pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
        struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);

        flush_tlb_range(&vma, addr, addr + (pgsize * ncontig));
        return orig_pte;
}
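
/*
 * Typical break-before-make sequence, as used by the updaters further
 * down in this file (a sketch of the pattern, not new API):
 *
 *      ncontig = find_num_contig(mm, addr, ptep, &pgsize);
 *      orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
 *      ... compute new bits, preserving dirty/young from orig_pte ...
 *      for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
 *              set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
 *
 * The TLB flush between the clear and the new writes is what avoids
 * the "misprogrammed contiguous bit" hazard described above.
 */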

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
                        unsigned long addr,
                        pte_t *ptep,
                        unsigned long pgsize,
                        unsigned long ncontig)
{
        struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
        unsigned long i, saddr = addr;

        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
                pte_clear(mm, addr, ptep);

        flush_tlb_range(&vma, saddr, addr);
}

static inline struct folio *hugetlb_swap_entry_to_folio(swp_entry_t entry)
{
        VM_BUG_ON(!is_migration_entry(entry) && !is_hwpoison_entry(entry));

        return page_folio(pfn_to_page(swp_offset_pfn(entry)));
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte)
{
        size_t pgsize;
        int i;
        int ncontig;
        unsigned long pfn, dpfn;
        pgprot_t hugeprot;

        if (!pte_present(pte)) {
                struct folio *folio;

                folio = hugetlb_swap_entry_to_folio(pte_to_swp_entry(pte));
                ncontig = num_contig_ptes(folio_size(folio), &pgsize);

                for (i = 0; i < ncontig; i++, ptep++)
                        set_pte_at(mm, addr, ptep, pte);
                return;
        }

        if (!pte_cont(pte)) {
                set_pte_at(mm, addr, ptep, pte);
                return;
        }

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        pfn = pte_pfn(pte);
        dpfn = pgsize >> PAGE_SHIFT;
        hugeprot = pte_pgprot(pte);

        clear_flush(mm, addr, ptep, pgsize, ncontig);

        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
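
/*
 * Worked example (assuming a 4K granule): installing a present 64K
 * CONT_PTE mapping means ncontig == 16, pgsize == 4K and dpfn ==
 * pgsize >> PAGE_SHIFT == 1, so the loop writes 16 consecutive ptes
 * whose pfns increase by one, all carrying the same pgprot with the
 * contiguous bit set. For a 32M CONT_PMD mapping the same loop runs
 * with ncontig == 16, pgsize == 2M and dpfn == 512.
 */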

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep = NULL;

        pgdp = pgd_offset(mm, addr);
        p4dp = p4d_offset(pgdp, addr);
        pudp = pud_alloc(mm, p4dp, addr);
        if (!pudp)
                return NULL;

        if (sz == PUD_SIZE) {
                ptep = (pte_t *)pudp;
        } else if (sz == (CONT_PTE_SIZE)) {
                pmdp = pmd_alloc(mm, pudp, addr);
                if (!pmdp)
                        return NULL;

                WARN_ON(addr & (sz - 1));
                /*
                 * Note that if this code were ever ported to the
                 * 32-bit arm platform then it will cause trouble in
                 * the case where CONFIG_HIGHPTE is set, since there
                 * will be no pte_unmap() to correspond with this
                 * pte_alloc_map().
                 */
                ptep = pte_alloc_map(mm, pmdp, addr);
        } else if (sz == PMD_SIZE) {
                if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
                        ptep = huge_pmd_share(mm, vma, addr, pudp);
                else
                        ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
        } else if (sz == (CONT_PMD_SIZE)) {
                pmdp = pmd_alloc(mm, pudp, addr);
                WARN_ON(addr & (sz - 1));
                return (pte_t *)pmdp;
        }

        return ptep;
}
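
/*
 * In short, the requested size selects the level of the returned slot:
 * PUD_SIZE yields the pud entry itself, PMD_SIZE a pmd entry (possibly
 * shared with other mappings of the same file via huge_pmd_share()),
 * CONT_PMD_SIZE the first pmd of a contiguous set, and CONT_PTE_SIZE
 * the first pte of a contiguous set in a last-level table. Any other
 * size falls through and returns NULL.
 */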

pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp, pud;
        pmd_t *pmdp, pmd;

        pgdp = pgd_offset(mm, addr);
        if (!pgd_present(READ_ONCE(*pgdp)))
                return NULL;

        p4dp = p4d_offset(pgdp, addr);
        if (!p4d_present(READ_ONCE(*p4dp)))
                return NULL;

        pudp = pud_offset(p4dp, addr);
        pud = READ_ONCE(*pudp);
        if (sz != PUD_SIZE && pud_none(pud))
                return NULL;
        /* hugepage or swap? */
        if (pud_huge(pud) || !pud_present(pud))
                return (pte_t *)pudp;
        /* table; check the next level */

        if (sz == CONT_PMD_SIZE)
                addr &= CONT_PMD_MASK;

        pmdp = pmd_offset(pudp, addr);
        pmd = READ_ONCE(*pmdp);
        if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
            pmd_none(pmd))
                return NULL;
        if (pmd_huge(pmd) || !pmd_present(pmd))
                return (pte_t *)pmdp;

        if (sz == CONT_PTE_SIZE)
                return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK));

        return NULL;
}
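
/*
 * Two details worth noting: the walk returns a non-present entry (a
 * migration or hwpoison swap entry) at the level implied by sz rather
 * than NULL, so callers can inspect it; and for the contiguous cases
 * the address is masked down (CONT_PMD_MASK / CONT_PTE_MASK) so the
 * returned pointer is always the first entry of the set, which is what
 * the set/clear helpers in this file expect.
 */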

unsigned long hugetlb_mask_last_page(struct hstate *h)
{
        unsigned long hp_size = huge_page_size(h);

        switch (hp_size) {
#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SIZE:
                return PGDIR_SIZE - PUD_SIZE;
#endif
        case CONT_PMD_SIZE:
                return PUD_SIZE - CONT_PMD_SIZE;
        case PMD_SIZE:
                return PUD_SIZE - PMD_SIZE;
        case CONT_PTE_SIZE:
                return PMD_SIZE - CONT_PTE_SIZE;
        default:
                break;
        }

        return 0UL;
}
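
/*
 * The returned value masks the address bits that can be skipped when a
 * whole upper-level entry turns out to be empty. Worked example
 * (assuming a 4K granule): for 2M PMD huge pages the mask is
 * PUD_SIZE - PMD_SIZE = 1G - 2M, so when the generic walker finds an
 * empty pud it can jump straight to the next 1G boundary instead of
 * probing every 2M slot beneath it.
 */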

pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
        size_t pagesize = 1UL << shift;

        entry = pte_mkhuge(entry);
        if (pagesize == CONT_PTE_SIZE) {
                entry = pte_mkcont(entry);
        } else if (pagesize == CONT_PMD_SIZE) {
                entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
        } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
                pr_warn("%s: unrecognized huge page size 0x%lx\n",
                        __func__, pagesize);
        }
        return entry;
}

void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, unsigned long sz)
{
        int i, ncontig;
        size_t pgsize;

        ncontig = num_contig_ptes(sz, &pgsize);

        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
                pte_clear(mm, addr, ptep);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                              unsigned long addr, pte_t *ptep)
{
        int ncontig;
        size_t pgsize;
        pte_t orig_pte = ptep_get(ptep);

        if (!pte_cont(orig_pte))
                return ptep_get_and_clear(mm, addr, ptep);

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);

        return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}

/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range, write permission only needs to be
 * checked against the first pte in the set; dirty and young, however,
 * may differ between entries, so they are checked across all of them.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
        int i;

        if (pte_write(pte) != pte_write(ptep_get(ptep)))
                return 1;

        for (i = 0; i < ncontig; i++) {
                pte_t orig_pte = ptep_get(ptep + i);

                if (pte_dirty(pte) != pte_dirty(orig_pte))
                        return 1;

                if (pte_young(pte) != pte_young(orig_pte))
                        return 1;
        }

        return 0;
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                               unsigned long addr, pte_t *ptep,
                               pte_t pte, int dirty)
{
        int ncontig, i;
        size_t pgsize = 0;
        unsigned long pfn = pte_pfn(pte), dpfn;
        struct mm_struct *mm = vma->vm_mm;
        pgprot_t hugeprot;
        pte_t orig_pte;

        if (!pte_cont(pte))
                return ptep_set_access_flags(vma, addr, ptep, pte, dirty);

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        dpfn = pgsize >> PAGE_SHIFT;

        if (!__cont_access_flags_changed(ptep, pte, ncontig))
                return 0;

        orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);

        /* Make sure we don't lose the dirty or young state */
        if (pte_dirty(orig_pte))
                pte = pte_mkdirty(pte);

        if (pte_young(orig_pte))
                pte = pte_mkyoung(pte);

        hugeprot = pte_pgprot(pte);
        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));

        return 1;
}
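
/*
 * This is the full break-before-make cycle in action: bail out early
 * if nothing actually changed, otherwise clear and flush the whole
 * contiguous set, fold back any dirty/young bits the hardware set in
 * the meantime, and only then rewrite every entry. The early-out is an
 * optimisation; without it the code would still be correct but would
 * flush the TLB range on every call.
 */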

void huge_ptep_set_wrprotect(struct mm_struct *mm,
                             unsigned long addr, pte_t *ptep)
{
        unsigned long pfn, dpfn;
        pgprot_t hugeprot;
        int ncontig, i;
        size_t pgsize;
        pte_t pte;

        if (!pte_cont(READ_ONCE(*ptep))) {
                ptep_set_wrprotect(mm, addr, ptep);
                return;
        }

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        dpfn = pgsize >> PAGE_SHIFT;

        pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
        pte = pte_wrprotect(pte);

        hugeprot = pte_pgprot(pte);
        pfn = pte_pfn(pte);

        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}

pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
                            unsigned long addr, pte_t *ptep)
{
        struct mm_struct *mm = vma->vm_mm;
        size_t pgsize;
        int ncontig;

        if (!pte_cont(READ_ONCE(*ptep)))
                return ptep_clear_flush(vma, addr, ptep);

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
}

static int __init hugetlbpage_init(void)
{
        if (pud_sect_supported())
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);

        hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
        hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

        return 0;
}
arch_initcall(hugetlbpage_init);
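
/*
 * hugetlb_add_hstate() takes the huge page order in base pages.
 * Worked example for a 4K granule with section-mapped PUDs: orders
 * 18 (1G), 13 (32M), 9 (2M) and 4 (64K) are registered, matching the
 * support matrix at the top of this file.
 */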

bool __init arch_hugetlb_valid_size(unsigned long size)
{
        return __hugetlb_valid_size(size);
}

pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
            cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
                /*
                 * Break-before-make (BBM) is required for all user space
                 * mappings when the permission changes from executable to
                 * non-executable on CPUs affected by erratum #2645198.
                 */
                if (pte_user_exec(READ_ONCE(*ptep)))
                        return huge_ptep_clear_flush(vma, addr, ptep);
        }
        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
                                  pte_t old_pte, pte_t pte)
{
        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}