// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
EXPORT_SYMBOL(physical_mask);
#endif

#ifdef CONFIG_HIGHPTE
#define PGTABLE_HIGHMEM __GFP_HIGHMEM
#else
#define PGTABLE_HIGHMEM 0
#endif

#ifndef CONFIG_PARAVIRT
static inline
void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	tlb_remove_page(tlb, table);
}
#endif

gfp_t __userpte_alloc_gfp = GFP_PGTABLE_USER | PGTABLE_HIGHMEM;

pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	return __pte_alloc_one(mm, __userpte_alloc_gfp);
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);
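
/*
 * Example: booting with "userpte=nohigh" on the kernel command line clears
 * __GFP_HIGHMEM from __userpte_alloc_gfp, so pte_alloc_one() above only hands
 * out lowmem pages for user page tables (this only matters with
 * CONFIG_HIGHPTE).
 */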

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pagetable_pte_dtor(page_ptdesc(pte));
	paravirt_release_pte(page_to_pfn(pte));
	paravirt_tlb_remove_table(tlb, pte);
}

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pagetable_pmd_dtor(ptdesc);
	paravirt_tlb_remove_table(tlb, ptdesc_page(ptdesc));
}

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pud);

	pagetable_pud_dtor(ptdesc);
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	paravirt_tlb_remove_table(tlb, virt_to_page(pud));
}

#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
	paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
	paravirt_tlb_remove_table(tlb, virt_to_page(p4d));
}
#endif /* CONFIG_PGTABLE_LEVELS > 4 */
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
#endif /* CONFIG_PGTABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pgd);

	list_add(&ptdesc->pt_list, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pgd);

	list_del(&ptdesc->pt_list);
}

#define UNSHARED_PTRS_PER_PGD \
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
#define MAX_UNSHARED_PTRS_PER_PGD \
	max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)


static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	virt_to_ptdesc(pgd)->pt_mm = mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return page_ptdesc(page)->pt_mm;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (CONFIG_PGTABLE_LEVELS == 2 ||
	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    CONFIG_PGTABLE_LEVELS >= 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
#define MAX_PREALLOCATED_PMDS	MAX_UNSHARED_PTRS_PER_PGD
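
/*
 * Worked example, assuming the default 3G/1G split on 32-bit PAE:
 * PTRS_PER_PGD is 4 and KERNEL_PGD_BOUNDARY = pgd_index(PAGE_OFFSET) = 3,
 * so PREALLOCATED_PMDS is 3 pmd pages when the kernel pmd is shared and
 * all 4 when it is not (e.g. under Xen).
 */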

/*
 * We allocate separate PMDs for the kernel part of the user page-table
 * when PTI is enabled. We need them to map the per-process LDT into the
 * user-space page-table.
 */
#define PREALLOCATED_USER_PMDS (boot_cpu_has(X86_FEATURE_PTI) ? \
					KERNEL_PGD_PTRS : 0)
#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0
#define MAX_PREALLOCATED_PMDS	0
#define PREALLOCATED_USER_PMDS	0
#define MAX_PREALLOCATED_USER_PMDS	0
#endif /* CONFIG_X86_PAE */

static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
	int i;
	struct ptdesc *ptdesc;

	for (i = 0; i < count; i++)
		if (pmds[i]) {
			ptdesc = virt_to_ptdesc(pmds[i]);

			pagetable_pmd_dtor(ptdesc);
			pagetable_free(ptdesc);
			mm_dec_nr_pmds(mm);
		}
}

static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
	int i;
	bool failed = false;
	gfp_t gfp = GFP_PGTABLE_USER;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	gfp &= ~__GFP_HIGHMEM;

	for (i = 0; i < count; i++) {
		pmd_t *pmd = NULL;
		struct ptdesc *ptdesc = pagetable_alloc(gfp, 0);

		if (!ptdesc)
			failed = true;
		if (ptdesc && !pagetable_pmd_ctor(ptdesc)) {
			pagetable_free(ptdesc);
			ptdesc = NULL;
			failed = true;
		}
		if (ptdesc) {
			mm_inc_nr_pmds(mm);
			pmd = ptdesc_address(ptdesc);
		}

		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(mm, pmds, count);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = *pgdp;

	if (pgd_val(pgd) != 0) {
		pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

		pgd_clear(pgdp);

		paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
		pmd_free(mm, pmd);
		mm_dec_nr_pmds(mm);
	}
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		mop_up_one_pmd(mm, &pgdp[i]);

#ifdef CONFIG_PAGE_TABLE_ISOLATION

	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	pgdp = kernel_to_user_pgdp(pgdp);

	for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
		mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
#endif
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	p4d_t *p4d;
	pud_t *pud;
	int i;

	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
				     pgd_t *k_pgd, pmd_t *pmds[])
{
	pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	p4d_t *u_p4d;
	pud_t *u_pud;
	int i;

	u_p4d = p4d_offset(u_pgd, 0);
	u_pud = pud_offset(u_p4d, 0);

	s_pgd += KERNEL_PGD_BOUNDARY;
	u_pud += KERNEL_PGD_BOUNDARY;

	for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
		pmd_t *pmd = pmds[i];

		memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
		       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, u_pud, pmd);
	}

}
#else
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
				     pgd_t *k_pgd, pmd_t *pmds[])
{
}
#endif
/*
 * Xen paravirt assumes that the pgd table occupies one whole page. The
 * 64-bit kernel also assumes that the pgd is one page.
 *
 * But a kernel with PAE paging that is not running as a Xen domain
 * only needs to allocate 32 bytes for the pgd instead of one page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN	32
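
/*
 * Size check (illustrative): with PAE, PTRS_PER_PGD is 4 and each pgd_t
 * (PDPT entry) is 8 bytes, so PGD_SIZE is 4 * 8 = 32 bytes -- hence the
 * 32-byte slab object size and alignment used below.
 */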

static struct kmem_cache *pgd_cache;

void __init pgtable_cache_init(void)
{
	/*
	 * When the PAE kernel is running as a Xen domain, it does not use
	 * a shared kernel pmd, which requires a whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return;

	/*
	 * When the PAE kernel is not running as a Xen domain, it uses a
	 * shared kernel pmd, which does not require a whole page for the
	 * pgd: 32 bytes are enough. Create a 32-byte slab cache at boot
	 * time for pgd table allocations.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
				      SLAB_PANIC, NULL);
}

static inline pgd_t *_pgd_alloc(void)
{
	/*
	 * Without SHARED_KERNEL_PMD the PAE kernel is running as a Xen
	 * domain and needs a whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
						 PGD_ALLOCATION_ORDER);

	/*
	 * Otherwise the PAE kernel is not running as a Xen domain and a
	 * 32-byte slab object is enough for the pgd.
	 */
	return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER);
}

static inline void _pgd_free(pgd_t *pgd)
{
	if (!SHARED_KERNEL_PMD)
		free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
	else
		kmem_cache_free(pgd_cache, pgd);
}
#else

static inline pgd_t *_pgd_alloc(void)
{
	return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
					 PGD_ALLOCATION_ORDER);
}

static inline void _pgd_free(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
}
#endif /* CONFIG_X86_PAE */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
	pmd_t *pmds[MAX_PREALLOCATED_PMDS];

	pgd = _pgd_alloc();

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (sizeof(pmds) != 0 &&
			preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
		goto out_free_pgd;

	if (sizeof(u_pmds) != 0 &&
			preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
		goto out_free_pmds;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_user_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	if (sizeof(pmds) != 0)
		pgd_prepopulate_pmd(mm, pgd, pmds);

	if (sizeof(u_pmds) != 0)
		pgd_prepopulate_user_pmd(mm, pgd, u_pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_user_pmds:
	if (sizeof(u_pmds) != 0)
		free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
out_free_pmds:
	if (sizeof(pmds) != 0)
		free_pmds(mm, pmds, PREALLOCATED_PMDS);
out_free_pgd:
	_pgd_free(pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	_pgd_free(pgd);
}
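
/*
 * Illustrative sketch (not part of this file): roughly how the mm setup code
 * in kernel/fork.c pairs the two functions above when creating and tearing
 * down an address space. The function names below are made up for the
 * example and elide the surrounding error handling of the real callers.
 */
#if 0
static int example_mm_alloc_pgd(struct mm_struct *mm)
{
	/* Allocates the pgd and pre-populates PAE/PTI pmds as needed. */
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static void example_mm_free_pgd(struct mm_struct *mm)
{
	/* Mops up any preallocated pmds and releases the pgd itself. */
	pgd_free(mm, mm->pgd);
}
#endif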

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty)
		set_pte(ptep, entry);

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		set_pmd(pmdp, entry);
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}

int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pud_t *pudp, pud_t entry, int dirty)
{
	int changed = !pud_same(*pudp, entry);

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);

	if (changed && dirty) {
		set_pud(pudp, entry);
		/*
		 * We had a write-protection fault here and changed the pud
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif

int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	return ret;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	return ret;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pudp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pud_t *pudp)
{
	int ret = 0;

	if (pud_young(*pudp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pudp);

	return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	/*
	 * On x86 CPUs, clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}

pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
			 pmd_t *pmdp)
{
	/*
	 * No flush is necessary. Once an invalid PTE is established, the PTE's
	 * access and dirty bits cannot be updated.
	 */
	return pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}

int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

#ifdef CONFIG_X86_64
	/*
	 * Ensure that the static initial page tables are covering the
	 * fixmap completely.
	 */
	BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
		     (FIXMAP_PMD_NUM * PTRS_PER_PTE));
#endif

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
		       phys_addr_t phys, pgprot_t flags)
{
	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __default_kernel_pte_mask;

	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
#ifdef CONFIG_X86_5LEVEL
/**
 * p4d_set_huge - setup kernel P4D mapping
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

/**
 * p4d_clear_huge - clear kernel P4D mapping when it is set
 *
 * No 512GB pages yet -- nothing to do
 */
void p4d_clear_huge(p4d_t *p4d)
{
}
#endif

/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if the complete range has the same MTRR
 * caching mode.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	u8 uniform;

	mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
	if (!uniform)
		return 0;

	/* Bail out if we are on a populated non-leaf entry: */
	if (pud_present(*pud) && !pud_huge(*pud))
		return 0;

	set_pte((pte_t *)pud, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));

	return 1;
}
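
/*
 * Illustrative sketch (not part of this file) of the 1GB -> 2MB -> 4K
 * fallback described in the kernel-doc above. The real callers are the
 * generic vmap/ioremap huge-mapping helpers; the function and variable
 * names below are placeholders for the example only.
 */
#if 0
static void example_try_huge_mapping(pud_t *pud, pmd_t *pmd,
				     phys_addr_t phys, pgprot_t prot)
{
	if (pud_set_huge(pud, phys, prot))
		return;		/* mapped with a 1GB page */

	if (pmd_set_huge(pmd, phys, prot))
		return;		/* mapped with a 2MB page */

	/* otherwise fall back to mapping the range with ordinary 4K PTEs */
}
#endif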

/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See text over pud_set_huge() above.
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	u8 uniform;

	mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
	if (!uniform) {
		pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
			     __func__, addr, addr + PMD_SIZE);
		return 0;
	}

	/* Bail out if we are on a populated non-leaf entry: */
	if (pmd_present(*pmd) && !pmd_huge(*pmd))
		return 0;

	set_pte((pte_t *)pmd, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));

	return 1;
}

/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
{
	if (pud_large(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}

/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_large(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}

#ifdef CONFIG_X86_64
/**
 * pud_free_pmd_page - Clear pud entry and free pmd page.
 * @pud: Pointer to a PUD.
 * @addr: Virtual address associated with pud.
 *
 * Context: The pud range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 *
 * NOTE: Callers must allow a single page allocation.
 */
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd, *pmd_sv;
	pte_t *pte;
	int i;

	pmd = pud_pgtable(*pud);
	pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
	if (!pmd_sv)
		return 0;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd_sv[i] = pmd[i];
		if (!pmd_none(pmd[i]))
			pmd_clear(&pmd[i]);
	}

	pud_clear(pud);

	/* INVLPG to clear all paging-structure caches */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd_sv[i])) {
			pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
			free_page((unsigned long)pte);
		}
	}

	free_page((unsigned long)pmd_sv);

	pagetable_pmd_dtor(virt_to_ptdesc(pmd));
	free_page((unsigned long)pmd);

	return 1;
}
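
/*
 * Note on the ordering above: the pmd entries are copied into pmd_sv before
 * being cleared, so the pte pages they point to can still be found and freed
 * after the paging-structure caches have been flushed.
 */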

/**
 * pmd_free_pte_page - Clear pmd entry and free pte page.
 * @pmd: Pointer to a PMD.
 * @addr: Virtual address associated with pmd.
 *
 * Context: The pmd range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;

	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);

	/* INVLPG to clear all paging-structure caches */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

	free_page((unsigned long)pte);

	return 1;
}

#else /* !CONFIG_X86_64 */

/*
 * Disable free page handling on x86-PAE. This assures that ioremap()
 * does not update sync'd pmd entries. See vmalloc_sync_one().
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	return pmd_none(*pmd);
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHADOW_STACK)
		return pte_mkwrite_shstk(pte);

	pte = pte_mkwrite_novma(pte);

	return pte_clear_saveddirty(pte);
}

pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHADOW_STACK)
		return pmd_mkwrite_shstk(pmd);

	pmd = pmd_mkwrite_novma(pmd);

	return pmd_clear_saveddirty(pmd);
}
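
/*
 * Background note (x86 shadow stack encoding): hardware treats Write=0,Dirty=1
 * as a shadow-stack entry, while an ordinary writable entry has Write=1. The
 * helpers above therefore either take the _shstk path for VM_SHADOW_STACK
 * vmas, or make the entry writable the normal way and let the SavedDirty
 * bookkeeping (pte_clear_saveddirty()/pmd_clear_saveddirty()) move any
 * software-saved dirty state back into the hardware Dirty bit, which is
 * unambiguous once Write=1.
 */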

void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte)
{
	/*
	 * Hardware before shadow stack can (rarely) set Dirty=1 on a
	 * Write=0 PTE, so the condition below only indicates a software
	 * bug when shadow stack is supported by the HW. The HW-support
	 * check itself is part of pte_shstk().
	 */
	VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&
			pte_shstk(pte));
}

void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd)
{
	/* See note in arch_check_zapped_pte() */
	VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&
			pmd_shstk(pmd));
}