// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
EXPORT_SYMBOL(physical_mask);
#endif

#ifdef CONFIG_HIGHPTE
#define PGTABLE_HIGHMEM __GFP_HIGHMEM
#else
#define PGTABLE_HIGHMEM 0
#endif

#ifndef CONFIG_PARAVIRT
static inline
void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	tlb_remove_page(tlb, table);
}
#endif

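/*
 * GFP flags used when allocating user page-table pages.  With CONFIG_HIGHPTE
 * this includes __GFP_HIGHMEM; the "userpte=nohigh" boot option below clears
 * it again.
 */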
gfp_t __userpte_alloc_gfp = GFP_PGTABLE_USER | PGTABLE_HIGHMEM;

pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	return __pte_alloc_one(mm, __userpte_alloc_gfp);
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pagetable_pte_dtor(page_ptdesc(pte));
	paravirt_release_pte(page_to_pfn(pte));
	paravirt_tlb_remove_table(tlb, pte);
}

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pagetable_pmd_dtor(ptdesc);
	paravirt_tlb_remove_table(tlb, ptdesc_page(ptdesc));
}

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pud);

	pagetable_pud_dtor(ptdesc);
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	paravirt_tlb_remove_table(tlb, virt_to_page(pud));
}

#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
	paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
	paravirt_tlb_remove_table(tlb, virt_to_page(p4d));
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pgd);

	list_add(&ptdesc->pt_list, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pgd);

	list_del(&ptdesc->pt_list);
}

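/*
 * Number of pgd entries that are private to this page table (i.e. not
 * shared with swapper_pg_dir): just the user range when the kernel pmd
 * is shared, the whole pgd otherwise.
 */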
#define UNSHARED_PTRS_PER_PGD \
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
#define MAX_UNSHARED_PTRS_PER_PGD \
	max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)

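/*
 * Remember which mm a pgd page belongs to, so that walkers of pgd_list
 * can get back to the owning mm_struct.
 */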
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	virt_to_ptdesc(pgd)->pt_mm = mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return page_ptdesc(page)->pt_mm;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (CONFIG_PGTABLE_LEVELS == 2 ||
	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    CONFIG_PGTABLE_LEVELS >= 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
#define MAX_PREALLOCATED_PMDS	MAX_UNSHARED_PTRS_PER_PGD

/*
 * We allocate separate PMDs for the kernel part of the user page-table
 * when PTI is enabled. We need them to map the per-process LDT into the
 * user-space page-table.
 */
#define PREALLOCATED_USER_PMDS	(boot_cpu_has(X86_FEATURE_PTI) ? \
					KERNEL_PGD_PTRS : 0)
#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0
#define MAX_PREALLOCATED_PMDS	0
#define PREALLOCATED_USER_PMDS	0
#define MAX_PREALLOCATED_USER_PMDS 0
#endif /* CONFIG_X86_PAE */

static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
	int i;
	struct ptdesc *ptdesc;

	for (i = 0; i < count; i++)
		if (pmds[i]) {
			ptdesc = virt_to_ptdesc(pmds[i]);

			pagetable_pmd_dtor(ptdesc);
			pagetable_free(ptdesc);
			mm_dec_nr_pmds(mm);
		}
}

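/*
 * Allocate @count pmd pages up front for a new pgd.  If any allocation
 * fails, everything allocated so far is freed again and -ENOMEM is
 * returned.
 */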
static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
	int i;
	bool failed = false;
	gfp_t gfp = GFP_PGTABLE_USER;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	gfp &= ~__GFP_HIGHMEM;

	for (i = 0; i < count; i++) {
		pmd_t *pmd = NULL;
		struct ptdesc *ptdesc = pagetable_alloc(gfp, 0);

		if (!ptdesc)
			failed = true;
		if (ptdesc && !pagetable_pmd_ctor(ptdesc)) {
			pagetable_free(ptdesc);
			ptdesc = NULL;
			failed = true;
		}
		if (ptdesc) {
			mm_inc_nr_pmds(mm);
			pmd = ptdesc_address(ptdesc);
		}

		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(mm, pmds, count);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = *pgdp;

	if (pgd_val(pgd) != 0) {
		pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

		pgd_clear(pgdp);

		paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
		pmd_free(mm, pmd);
		mm_dec_nr_pmds(mm);
	}
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		mop_up_one_pmd(mm, &pgdp[i]);

#ifdef CONFIG_PAGE_TABLE_ISOLATION

	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	pgdp = kernel_to_user_pgdp(pgdp);

	for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
		mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
#endif
}

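/*
 * Wire the preallocated pmds into the pgd.  Kernel entries (at or above
 * KERNEL_PGD_BOUNDARY) are first copied from swapper_pg_dir so the kernel
 * mappings are visible through the new page table.
 */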
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	p4d_t *p4d;
	pud_t *pud;
	int i;

	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
				     pgd_t *k_pgd, pmd_t *pmds[])
{
	pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	p4d_t *u_p4d;
	pud_t *u_pud;
	int i;

	u_p4d = p4d_offset(u_pgd, 0);
	u_pud = pud_offset(u_p4d, 0);

	s_pgd += KERNEL_PGD_BOUNDARY;
	u_pud += KERNEL_PGD_BOUNDARY;

	for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
		pmd_t *pmd = pmds[i];

		memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
		       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, u_pud, pmd);
	}
}
#else
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
				     pgd_t *k_pgd, pmd_t *pmds[])
{
}
#endif
/*
 * Xen paravirt assumes that the pgd table is in one page. The 64-bit kernel
 * also assumes that the pgd is in one page.
 *
 * But a kernel with PAE paging that is not running as a Xen domain
 * only needs to allocate 32 bytes for the pgd instead of one page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN	32

static struct kmem_cache *pgd_cache;

void __init pgtable_cache_init(void)
{
	/*
	 * When a PAE kernel is running as a Xen domain, it does not use
	 * a shared kernel pmd, and that requires a whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return;

	/*
	 * When a PAE kernel is not running as a Xen domain, it uses a
	 * shared kernel pmd, which does not require a whole page for the
	 * pgd: 32 bytes are enough.  Create a 32-byte slab at boot time
	 * for pgd table allocations.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
				      SLAB_PANIC, NULL);
}

static inline pgd_t *_pgd_alloc(void)
{
	/*
	 * Without SHARED_KERNEL_PMD, the PAE kernel is running as a Xen
	 * domain and we allocate one page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
						 PGD_ALLOCATION_ORDER);

	/*
	 * Otherwise the PAE kernel is not running as a Xen domain and we
	 * can allocate the pgd from the 32-byte slab to save memory.
	 */
	return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER);
}

static inline void _pgd_free(pgd_t *pgd)
{
	if (!SHARED_KERNEL_PMD)
		free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
	else
		kmem_cache_free(pgd_cache, pgd);
}
#else

static inline pgd_t *_pgd_alloc(void)
{
	return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
					 PGD_ALLOCATION_ORDER);
}

static inline void _pgd_free(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
}
#endif /* CONFIG_X86_PAE */

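/*
 * Allocate a new top-level page table for @mm: allocate the pgd itself,
 * preallocate any pmds needed (PAE and/or PTI), and populate it under
 * pgd_lock so pgd_list walkers never see a half-initialized pgd.
 */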
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
	pmd_t *pmds[MAX_PREALLOCATED_PMDS];

	pgd = _pgd_alloc();

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (sizeof(pmds) != 0 &&
	    preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
		goto out_free_pgd;

	if (sizeof(u_pmds) != 0 &&
	    preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
		goto out_free_pmds;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_user_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	if (sizeof(pmds) != 0)
		pgd_prepopulate_pmd(mm, pgd, pmds);

	if (sizeof(u_pmds) != 0)
		pgd_prepopulate_user_pmd(mm, pgd, u_pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_user_pmds:
	if (sizeof(u_pmds) != 0)
		free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
out_free_pmds:
	if (sizeof(pmds) != 0)
		free_pmds(mm, pmds, PREALLOCATED_PMDS);
out_free_pgd:
	_pgd_free(pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	_pgd_free(pgd);
}

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty)
		set_pte(ptep, entry);

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		set_pmd(pmdp, entry);
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}

int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pud_t *pudp, pud_t entry, int dirty)
{
	int changed = !pud_same(*pudp, entry);

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);

	if (changed && dirty) {
		set_pud(pudp, entry);
		/*
		 * We had a write-protection fault here and changed the pud
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif

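/*
 * Atomically clear the Accessed bit in a pte and report whether it was
 * set; the TLB is not flushed here (see ptep_clear_flush_young() below).
 */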
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	return ret;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	return ret;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pudp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pud_t *pudp)
{
	int ret = 0;

	if (pud_young(*pudp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pudp);

	return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	/*
	 * On x86 CPUs, clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}

pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
			 pmd_t *pmdp)
{
	/*
	 * No flush is necessary. Once an invalid PTE is established, the PTE's
	 * access and dirty bits cannot be updated.
	 */
	return pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}

int fixmaps_set;

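/*
 * Install @pte for fixmap slot @idx in the kernel page tables.
 */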
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

#ifdef CONFIG_X86_64
	/*
	 * Ensure that the static initial page tables are covering the
	 * fixmap completely.
	 */
	BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
		     (FIXMAP_PMD_NUM * PTRS_PER_PTE));
#endif

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
		       phys_addr_t phys, pgprot_t flags)
{
	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __default_kernel_pte_mask;

	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
#ifdef CONFIG_X86_5LEVEL
/**
 * p4d_set_huge - setup kernel P4D mapping
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

/**
 * p4d_clear_huge - clear kernel P4D mapping when it is set
 *
 * No 512GB pages yet -- always return 0
 */
void p4d_clear_huge(p4d_t *p4d)
{
}
#endif

/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if the complete range has the same MTRR
 * caching mode.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	u8 uniform;

	mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
	if (!uniform)
		return 0;

	/* Bail out if we are on a populated non-leaf entry: */
	if (pud_present(*pud) && !pud_huge(*pud))
		return 0;

	set_pte((pte_t *)pud, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));

	return 1;
}

/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See text over pud_set_huge() above.
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	u8 uniform;

	mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
	if (!uniform) {
		pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
			     __func__, addr, addr + PMD_SIZE);
		return 0;
	}

	/* Bail out if we are on a populated non-leaf entry: */
	if (pmd_present(*pmd) && !pmd_huge(*pmd))
		return 0;

	set_pte((pte_t *)pmd, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));

	return 1;
}

/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
{
	if (pud_large(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}

/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_large(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}

#ifdef CONFIG_X86_64
/**
 * pud_free_pmd_page - Clear pud entry and free pmd page.
 * @pud: Pointer to a PUD.
 * @addr: Virtual address associated with pud.
 *
 * Context: The pud range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 *
 * NOTE: Callers must allow a single page allocation.
 */
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd, *pmd_sv;
	pte_t *pte;
	int i;

	pmd = pud_pgtable(*pud);
	pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
	if (!pmd_sv)
		return 0;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd_sv[i] = pmd[i];
		if (!pmd_none(pmd[i]))
			pmd_clear(&pmd[i]);
	}

	pud_clear(pud);

	/* INVLPG to clear all paging-structure caches */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd_sv[i])) {
			pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
			free_page((unsigned long)pte);
		}
	}

	free_page((unsigned long)pmd_sv);

	pagetable_pmd_dtor(virt_to_ptdesc(pmd));
	free_page((unsigned long)pmd);

	return 1;
}

/**
 * pmd_free_pte_page - Clear pmd entry and free pte page.
 * @pmd: Pointer to a PMD.
 * @addr: Virtual address associated with pmd.
 *
 * Context: The pmd range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;

	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);

	/* INVLPG to clear all paging-structure caches */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

	free_page((unsigned long)pte);

	return 1;
}

#else /* !CONFIG_X86_64 */

/*
 * Disable free page handling on x86-PAE. This assures that ioremap()
 * does not update sync'd pmd entries. See vmalloc_sync_one().
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	return pmd_none(*pmd);
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

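/*
 * Make a pte/pmd writable.  Shadow-stack VMAs get the shadow-stack
 * encoding (Write=0, Dirty=1); everything else gets a normal writable
 * entry with any saved-dirty bit cleared.
 */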
pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHADOW_STACK)
		return pte_mkwrite_shstk(pte);

	pte = pte_mkwrite_novma(pte);

	return pte_clear_saveddirty(pte);
}

pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHADOW_STACK)
		return pmd_mkwrite_shstk(pmd);

	pmd = pmd_mkwrite_novma(pmd);

	return pmd_clear_saveddirty(pmd);
}

void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte)
{
	/*
	 * Hardware before shadow stack can (rarely) set Dirty=1
	 * on a Write=0 PTE. So the below condition
	 * only indicates a software bug when shadow stack is
	 * supported by the HW. This checking is covered in
	 * pte_shstk().
	 */
	VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&
			pte_shstk(pte));
}

void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd)
{
	/* See note in arch_check_zapped_pte() */
	VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&
			pmd_shstk(pmd));
}