v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/mm.h>
  3#include <linux/gfp.h>
  4#include <linux/hugetlb.h>
  5#include <asm/pgalloc.h>
  6#include <asm/tlb.h>
  7#include <asm/fixmap.h>
  8#include <asm/mtrr.h>
  9
 10#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
 11phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
 12EXPORT_SYMBOL(physical_mask);
 13#endif
 14
 15#ifdef CONFIG_HIGHPTE
 16#define PGTABLE_HIGHMEM __GFP_HIGHMEM
 17#else
 18#define PGTABLE_HIGHMEM 0
 19#endif
 20
 21#ifndef CONFIG_PARAVIRT
 22static inline
 23void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
 24{
 25	tlb_remove_page(tlb, table);
 26}
 27#endif
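/*
 * The stub above is used when CONFIG_PARAVIRT is off: the freed table page
 * can go straight into the normal mmu_gather batch. A paravirt backend
 * (e.g. Xen PV) may instead route this through a deferred tlb_remove_table()
 * style free, since it cannot rely on TLB-flush IPIs to synchronize against
 * concurrent lockless page-table walkers.
 */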
 28
 29gfp_t __userpte_alloc_gfp = GFP_PGTABLE_USER | PGTABLE_HIGHMEM;
 30
 31pgtable_t pte_alloc_one(struct mm_struct *mm)
 32{
 33	return __pte_alloc_one(mm, __userpte_alloc_gfp);
 34}
 35
 36static int __init setup_userpte(char *arg)
 37{
 38	if (!arg)
 39		return -EINVAL;
 40
 41	/*
 42	 * "userpte=nohigh" disables allocation of user pagetables in
 43	 * high memory.
 44	 */
 45	if (strcmp(arg, "nohigh") == 0)
 46		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
 47	else
 48		return -EINVAL;
 49	return 0;
 50}
 51early_param("userpte", setup_userpte);
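/*
 * Usage sketch: booting a CONFIG_HIGHPTE kernel with "userpte=nohigh" on the
 * command line clears __GFP_HIGHMEM from __userpte_alloc_gfp above, so user
 * PTE pages come from lowmem and stay permanently mapped instead of needing
 * kmap_atomic() to be touched.
 */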
 52
 53void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 54{
 55	pgtable_pte_page_dtor(pte);
 56	paravirt_release_pte(page_to_pfn(pte));
 57	paravirt_tlb_remove_table(tlb, pte);
 58}
 59
 60#if CONFIG_PGTABLE_LEVELS > 2
 61void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 62{
 63	struct page *page = virt_to_page(pmd);
 64	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
 65	/*
 66	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
 67	 * entries need a full cr3 reload to flush.
 68	 */
 69#ifdef CONFIG_X86_PAE
 70	tlb->need_flush_all = 1;
 71#endif
 72	pgtable_pmd_page_dtor(page);
 73	paravirt_tlb_remove_table(tlb, page);
 74}
 75
 76#if CONFIG_PGTABLE_LEVELS > 3
 77void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
 78{
 79	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
 80	paravirt_tlb_remove_table(tlb, virt_to_page(pud));
 81}
 82
 83#if CONFIG_PGTABLE_LEVELS > 4
 84void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
 85{
 86	paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
 87	paravirt_tlb_remove_table(tlb, virt_to_page(p4d));
 88}
 89#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
 90#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
 91#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
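/*
 * The ___pte/___pmd/___pud/___p4d_free_tlb() helpers above back the generic
 * p*_free_tlb() macros: each drops the paravirt reference to the page and
 * then defers the actual free through the mmu_gather, so the memory is not
 * reused while another CPU may still hold a stale TLB entry referencing it.
 */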
 92
 93static inline void pgd_list_add(pgd_t *pgd)
 94{
 95	struct page *page = virt_to_page(pgd);
 96
 97	list_add(&page->lru, &pgd_list);
 98}
 99
100static inline void pgd_list_del(pgd_t *pgd)
101{
102	struct page *page = virt_to_page(pgd);
103
104	list_del(&page->lru);
105}
106
107#define UNSHARED_PTRS_PER_PGD				\
108	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
109#define MAX_UNSHARED_PTRS_PER_PGD			\
110	max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)
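/*
 * UNSHARED_PTRS_PER_PGD is the number of pgd slots private to each mm: with
 * a shared kernel pmd only the user slots below KERNEL_PGD_BOUNDARY are
 * private, otherwise all PTRS_PER_PGD slots are. MAX_UNSHARED_PTRS_PER_PGD
 * is the compile-time upper bound, used to size the on-stack pmd arrays in
 * pgd_alloc() below.
 */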
111
112
113static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
114{
115	virt_to_page(pgd)->pt_mm = mm;
116}
117
118struct mm_struct *pgd_page_get_mm(struct page *page)
119{
120	return page->pt_mm;
121}
122
123static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
124{
125	/* If the pgd points to a shared pagetable level (either the
126	   ptes in non-PAE, or shared PMD in PAE), then just copy the
127	   references from swapper_pg_dir. */
128	if (CONFIG_PGTABLE_LEVELS == 2 ||
129	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
130	    CONFIG_PGTABLE_LEVELS >= 4) {
131		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
132				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
133				KERNEL_PGD_PTRS);
134	}
135
136	/* list required to sync kernel mapping updates */
137	if (!SHARED_KERNEL_PMD) {
138		pgd_set_mm(pgd, mm);
139		pgd_list_add(pgd);
140	}
141}
142
143static void pgd_dtor(pgd_t *pgd)
144{
145	if (SHARED_KERNEL_PMD)
146		return;
147
148	spin_lock(&pgd_lock);
149	pgd_list_del(pgd);
150	spin_unlock(&pgd_lock);
151}
152
153/*
154 * List of all pgd's needed for non-PAE so it can invalidate entries
155 * in both cached and uncached pgd's; not needed for PAE since the
156 * kernel pmd is shared. If PAE were not to share the pmd a similar
157 * tactic would be needed. This is essentially codepath-based locking
158 * against pageattr.c; it is the unique case in which a valid change
159 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
160 * vmalloc faults work because attached pagetables are never freed.
161 * -- nyc
162 */
163
164#ifdef CONFIG_X86_PAE
165/*
166 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
167 * updating the top-level pagetable entries to guarantee the
168 * processor notices the update.  Since this is expensive, and
169 * all 4 top-level entries are used almost immediately in a
170 * new process's life, we just pre-populate them here.
171 *
172 * Also, if we're in a paravirt environment where the kernel pmd is
 173 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
174 * and initialize the kernel pmds here.
175 */
176#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
177#define MAX_PREALLOCATED_PMDS	MAX_UNSHARED_PTRS_PER_PGD
178
179/*
180 * We allocate separate PMDs for the kernel part of the user page-table
181 * when PTI is enabled. We need them to map the per-process LDT into the
182 * user-space page-table.
183 */
184#define PREALLOCATED_USER_PMDS	 (boot_cpu_has(X86_FEATURE_PTI) ? \
185					KERNEL_PGD_PTRS : 0)
186#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS
187
188void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
189{
190	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
191
192	/* Note: almost everything apart from _PAGE_PRESENT is
193	   reserved at the pmd (PDPT) level. */
194	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
195
196	/*
197	 * According to Intel App note "TLBs, Paging-Structure Caches,
198	 * and Their Invalidation", April 2007, document 317080-001,
199	 * section 8.1: in PAE mode we explicitly have to flush the
200	 * TLB via cr3 if the top-level pgd is changed...
201	 */
202	flush_tlb_mm(mm);
203}
204#else  /* !CONFIG_X86_PAE */
205
206/* No need to prepopulate any pagetable entries in non-PAE modes. */
207#define PREALLOCATED_PMDS	0
208#define MAX_PREALLOCATED_PMDS	0
209#define PREALLOCATED_USER_PMDS	 0
210#define MAX_PREALLOCATED_USER_PMDS 0
211#endif	/* CONFIG_X86_PAE */
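/*
 * Worked example (assuming PAE with the default 3G/1G split): PTRS_PER_PGD
 * is 4 and KERNEL_PGD_BOUNDARY is 3, so a native kernel with a shared kernel
 * pmd preallocates 3 pmds per pgd, while a Xen PV domain (no shared kernel
 * pmd) preallocates all 4. On non-PAE and 64-bit kernels nothing is
 * preallocated here.
 */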
212
213static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
214{
215	int i;
216
217	for (i = 0; i < count; i++)
218		if (pmds[i]) {
219			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
220			free_page((unsigned long)pmds[i]);
221			mm_dec_nr_pmds(mm);
222		}
223}
224
225static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
226{
227	int i;
228	bool failed = false;
229	gfp_t gfp = GFP_PGTABLE_USER;
230
231	if (mm == &init_mm)
232		gfp &= ~__GFP_ACCOUNT;
233
234	for (i = 0; i < count; i++) {
235		pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
236		if (!pmd)
237			failed = true;
238		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
239			free_page((unsigned long)pmd);
240			pmd = NULL;
241			failed = true;
242		}
243		if (pmd)
244			mm_inc_nr_pmds(mm);
245		pmds[i] = pmd;
246	}
247
248	if (failed) {
249		free_pmds(mm, pmds, count);
250		return -ENOMEM;
251	}
252
253	return 0;
254}
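/*
 * preallocate_pmds() is all-or-nothing: each page is allocated and handed to
 * pgtable_pmd_page_ctor(), which sets up the split page-table lock for it;
 * if any step fails, every pmd allocated so far is released again by
 * free_pmds() and the caller just sees -ENOMEM.
 */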
255
256/*
257 * Mop up any pmd pages which may still be attached to the pgd.
258 * Normally they will be freed by munmap/exit_mmap, but any pmd we
259 * preallocate which never got a corresponding vma will need to be
260 * freed manually.
261 */
262static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
263{
264	pgd_t pgd = *pgdp;
265
266	if (pgd_val(pgd) != 0) {
267		pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
268
269		pgd_clear(pgdp);
270
271		paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
272		pmd_free(mm, pmd);
273		mm_dec_nr_pmds(mm);
274	}
275}
276
277static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
278{
279	int i;
280
281	for (i = 0; i < PREALLOCATED_PMDS; i++)
282		mop_up_one_pmd(mm, &pgdp[i]);
283
284#ifdef CONFIG_PAGE_TABLE_ISOLATION
285
286	if (!boot_cpu_has(X86_FEATURE_PTI))
287		return;
288
289	pgdp = kernel_to_user_pgdp(pgdp);
290
291	for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
292		mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
293#endif
294}
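/*
 * With page-table isolation the pgd allocation is two pages: the kernel pgd
 * is followed by a user-space copy reachable via kernel_to_user_pgdp(). The
 * loop above also mops up the user pmds that were preallocated there for the
 * per-process LDT mapping.
 */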
295
296static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
297{
298	p4d_t *p4d;
299	pud_t *pud;
300	int i;
301
302	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
303		return;
304
305	p4d = p4d_offset(pgd, 0);
306	pud = pud_offset(p4d, 0);
307
308	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
309		pmd_t *pmd = pmds[i];
310
311		if (i >= KERNEL_PGD_BOUNDARY)
312			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
313			       sizeof(pmd_t) * PTRS_PER_PMD);
314
315		pud_populate(mm, pud, pmd);
316	}
317}
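/*
 * For slots at or above KERNEL_PGD_BOUNDARY the fresh pmd is first filled
 * from swapper_pg_dir, so a new address space starts out with the kernel
 * mappings already present; the user slots below the boundary get empty pmds
 * that user mappings will populate later.
 */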
318
319#ifdef CONFIG_PAGE_TABLE_ISOLATION
320static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
321				     pgd_t *k_pgd, pmd_t *pmds[])
322{
323	pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
324	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
325	p4d_t *u_p4d;
326	pud_t *u_pud;
327	int i;
328
329	u_p4d = p4d_offset(u_pgd, 0);
330	u_pud = pud_offset(u_p4d, 0);
331
332	s_pgd += KERNEL_PGD_BOUNDARY;
333	u_pud += KERNEL_PGD_BOUNDARY;
334
335	for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
336		pmd_t *pmd = pmds[i];
337
338		memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
339		       sizeof(pmd_t) * PTRS_PER_PMD);
340
341		pud_populate(mm, u_pud, pmd);
342	}
343
344}
345#else
346static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
347				     pgd_t *k_pgd, pmd_t *pmds[])
348{
349}
350#endif
 351/*
 352 * Xen paravirt assumes that the pgd table occupies one full page, and
 353 * the 64-bit kernel makes the same assumption.
 354 *
 355 * But a kernel with PAE paging that is not running as a Xen domain
 356 * only needs to allocate 32 bytes for the pgd instead of one page.
 357 */
358#ifdef CONFIG_X86_PAE
359
360#include <linux/slab.h>
361
362#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
363#define PGD_ALIGN	32
364
365static struct kmem_cache *pgd_cache;
366
367void __init pgtable_cache_init(void)
368{
 369	/*
 370	 * When a PAE kernel runs as a Xen domain, it does not use a
 371	 * shared kernel pmd, and that requires a whole page for the pgd.
 372	 */
373	if (!SHARED_KERNEL_PMD)
374		return;
375
 376	/*
 377	 * When a PAE kernel is not running as a Xen domain, it uses a
 378	 * shared kernel pmd, which does not need a whole page for the
 379	 * pgd: 32 bytes are enough. Create a 32-byte slab at boot time
 380	 * for pgd table allocations.
 381	 */
382	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
383				      SLAB_PANIC, NULL);
384}
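/*
 * With a shared kernel pmd the PAE pgd is just the 4-entry
 * page-directory-pointer table: PGD_SIZE is PTRS_PER_PGD * sizeof(pgd_t)
 * = 4 * 8 = 32 bytes, and PGD_ALIGN matches the 32-byte alignment the
 * hardware requires of a PDPT loaded into cr3, so the slab hands out
 * 32-byte objects instead of whole pages.
 */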
385
386static inline pgd_t *_pgd_alloc(void)
387{
 388	/*
 389	 * Without SHARED_KERNEL_PMD the PAE kernel is running as a Xen
 390	 * domain, so allocate one page for the pgd.
 391	 */
392	if (!SHARED_KERNEL_PMD)
393		return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
394						 PGD_ALLOCATION_ORDER);
395
 396	/*
 397	 * Here the PAE kernel is not running as a Xen domain, so a
 398	 * 32-byte slab object is enough for the pgd and saves memory.
 399	 */
400	return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER);
401}
402
403static inline void _pgd_free(pgd_t *pgd)
404{
405	if (!SHARED_KERNEL_PMD)
406		free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
407	else
408		kmem_cache_free(pgd_cache, pgd);
409}
410#else
411
412static inline pgd_t *_pgd_alloc(void)
413{
414	return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
415					 PGD_ALLOCATION_ORDER);
416}
417
418static inline void _pgd_free(pgd_t *pgd)
419{
420	free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
421}
422#endif /* CONFIG_X86_PAE */
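/*
 * In both variants above, PGD_ALLOCATION_ORDER (defined in the x86 headers)
 * is 1 when page-table isolation is enabled, so the kernel pgd and its
 * user-space twin come from one order-1 allocation; without PTI it is 0 and
 * a single page is enough.
 */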
423
424pgd_t *pgd_alloc(struct mm_struct *mm)
425{
426	pgd_t *pgd;
427	pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
428	pmd_t *pmds[MAX_PREALLOCATED_PMDS];
429
430	pgd = _pgd_alloc();
431
432	if (pgd == NULL)
433		goto out;
434
435	mm->pgd = pgd;
436
437	if (preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
438		goto out_free_pgd;
439
440	if (preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
441		goto out_free_pmds;
442
443	if (paravirt_pgd_alloc(mm) != 0)
444		goto out_free_user_pmds;
445
446	/*
447	 * Make sure that pre-populating the pmds is atomic with
448	 * respect to anything walking the pgd_list, so that they
449	 * never see a partially populated pgd.
450	 */
451	spin_lock(&pgd_lock);
452
453	pgd_ctor(mm, pgd);
454	pgd_prepopulate_pmd(mm, pgd, pmds);
455	pgd_prepopulate_user_pmd(mm, pgd, u_pmds);
456
457	spin_unlock(&pgd_lock);
458
459	return pgd;
460
461out_free_user_pmds:
462	free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
463out_free_pmds:
464	free_pmds(mm, pmds, PREALLOCATED_PMDS);
465out_free_pgd:
466	_pgd_free(pgd);
467out:
468	return NULL;
469}
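/*
 * Rough lifecycle (see kernel/fork.c): mm_init() obtains the pgd via
 * pgd_alloc(mm) when a new address space is created, and __mmdrop()
 * releases it with pgd_free(mm, mm->pgd) below, which mops up the
 * preallocated pmds before handing the pgd page(s) back.
 */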
470
471void pgd_free(struct mm_struct *mm, pgd_t *pgd)
472{
473	pgd_mop_up_pmds(mm, pgd);
474	pgd_dtor(pgd);
475	paravirt_pgd_free(mm, pgd);
476	_pgd_free(pgd);
477}
478
479/*
480 * Used to set accessed or dirty bits in the page table entries
481 * on other architectures. On x86, the accessed and dirty bits
482 * are tracked by hardware. However, do_wp_page calls this function
483 * to also make the pte writeable at the same time the dirty bit is
484 * set. In that case we do actually need to write the PTE.
485 */
486int ptep_set_access_flags(struct vm_area_struct *vma,
487			  unsigned long address, pte_t *ptep,
488			  pte_t entry, int dirty)
489{
490	int changed = !pte_same(*ptep, entry);
491
492	if (changed && dirty)
493		set_pte(ptep, entry);
494
495	return changed;
496}
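/*
 * Only the "make writable on a dirtying fault" case actually writes the PTE
 * here; no TLB flush is done because the change only adds permissions, which
 * the CPU resolves on the next access, at worst via a spurious fault.
 */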
497
498#ifdef CONFIG_TRANSPARENT_HUGEPAGE
499int pmdp_set_access_flags(struct vm_area_struct *vma,
500			  unsigned long address, pmd_t *pmdp,
501			  pmd_t entry, int dirty)
502{
503	int changed = !pmd_same(*pmdp, entry);
504
505	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
506
507	if (changed && dirty) {
508		set_pmd(pmdp, entry);
509		/*
510		 * We had a write-protection fault here and changed the pmd
 511		 * to be more permissive. No need to flush the TLB for that,
512		 * #PF is architecturally guaranteed to do that and in the
513		 * worst-case we'll generate a spurious fault.
514		 */
515	}
516
517	return changed;
518}
519
520int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
521			  pud_t *pudp, pud_t entry, int dirty)
522{
523	int changed = !pud_same(*pudp, entry);
524
525	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
526
527	if (changed && dirty) {
528		set_pud(pudp, entry);
529		/*
530		 * We had a write-protection fault here and changed the pud
 531		 * to be more permissive. No need to flush the TLB for that,
532		 * #PF is architecturally guaranteed to do that and in the
533		 * worst-case we'll generate a spurious fault.
534		 */
535	}
536
537	return changed;
538}
539#endif
540
541int ptep_test_and_clear_young(struct vm_area_struct *vma,
542			      unsigned long addr, pte_t *ptep)
543{
544	int ret = 0;
545
546	if (pte_young(*ptep))
547		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
548					 (unsigned long *) &ptep->pte);
549
550	return ret;
551}
552
553#ifdef CONFIG_TRANSPARENT_HUGEPAGE
554int pmdp_test_and_clear_young(struct vm_area_struct *vma,
555			      unsigned long addr, pmd_t *pmdp)
556{
557	int ret = 0;
558
559	if (pmd_young(*pmdp))
560		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
561					 (unsigned long *)pmdp);
562
563	return ret;
564}
565int pudp_test_and_clear_young(struct vm_area_struct *vma,
566			      unsigned long addr, pud_t *pudp)
567{
568	int ret = 0;
569
570	if (pud_young(*pudp))
571		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
572					 (unsigned long *)pudp);
573
574	return ret;
575}
576#endif
577
578int ptep_clear_flush_young(struct vm_area_struct *vma,
579			   unsigned long address, pte_t *ptep)
580{
581	/*
582	 * On x86 CPUs, clearing the accessed bit without a TLB flush
583	 * doesn't cause data corruption. [ It could cause incorrect
584	 * page aging and the (mistaken) reclaim of hot pages, but the
585	 * chance of that should be relatively low. ]
586	 *
587	 * So as a performance optimization don't flush the TLB when
588	 * clearing the accessed bit, it will eventually be flushed by
589	 * a context switch or a VM operation anyway. [ In the rare
590	 * event of it not getting flushed for a long time the delay
591	 * shouldn't really matter because there's no real memory
592	 * pressure for swapout to react to. ]
593	 */
594	return ptep_test_and_clear_young(vma, address, ptep);
595}
596
597#ifdef CONFIG_TRANSPARENT_HUGEPAGE
598int pmdp_clear_flush_young(struct vm_area_struct *vma,
599			   unsigned long address, pmd_t *pmdp)
600{
601	int young;
602
603	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
604
605	young = pmdp_test_and_clear_young(vma, address, pmdp);
606	if (young)
607		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
608
609	return young;
610}
611#endif
612
613/**
614 * reserve_top_address - reserves a hole in the top of kernel address space
 615 * @reserve: size of hole to reserve
616 *
617 * Can be used to relocate the fixmap area and poke a hole in the top
618 * of kernel address space to make room for a hypervisor.
619 */
620void __init reserve_top_address(unsigned long reserve)
621{
622#ifdef CONFIG_X86_32
623	BUG_ON(fixmaps_set > 0);
624	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
625	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
626	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
627#endif
628}
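/*
 * Example (hypothetical values): a 32-bit hypervisor guest that needs 16 MB
 * at the top of the virtual address space would call
 * reserve_top_address(16 * 1024 * 1024) early in boot, before any fixmap
 * entries are set, which pushes __FIXADDR_TOP and everything laid out
 * relative to it down below the reserved hole.
 */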
629
630int fixmaps_set;
631
632void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
633{
634	unsigned long address = __fix_to_virt(idx);
635
636#ifdef CONFIG_X86_64
637       /*
638	* Ensure that the static initial page tables are covering the
639	* fixmap completely.
640	*/
641	BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
642		     (FIXMAP_PMD_NUM * PTRS_PER_PTE));
643#endif
644
645	if (idx >= __end_of_fixed_addresses) {
646		BUG();
647		return;
648	}
649	set_pte_vaddr(address, pte);
650	fixmaps_set++;
651}
652
653void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
654		       phys_addr_t phys, pgprot_t flags)
655{
656	/* Sanitize 'prot' against any unsupported bits: */
657	pgprot_val(flags) &= __default_kernel_pte_mask;
658
659	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
660}
661
662#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
663#ifdef CONFIG_X86_5LEVEL
664/**
665 * p4d_set_huge - setup kernel P4D mapping
666 *
667 * No 512GB pages yet -- always return 0
668 */
669int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
670{
671	return 0;
672}
673
674/**
675 * p4d_clear_huge - clear kernel P4D mapping when it is set
676 *
677 * No 512GB pages yet -- always return 0
678 */
679int p4d_clear_huge(p4d_t *p4d)
680{
681	return 0;
682}
683#endif
684
685/**
686 * pud_set_huge - setup kernel PUD mapping
687 *
688 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
689 * function sets up a huge page only if any of the following conditions are met:
690 *
691 * - MTRRs are disabled, or
692 *
693 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
694 *
695 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
696 *   has no effect on the requested PAT memory type.
697 *
698 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
699 * page mapping attempt fails.
700 *
701 * Returns 1 on success and 0 on failure.
702 */
703int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
704{
705	u8 mtrr, uniform;
706
707	mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
708	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
709	    (mtrr != MTRR_TYPE_WRBACK))
710		return 0;
711
 712	/* Bail out if we are on a populated non-leaf entry: */
713	if (pud_present(*pud) && !pud_huge(*pud))
714		return 0;
715
716	set_pte((pte_t *)pud, pfn_pte(
717		(u64)addr >> PAGE_SHIFT,
718		__pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));
719
720	return 1;
721}
722
723/**
724 * pmd_set_huge - setup kernel PMD mapping
725 *
726 * See text over pud_set_huge() above.
727 *
728 * Returns 1 on success and 0 on failure.
729 */
730int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
731{
732	u8 mtrr, uniform;
733
734	mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
735	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
736	    (mtrr != MTRR_TYPE_WRBACK)) {
737		pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
738			     __func__, addr, addr + PMD_SIZE);
739		return 0;
740	}
741
 742	/* Bail out if we are on a populated non-leaf entry: */
743	if (pmd_present(*pmd) && !pmd_huge(*pmd))
744		return 0;
745
746	set_pte((pte_t *)pmd, pfn_pte(
747		(u64)addr >> PAGE_SHIFT,
748		__pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));
749
750	return 1;
751}
752
753/**
754 * pud_clear_huge - clear kernel PUD mapping when it is set
755 *
756 * Returns 1 on success and 0 on failure (no PUD map is found).
757 */
758int pud_clear_huge(pud_t *pud)
759{
760	if (pud_large(*pud)) {
761		pud_clear(pud);
762		return 1;
763	}
764
765	return 0;
766}
767
768/**
769 * pmd_clear_huge - clear kernel PMD mapping when it is set
770 *
771 * Returns 1 on success and 0 on failure (no PMD map is found).
772 */
773int pmd_clear_huge(pmd_t *pmd)
774{
775	if (pmd_large(*pmd)) {
776		pmd_clear(pmd);
777		return 1;
778	}
779
780	return 0;
781}
782
783/*
784 * Until we support 512GB pages, skip them in the vmap area.
785 */
786int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
787{
788	return 0;
789}
790
791#ifdef CONFIG_X86_64
792/**
793 * pud_free_pmd_page - Clear pud entry and free pmd page.
794 * @pud: Pointer to a PUD.
795 * @addr: Virtual address associated with pud.
796 *
797 * Context: The pud range has been unmapped and TLB purged.
798 * Return: 1 if clearing the entry succeeded. 0 otherwise.
799 *
800 * NOTE: Callers must allow a single page allocation.
801 */
802int pud_free_pmd_page(pud_t *pud, unsigned long addr)
803{
804	pmd_t *pmd, *pmd_sv;
805	pte_t *pte;
806	int i;
807
808	pmd = (pmd_t *)pud_page_vaddr(*pud);
809	pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
810	if (!pmd_sv)
811		return 0;
812
813	for (i = 0; i < PTRS_PER_PMD; i++) {
814		pmd_sv[i] = pmd[i];
815		if (!pmd_none(pmd[i]))
816			pmd_clear(&pmd[i]);
817	}
818
819	pud_clear(pud);
820
821	/* INVLPG to clear all paging-structure caches */
822	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
823
824	for (i = 0; i < PTRS_PER_PMD; i++) {
825		if (!pmd_none(pmd_sv[i])) {
826			pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
827			free_page((unsigned long)pte);
828		}
829	}
830
831	free_page((unsigned long)pmd_sv);
832	free_page((unsigned long)pmd);
833
834	return 1;
835}
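/*
 * The pmd entries are copied to the pmd_sv scratch page before the pud is
 * cleared so that the pte pages they reference can still be found and freed
 * after flush_tlb_kernel_range() has purged the paging-structure caches;
 * freeing them before the flush could let a concurrent speculative walk use
 * an already-freed table.
 */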
836
837/**
838 * pmd_free_pte_page - Clear pmd entry and free pte page.
839 * @pmd: Pointer to a PMD.
840 * @addr: Virtual address associated with pmd.
841 *
842 * Context: The pmd range has been unmapped and TLB purged.
843 * Return: 1 if clearing the entry succeeded. 0 otherwise.
844 */
845int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
846{
847	pte_t *pte;
848
849	pte = (pte_t *)pmd_page_vaddr(*pmd);
850	pmd_clear(pmd);
851
852	/* INVLPG to clear all paging-structure caches */
853	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
854
855	free_page((unsigned long)pte);
856
857	return 1;
858}
859
860#else /* !CONFIG_X86_64 */
861
862int pud_free_pmd_page(pud_t *pud, unsigned long addr)
863{
864	return pud_none(*pud);
865}
866
867/*
868 * Disable free page handling on x86-PAE. This assures that ioremap()
869 * does not update sync'd pmd entries. See vmalloc_sync_one().
870 */
871int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
872{
873	return pmd_none(*pmd);
874}
875
876#endif /* CONFIG_X86_64 */
877#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
v3.15
 
  1#include <linux/mm.h>
  2#include <linux/gfp.h>
  3#include <asm/pgalloc.h>
  4#include <asm/pgtable.h>
  5#include <asm/tlb.h>
  6#include <asm/fixmap.h>
  7
  8#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
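/*
 * PGALLOC_GFP: page-table pages must start out zeroed (__GFP_ZERO), are
 * excluded from kmemcheck tracking (__GFP_NOTRACK), and the allocator is
 * asked to retry harder before giving up (__GFP_REPEAT), since callers of
 * these allocations have no good fallback.
 */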
  9
 10#ifdef CONFIG_HIGHPTE
 11#define PGALLOC_USER_GFP __GFP_HIGHMEM
 12#else
 13#define PGALLOC_USER_GFP 0
 14#endif
 15
 16gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;
 17
 18pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 19{
 20	return (pte_t *)__get_free_page(PGALLOC_GFP);
 21}
 22
 23pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 24{
 25	struct page *pte;
 26
 27	pte = alloc_pages(__userpte_alloc_gfp, 0);
 28	if (!pte)
 29		return NULL;
 30	if (!pgtable_page_ctor(pte)) {
 31		__free_page(pte);
 32		return NULL;
 33	}
 34	return pte;
 35}
 36
 37static int __init setup_userpte(char *arg)
 38{
 39	if (!arg)
 40		return -EINVAL;
 41
 42	/*
 43	 * "userpte=nohigh" disables allocation of user pagetables in
 44	 * high memory.
 45	 */
 46	if (strcmp(arg, "nohigh") == 0)
 47		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
 48	else
 49		return -EINVAL;
 50	return 0;
 51}
 52early_param("userpte", setup_userpte);
 53
 54void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 55{
 56	pgtable_page_dtor(pte);
 57	paravirt_release_pte(page_to_pfn(pte));
 58	tlb_remove_page(tlb, pte);
 59}
 60
 61#if PAGETABLE_LEVELS > 2
 62void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 63{
 64	struct page *page = virt_to_page(pmd);
 65	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
 66	/*
 67	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
 68	 * entries need a full cr3 reload to flush.
 69	 */
 70#ifdef CONFIG_X86_PAE
 71	tlb->need_flush_all = 1;
 72#endif
 73	pgtable_pmd_page_dtor(page);
 74	tlb_remove_page(tlb, page);
 75}
 76
 77#if PAGETABLE_LEVELS > 3
 78void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
 79{
 80	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
 81	tlb_remove_page(tlb, virt_to_page(pud));
 82}
 83#endif	/* PAGETABLE_LEVELS > 3 */
 84#endif	/* PAGETABLE_LEVELS > 2 */
 85
 86static inline void pgd_list_add(pgd_t *pgd)
 87{
 88	struct page *page = virt_to_page(pgd);
 89
 90	list_add(&page->lru, &pgd_list);
 91}
 92
 93static inline void pgd_list_del(pgd_t *pgd)
 94{
 95	struct page *page = virt_to_page(pgd);
 96
 97	list_del(&page->lru);
 98}
 99
100#define UNSHARED_PTRS_PER_PGD				\
101	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
102
103
104static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
105{
106	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
107	virt_to_page(pgd)->index = (pgoff_t)mm;
108}
109
110struct mm_struct *pgd_page_get_mm(struct page *page)
111{
112	return (struct mm_struct *)page->index;
113}
114
115static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
116{
117	/* If the pgd points to a shared pagetable level (either the
118	   ptes in non-PAE, or shared PMD in PAE), then just copy the
119	   references from swapper_pg_dir. */
120	if (PAGETABLE_LEVELS == 2 ||
121	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
122	    PAGETABLE_LEVELS == 4) {
123		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
124				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
125				KERNEL_PGD_PTRS);
126	}
127
128	/* list required to sync kernel mapping updates */
129	if (!SHARED_KERNEL_PMD) {
130		pgd_set_mm(pgd, mm);
131		pgd_list_add(pgd);
132	}
133}
134
135static void pgd_dtor(pgd_t *pgd)
136{
137	if (SHARED_KERNEL_PMD)
138		return;
139
140	spin_lock(&pgd_lock);
141	pgd_list_del(pgd);
142	spin_unlock(&pgd_lock);
143}
144
145/*
146 * List of all pgd's needed for non-PAE so it can invalidate entries
147 * in both cached and uncached pgd's; not needed for PAE since the
148 * kernel pmd is shared. If PAE were not to share the pmd a similar
149 * tactic would be needed. This is essentially codepath-based locking
150 * against pageattr.c; it is the unique case in which a valid change
151 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
152 * vmalloc faults work because attached pagetables are never freed.
153 * -- nyc
154 */
155
156#ifdef CONFIG_X86_PAE
157/*
158 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
159 * updating the top-level pagetable entries to guarantee the
160 * processor notices the update.  Since this is expensive, and
161 * all 4 top-level entries are used almost immediately in a
162 * new process's life, we just pre-populate them here.
163 *
164 * Also, if we're in a paravirt environment where the kernel pmd is
 165 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
166 * and initialize the kernel pmds here.
167 */
168#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
169
170void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
171{
172	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
173
174	/* Note: almost everything apart from _PAGE_PRESENT is
175	   reserved at the pmd (PDPT) level. */
176	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
177
178	/*
179	 * According to Intel App note "TLBs, Paging-Structure Caches,
180	 * and Their Invalidation", April 2007, document 317080-001,
181	 * section 8.1: in PAE mode we explicitly have to flush the
182	 * TLB via cr3 if the top-level pgd is changed...
183	 */
184	flush_tlb_mm(mm);
185}
186#else  /* !CONFIG_X86_PAE */
187
188/* No need to prepopulate any pagetable entries in non-PAE modes. */
189#define PREALLOCATED_PMDS	0
190
191#endif	/* CONFIG_X86_PAE */
192
193static void free_pmds(pmd_t *pmds[])
194{
195	int i;
196
197	for(i = 0; i < PREALLOCATED_PMDS; i++)
198		if (pmds[i]) {
199			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
200			free_page((unsigned long)pmds[i]);
201		}
202}
203
204static int preallocate_pmds(pmd_t *pmds[])
205{
206	int i;
207	bool failed = false;
208
209	for(i = 0; i < PREALLOCATED_PMDS; i++) {
210		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
211		if (!pmd)
212			failed = true;
213		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
214			free_page((unsigned long)pmd);
215			pmd = NULL;
216			failed = true;
217		}
218		pmds[i] = pmd;
219	}
220
221	if (failed) {
222		free_pmds(pmds);
223		return -ENOMEM;
224	}
225
226	return 0;
227}
228
229/*
230 * Mop up any pmd pages which may still be attached to the pgd.
231 * Normally they will be freed by munmap/exit_mmap, but any pmd we
232 * preallocate which never got a corresponding vma will need to be
233 * freed manually.
234 */
235static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
236{
237	int i;
238
239	for(i = 0; i < PREALLOCATED_PMDS; i++) {
240		pgd_t pgd = pgdp[i];
241
242		if (pgd_val(pgd) != 0) {
243			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
244
245			pgdp[i] = native_make_pgd(0);
246
247			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
248			pmd_free(mm, pmd);
249		}
250	}
251}
252
253static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
254{
255	pud_t *pud;
256	int i;
257
258	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
259		return;
260
261	pud = pud_offset(pgd, 0);
262
263	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
264		pmd_t *pmd = pmds[i];
265
266		if (i >= KERNEL_PGD_BOUNDARY)
267			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
268			       sizeof(pmd_t) * PTRS_PER_PMD);
269
270		pud_populate(mm, pud, pmd);
271	}
272}
273
274pgd_t *pgd_alloc(struct mm_struct *mm)
275{
276	pgd_t *pgd;
277	pmd_t *pmds[PREALLOCATED_PMDS];
278
279	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
280
281	if (pgd == NULL)
282		goto out;
283
284	mm->pgd = pgd;
285
286	if (preallocate_pmds(pmds) != 0)
287		goto out_free_pgd;
288
289	if (paravirt_pgd_alloc(mm) != 0)
290		goto out_free_pmds;
291
292	/*
293	 * Make sure that pre-populating the pmds is atomic with
294	 * respect to anything walking the pgd_list, so that they
295	 * never see a partially populated pgd.
296	 */
297	spin_lock(&pgd_lock);
298
299	pgd_ctor(mm, pgd);
300	pgd_prepopulate_pmd(mm, pgd, pmds);
301
302	spin_unlock(&pgd_lock);
303
304	return pgd;
305
306out_free_pmds:
307	free_pmds(pmds);
308out_free_pgd:
309	free_page((unsigned long)pgd);
310out:
311	return NULL;
312}
313
314void pgd_free(struct mm_struct *mm, pgd_t *pgd)
315{
316	pgd_mop_up_pmds(mm, pgd);
317	pgd_dtor(pgd);
318	paravirt_pgd_free(mm, pgd);
319	free_page((unsigned long)pgd);
320}
321
322/*
323 * Used to set accessed or dirty bits in the page table entries
324 * on other architectures. On x86, the accessed and dirty bits
325 * are tracked by hardware. However, do_wp_page calls this function
326 * to also make the pte writeable at the same time the dirty bit is
327 * set. In that case we do actually need to write the PTE.
328 */
329int ptep_set_access_flags(struct vm_area_struct *vma,
330			  unsigned long address, pte_t *ptep,
331			  pte_t entry, int dirty)
332{
333	int changed = !pte_same(*ptep, entry);
334
335	if (changed && dirty) {
336		*ptep = entry;
337		pte_update_defer(vma->vm_mm, address, ptep);
338	}
339
340	return changed;
341}
342
343#ifdef CONFIG_TRANSPARENT_HUGEPAGE
344int pmdp_set_access_flags(struct vm_area_struct *vma,
345			  unsigned long address, pmd_t *pmdp,
346			  pmd_t entry, int dirty)
347{
348	int changed = !pmd_same(*pmdp, entry);
349
350	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
351
352	if (changed && dirty) {
353		*pmdp = entry;
354		pmd_update_defer(vma->vm_mm, address, pmdp);
355		/*
356		 * We had a write-protection fault here and changed the pmd
 357		 * to be more permissive. No need to flush the TLB for that,
358		 * #PF is architecturally guaranteed to do that and in the
359		 * worst-case we'll generate a spurious fault.
360		 */
361	}
362
363	return changed;
364}
365#endif
366
367int ptep_test_and_clear_young(struct vm_area_struct *vma,
368			      unsigned long addr, pte_t *ptep)
369{
370	int ret = 0;
371
372	if (pte_young(*ptep))
373		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
374					 (unsigned long *) &ptep->pte);
375
376	if (ret)
377		pte_update(vma->vm_mm, addr, ptep);
378
379	return ret;
380}
381
382#ifdef CONFIG_TRANSPARENT_HUGEPAGE
383int pmdp_test_and_clear_young(struct vm_area_struct *vma,
384			      unsigned long addr, pmd_t *pmdp)
385{
386	int ret = 0;
387
388	if (pmd_young(*pmdp))
389		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
390					 (unsigned long *)pmdp);
391
392	if (ret)
393		pmd_update(vma->vm_mm, addr, pmdp);
394
395	return ret;
396}
397#endif
398
399int ptep_clear_flush_young(struct vm_area_struct *vma,
400			   unsigned long address, pte_t *ptep)
401{
402	int young;
403
404	young = ptep_test_and_clear_young(vma, address, ptep);
405	if (young)
406		flush_tlb_page(vma, address);
407
408	return young;
409}
410
411#ifdef CONFIG_TRANSPARENT_HUGEPAGE
412int pmdp_clear_flush_young(struct vm_area_struct *vma,
413			   unsigned long address, pmd_t *pmdp)
414{
415	int young;
416
417	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
418
419	young = pmdp_test_and_clear_young(vma, address, pmdp);
420	if (young)
421		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
422
423	return young;
424}
425
426void pmdp_splitting_flush(struct vm_area_struct *vma,
427			  unsigned long address, pmd_t *pmdp)
428{
429	int set;
430	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
431	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
432				(unsigned long *)pmdp);
433	if (set) {
434		pmd_update(vma->vm_mm, address, pmdp);
435		/* need tlb flush only to serialize against gup-fast */
436		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
437	}
438}
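/*
 * The splitting bit is set with an atomic test_and_set_bit() and the
 * following TLB flush exists only to serialize against gup_fast(), which
 * walks page tables with interrupts disabled: the flush's IPIs cannot
 * complete until any such walker has re-enabled interrupts, so no fast-GUP
 * walker can still be inspecting the pmd once splitting proceeds.
 */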
439#endif
440
441/**
442 * reserve_top_address - reserves a hole in the top of kernel address space
 443 * @reserve: size of hole to reserve
444 *
445 * Can be used to relocate the fixmap area and poke a hole in the top
446 * of kernel address space to make room for a hypervisor.
447 */
448void __init reserve_top_address(unsigned long reserve)
449{
450#ifdef CONFIG_X86_32
451	BUG_ON(fixmaps_set > 0);
452	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
453	       (int)-reserve);
454	__FIXADDR_TOP = -reserve - PAGE_SIZE;
455#endif
456}
457
458int fixmaps_set;
459
460void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
461{
462	unsigned long address = __fix_to_virt(idx);
463
464	if (idx >= __end_of_fixed_addresses) {
465		BUG();
466		return;
467	}
468	set_pte_vaddr(address, pte);
469	fixmaps_set++;
470}
471
472void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
473		       pgprot_t flags)
474{
475	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
476}