v3.1: arch/x86/mm/pgtable.c
 
#include <linux/mm.h>
#include <linux/gfp.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);
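
For context: "userpte=nohigh" is a boot command-line switch, and its handler above does nothing more than strip __GFP_HIGHMEM from the mask used for user PTE pages. A minimal userspace sketch of that mask manipulation (the flag values below are stand-ins, not the kernel's real GFP bit encodings):

    #include <stdio.h>

    /* stand-in flag values, NOT the kernel's real GFP layout */
    #define PGALLOC_GFP    0x01u   /* stands in for GFP_KERNEL | __GFP_ZERO | ... */
    #define __GFP_HIGHMEM  0x02u

    int main(void)
    {
        unsigned int userpte_gfp = PGALLOC_GFP | __GFP_HIGHMEM;

        /* the effect of setup_userpte("nohigh"): */
        userpte_gfp &= ~__GFP_HIGHMEM;

        printf("user PTE gfp: %#x\n", userpte_gfp);  /* prints 0x1 */
        return 0;
    }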

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)


static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
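
To make that comment concrete, here is a hedged sketch of the consumer side, modeled on the pgd_list walkers elsewhere in arch/x86/mm (the 32-bit vmalloc_sync_all() and the pageattr.c code); simplified and illustrative, not a verbatim excerpt of this file:

    struct page *page;

    spin_lock(&pgd_lock);
    list_for_each_entry(page, &pgd_list, lru) {
        pgd_t *pgd = (pgd_t *)page_address(page);

        /* propagate the kernel-mapping change into this cached pgd,
         * e.g. set_pgd(pgd + pgd_index(address), <new kernel entry>); */
    }
    spin_unlock(&pgd_lock);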

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

static void free_pmds(pmd_t *pmds[])
{
	int i;

	for(i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i])
			free_page((unsigned long)pmds[i]);
}

static int preallocate_pmds(pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for(i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
		if (pmd == NULL)
			failed = true;
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for(i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	unsigned long addr;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (addr = i = 0; i < PREALLOCATED_PMDS;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}
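
For orientation, the usual callers of this pair are mm creation and teardown. A simplified sketch of the caller side, modeled on kernel/fork.c (not part of this file):

    static int mm_alloc_pgd(struct mm_struct *mm)
    {
        mm->pgd = pgd_alloc(mm);       /* arch hook defined above */
        if (unlikely(!mm->pgd))
            return -ENOMEM;
        return 0;
    }

    static void mm_free_pgd(struct mm_struct *mm)
    {
        pgd_free(mm, mm->pgd);         /* arch hook defined above */
    }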

int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		flush_tlb_page(vma, address);
	}

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		pmd_update_defer(vma->vm_mm, address, pmdp);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}

	return changed;
}
#endif

int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	if (ret)
		pmd_update(vma->vm_mm, addr, pmdp);

	return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}

void pmdp_splitting_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	int set;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
				(unsigned long *)pmdp);
	if (set) {
		pmd_update(vma->vm_mm, address, pmdp);
		/* need tlb flush only to serialize against gup-fast */
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
#endif
}

int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}
v4.17: arch/x86/mm/pgtable.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_table(tlb, pte);
}

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pgtable_pmd_page_dtor(page);
	tlb_remove_table(tlb, page);
}

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_table(tlb, virt_to_page(pud));
}

#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
	paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
	tlb_remove_table(tlb, virt_to_page(p4d));
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)


static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (CONFIG_PGTABLE_LEVELS == 2 ||
	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    CONFIG_PGTABLE_LEVELS >= 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;

	for(i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
			mm_dec_nr_pmds(mm);
		}
}

static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;
	bool failed = false;
	gfp_t gfp = PGALLOC_GFP;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;

	for(i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		if (pmd)
			mm_inc_nr_pmds(mm);
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(mm, pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for(i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
			mm_dec_nr_pmds(mm);
		}
	}
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	p4d_t *p4d;
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

/*
 * Xen paravirt assumes pgd table should be in one page. 64 bit kernel also
 * assumes that pgd should be in one page.
 *
 * But kernel with PAE paging that is not running as a Xen domain
 * only needs to allocate 32 bytes for pgd instead of one page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN	32

static struct kmem_cache *pgd_cache;

static int __init pgd_cache_init(void)
{
	/*
	 * When PAE kernel is running as a Xen domain, it does not use
	 * shared kernel pmd. And this requires a whole page for pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return 0;

	/*
	 * when PAE kernel is not running as a Xen domain, it uses
	 * shared kernel pmd. Shared kernel pmd does not require a whole
	 * page for pgd. We are able to just allocate a 32-byte for pgd.
	 * During boot time, we create a 32-byte slab for pgd table allocation.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
				      SLAB_PANIC, NULL);
	if (!pgd_cache)
		return -ENOMEM;

	return 0;
}
core_initcall(pgd_cache_init);

static inline pgd_t *_pgd_alloc(void)
{
	/*
	 * If no SHARED_KERNEL_PMD, PAE kernel is running as a Xen domain.
	 * We allocate one page for pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return (pgd_t *)__get_free_page(PGALLOC_GFP);

	/*
	 * Now PAE kernel is not running as a Xen domain. We can allocate
	 * a 32-byte slab for pgd to save memory space.
	 */
	return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
	if (!SHARED_KERNEL_PMD)
		free_page((unsigned long)pgd);
	else
		kmem_cache_free(pgd_cache, pgd);
}
#else

static inline pgd_t *_pgd_alloc(void)
{
	return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
}

static inline void _pgd_free(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
}
#endif /* CONFIG_X86_PAE */
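
The 32-byte figure follows directly from the PAE layout: the top-level table (the PDPT) has four 64-bit entries. A runnable check of that arithmetic, with stand-in types for the kernel's:

    #include <stdio.h>
    #include <stdint.h>

    #define PTRS_PER_PGD 4           /* PAE: four PDPT entries */
    typedef uint64_t pgd_t;          /* stand-in: each entry is 8 bytes */

    int main(void)
    {
        printf("PGD_SIZE = %zu bytes\n",
               (size_t)PTRS_PER_PGD * sizeof(pgd_t));  /* prints 32 */
        return 0;
    }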

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = _pgd_alloc();

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(mm, pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(mm, pmds);
out_free_pgd:
	_pgd_free(pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	_pgd_free(pgd);
}

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty)
		*ptep = entry;

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}

int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pud_t *pudp, pud_t entry, int dirty)
{
	int changed = !pud_same(*pudp, entry);

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);

	if (changed && dirty) {
		*pudp = entry;
		/*
		 * We had a write-protection fault here and changed the pud
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif

int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	return ret;
}
int pudp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pud_t *pudp)
{
	int ret = 0;

	if (pud_young(*pudp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pudp);

	return ret;
}
#endif
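
The accessed-bit handling above boils down to an atomic test-and-clear of bit 5 (_PAGE_BIT_ACCESSED) in the entry. A plain-C sketch of the semantics, noting that the kernel's test_and_clear_bit is atomic while this toy version is not:

    #include <stdio.h>
    #include <stdint.h>

    #define _PAGE_BIT_ACCESSED 5     /* the x86 PTE "Accessed" bit */

    static int test_and_clear_bit(int nr, uint64_t *word)
    {
        int old = (int)((*word >> nr) & 1);

        *word &= ~(1ULL << nr);      /* clear the bit, report old value */
        return old;
    }

    int main(void)
    {
        uint64_t pte = (1ULL << _PAGE_BIT_ACCESSED) | 1; /* accessed + present */

        printf("young: %d\n", test_and_clear_bit(_PAGE_BIT_ACCESSED, &pte)); /* 1 */
        printf("young: %d\n", test_and_clear_bit(_PAGE_BIT_ACCESSED, &pte)); /* 0 */
        return 0;
    }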

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	/*
	 * On x86 CPUs, clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}

int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __default_kernel_pte_mask;

	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
#ifdef CONFIG_X86_5LEVEL
/**
 * p4d_set_huge - setup kernel P4D mapping
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

/**
 * p4d_clear_huge - clear kernel P4D mapping when it is set
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_clear_huge(p4d_t *p4d)
{
	return 0;
}
#endif

/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if any of the following conditions are met:
 *
 * - MTRRs are disabled, or
 *
 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
 *
 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
 *   has no effect on the requested PAT memory type.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK))
		return 0;

	/* Bail out if we are on a populated non-leaf entry: */
	if (pud_present(*pud) && !pud_huge(*pud))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pud, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}
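
For scale: the range checked against a single MTRR above is PUD_SIZE, i.e. one 1 GiB mapping, while pmd_set_huge() below covers a 2 MiB PMD_SIZE. A runnable restatement of those x86-64 constants (the shifts are the architectural values, restated here rather than taken from the kernel headers):

    #include <stdio.h>

    #define PMD_SHIFT 21             /* x86-64: 2 MiB pmd mappings */
    #define PUD_SHIFT 30             /* x86-64: 1 GiB pud mappings */

    int main(void)
    {
        printf("PMD_SIZE = %lu MiB\n", (1UL << PMD_SHIFT) >> 20); /* 2 */
        printf("PUD_SIZE = %lu GiB\n", (1UL << PUD_SHIFT) >> 30); /* 1 */
        return 0;
    }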

/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See text over pud_set_huge() above.
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK)) {
		pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
			     __func__, addr, addr + PMD_SIZE);
		return 0;
	}

	/* Bail out if we are on a populated non-leaf entry: */
	if (pmd_present(*pmd) && !pmd_huge(*pmd))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pmd, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}

/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
{
	if (pud_large(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}

/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_large(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}

/**
 * pud_free_pmd_page - Clear pud entry and free pmd page.
 * @pud: Pointer to a PUD.
 *
 * Context: The pud range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pud_free_pmd_page(pud_t *pud)
{
	pmd_t *pmd;
	int i;

	if (pud_none(*pud))
		return 1;

	pmd = (pmd_t *)pud_page_vaddr(*pud);

	for (i = 0; i < PTRS_PER_PMD; i++)
		if (!pmd_free_pte_page(&pmd[i]))
			return 0;

	pud_clear(pud);
	free_page((unsigned long)pmd);

	return 1;
}

/**
 * pmd_free_pte_page - Clear pmd entry and free pte page.
 * @pmd: Pointer to a PMD.
 *
 * Context: The pmd range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pmd_free_pte_page(pmd_t *pmd)
{
	pte_t *pte;

	if (pmd_none(*pmd))
		return 1;

	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);
	free_page((unsigned long)pte);

	return 1;
}
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
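
These two free helpers exist for huge-page ioremap: before installing a 2 MiB leaf over a slot that previously held a pte table, the caller must clear and free that table. A hedged sketch of the caller side, modeled loosely on lib/ioremap.c of this era; the helper name and exact checks are illustrative, not a verbatim excerpt:

    /* hypothetical helper shaped like the ioremap_pmd_range() fast path */
    static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
                                    unsigned long end, phys_addr_t phys_addr,
                                    pgprot_t prot)
    {
        if ((end - addr) != PMD_SIZE || !IS_ALIGNED(phys_addr, PMD_SIZE))
            return 0;               /* range is not one aligned 2 MiB block */

        if (!pmd_free_pte_page(pmd))    /* drop any stale pte table first */
            return 0;

        return pmd_set_huge(pmd, phys_addr, prot);
    }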