#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

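/*
 * GFP flags used for page-table pages: allocations are zeroed
 * (__GFP_ZERO), excluded from kmemcheck tracking (__GFP_NOTRACK) and
 * retried harder before failing (__GFP_REPEAT).
 */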
#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

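/*
 * Kernel page-table pages always come from lowmem; user PTE pages may
 * additionally be placed in highmem when CONFIG_HIGHPTE is enabled
 * (see __userpte_alloc_gfp above, tunable via "userpte=nohigh").
 */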
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

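/*
 * Free a PTE page via the mmu_gather: run the page-table destructor,
 * tell any paravirt backend the page is no longer a page table, and
 * defer the actual free until the gathered TLB flush has been done.
 */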
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pgtable_pmd_page_dtor(page);
	tlb_remove_page(tlb, page);
}

#if PAGETABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

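/*
 * The pgd page's struct page ->index field is reused to record which
 * mm_struct owns the pgd, so walkers of pgd_list (e.g. the kernel
 * mapping sync code) can find the owning mm.
 */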
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

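/*
 * Undo pgd_ctor: when the kernel pmd is not shared, take the pgd off
 * pgd_list under pgd_lock so kernel mapping updates stop touching it.
 */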
static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
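/*
 * With PAE there are only four top-level (PDPT) entries, so at most
 * four pmd pages are preallocated per pgd.
 */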
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

static void free_pmds(pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
		}
}

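/*
 * Allocate one page for each pmd slot that needs prepopulating.  On any
 * failure every page allocated so far is freed again and -ENOMEM is
 * returned, so the caller either gets a complete set or nothing.
 */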
static int preallocate_pmds(pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

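/*
 * Wire the preallocated pmd pages into the new pgd.  Slots at or above
 * KERNEL_PGD_BOUNDARY cover the kernel mapping, so their pmd contents
 * are copied from swapper_pg_dir before being installed.
 */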
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

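/*
 * Allocate and set up a new pgd: allocate the page, preallocate any
 * pmds it needs, then (under pgd_lock, so pgd_list walkers never see a
 * half-initialised pgd) clone the kernel mappings and install the pmds.
 */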
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
	}

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		pmd_update_defer(vma->vm_mm, address, pmdp);
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif

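/*
 * Atomically clear the Accessed bit in a pte and report whether it was
 * set.  No TLB flush is done here; callers that need one use
 * ptep_clear_flush_young() below.
 */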
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	if (ret)
		pmd_update(vma->vm_mm, addr, pmdp);

	return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}

void pmdp_splitting_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	int set;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
				(unsigned long *)pmdp);
	if (set) {
		pmd_update(vma->vm_mm, address, pmdp);
		/* need tlb flush only to serialize against gup-fast */
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
#endif
}

int fixmaps_set;

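/*
 * Install a pte for a fixmap slot.  __native_set_fixmap() takes a
 * ready-made pte; native_set_fixmap() builds one from a physical
 * address and protection flags.  fixmaps_set counts installed entries
 * so reserve_top_address() can refuse to move the fixmap afterwards.
 */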
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}