// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>

static struct kmem_cache *pgd_cachep;
#if PAGETABLE_LEVELS > 2
static struct kmem_cache *pmd_cachep;
#endif

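/*
 * Slab constructor for the PGD cache: zero the user part of a new PGD
 * and copy the kernel mappings from the init page table, so every
 * object handed out by the cache starts out fully initialized.
 */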
static void pgd_ctor(void *x)
{
	pgd_t *pgd = x;

	memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
	memcpy(pgd + USER_PTRS_PER_PGD,
	       swapper_pg_dir + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

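/*
 * Create the slab caches backing PGD (and, on configurations with more
 * than two page table levels, PMD) allocations; SLAB_PANIC makes boot
 * fail loudly if either cache cannot be set up.
 */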
void pgtable_cache_init(void)
{
	pgd_cachep = kmem_cache_create("pgd_cache",
				       PTRS_PER_PGD * (1<<PTE_MAGNITUDE),
				       PAGE_SIZE, SLAB_PANIC, pgd_ctor);
#if PAGETABLE_LEVELS > 2
	pmd_cachep = kmem_cache_create("pmd_cache",
				       PTRS_PER_PMD * (1<<PTE_MAGNITUDE),
				       PAGE_SIZE, SLAB_PANIC, NULL);
#endif
}

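/* Hand out a PGD; the cache constructor has already initialized it. */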
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(pgd_cachep, GFP_KERNEL);
}

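/* Return a PGD to its cache. */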
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(pgd_cachep, pgd);
}

#if PAGETABLE_LEVELS > 2
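/* Make the PUD entry reference the given PMD. */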
void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud((unsigned long)pmd));
}

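/* Allocate a zero-filled PMD page table. */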
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return kmem_cache_alloc(pmd_cachep, GFP_KERNEL | __GFP_ZERO);
}

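/* Return a PMD to its cache. */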
void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(pmd_cachep, pmd);
}
#endif /* PAGETABLE_LEVELS > 2 */