/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_PGALLOC_H
#define __ASM_GENERIC_PGALLOC_H

#ifdef CONFIG_MMU

#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)

10/**
11 * __pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
12 * @mm: the mm_struct of the current context
13 *
14 * This function is intended for architectures that need
15 * anything beyond simple page allocation.
16 *
17 * Return: pointer to the allocated memory or %NULL on error
18 */
19static inline pte_t *__pte_alloc_one_kernel_noprof(struct mm_struct *mm)
20{
21 struct ptdesc *ptdesc = pagetable_alloc_noprof(GFP_PGTABLE_KERNEL &
22 ~__GFP_HIGHMEM, 0);
23
24 if (!ptdesc)
25 return NULL;
26 return ptdesc_address(ptdesc);
27}
28#define __pte_alloc_one_kernel(...) alloc_hooks(__pte_alloc_one_kernel_noprof(__VA_ARGS__))
29
30#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
31/**
32 * pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
33 * @mm: the mm_struct of the current context
34 *
35 * Return: pointer to the allocated memory or %NULL on error
36 */
37static inline pte_t *pte_alloc_one_kernel_noprof(struct mm_struct *mm)
38{
39 return __pte_alloc_one_kernel_noprof(mm);
40}
41#define pte_alloc_one_kernel(...) alloc_hooks(pte_alloc_one_kernel_noprof(__VA_ARGS__))
42#endif
43
44/**
45 * pte_free_kernel - free PTE-level kernel page table memory
46 * @mm: the mm_struct of the current context
47 * @pte: pointer to the memory containing the page table
48 */
49static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
50{
51 pagetable_free(virt_to_ptdesc(pte));
52}
53
54/**
55 * __pte_alloc_one - allocate memory for a PTE-level user page table
56 * @mm: the mm_struct of the current context
57 * @gfp: GFP flags to use for the allocation
58 *
59 * Allocate memory for a page table and ptdesc and runs pagetable_pte_ctor().
60 *
61 * This function is intended for architectures that need
62 * anything beyond simple page allocation or must have custom GFP flags.
63 *
64 * Return: `struct page` referencing the ptdesc or %NULL on error
65 */
66static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp)
67{
68 struct ptdesc *ptdesc;
69
70 ptdesc = pagetable_alloc_noprof(gfp, 0);
71 if (!ptdesc)
72 return NULL;
73 if (!pagetable_pte_ctor(ptdesc)) {
74 pagetable_free(ptdesc);
75 return NULL;
76 }
77
78 return ptdesc_page(ptdesc);
79}
80#define __pte_alloc_one(...) alloc_hooks(__pte_alloc_one_noprof(__VA_ARGS__))
81
82#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
83/**
84 * pte_alloc_one - allocate a page for PTE-level user page table
85 * @mm: the mm_struct of the current context
86 *
87 * Allocate memory for a page table and ptdesc and runs pagetable_pte_ctor().
88 *
89 * Return: `struct page` referencing the ptdesc or %NULL on error
90 */
91static inline pgtable_t pte_alloc_one_noprof(struct mm_struct *mm)
92{
93 return __pte_alloc_one_noprof(mm, GFP_PGTABLE_USER);
94}
95#define pte_alloc_one(...) alloc_hooks(pte_alloc_one_noprof(__VA_ARGS__))
96#endif
97
/*
 * Should really implement gc for free page table pages. This could be
 * done with a reference count in struct page.
 */
102
/**
 * pte_free - free PTE-level user page table memory
 * @mm: the mm_struct of the current context
 * @pte_page: the `struct page` referencing the ptdesc
 */
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
	struct ptdesc *pt = page_ptdesc(pte_page);

	/* Tear down what pagetable_pte_ctor() set up, then release the memory. */
	pagetable_pte_dtor(pt);
	pagetable_free(pt);
}
115
116
#if CONFIG_PGTABLE_LEVELS > 2
118
119#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
120/**
121 * pmd_alloc_one - allocate memory for a PMD-level page table
122 * @mm: the mm_struct of the current context
123 *
124 * Allocate memory for a page table and ptdesc and runs pagetable_pmd_ctor().
125 *
126 * Allocations use %GFP_PGTABLE_USER in user context and
127 * %GFP_PGTABLE_KERNEL in kernel context.
128 *
129 * Return: pointer to the allocated memory or %NULL on error
130 */
131static inline pmd_t *pmd_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
132{
133 struct ptdesc *ptdesc;
134 gfp_t gfp = GFP_PGTABLE_USER;
135
136 if (mm == &init_mm)
137 gfp = GFP_PGTABLE_KERNEL;
138 ptdesc = pagetable_alloc_noprof(gfp, 0);
139 if (!ptdesc)
140 return NULL;
141 if (!pagetable_pmd_ctor(ptdesc)) {
142 pagetable_free(ptdesc);
143 return NULL;
144 }
145 return ptdesc_address(ptdesc);
146}
147#define pmd_alloc_one(...) alloc_hooks(pmd_alloc_one_noprof(__VA_ARGS__))
148#endif
149
150#ifndef __HAVE_ARCH_PMD_FREE
151static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
152{
153 struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
154
155 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
156 pagetable_pmd_dtor(ptdesc);
157 pagetable_free(ptdesc);
158}
159#endif
160
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
162
#if CONFIG_PGTABLE_LEVELS > 3
164
165static inline pud_t *__pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
166{
167 gfp_t gfp = GFP_PGTABLE_USER;
168 struct ptdesc *ptdesc;
169
170 if (mm == &init_mm)
171 gfp = GFP_PGTABLE_KERNEL;
172 gfp &= ~__GFP_HIGHMEM;
173
174 ptdesc = pagetable_alloc_noprof(gfp, 0);
175 if (!ptdesc)
176 return NULL;
177
178 pagetable_pud_ctor(ptdesc);
179 return ptdesc_address(ptdesc);
180}
181#define __pud_alloc_one(...) alloc_hooks(__pud_alloc_one_noprof(__VA_ARGS__))
182
183#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
184/**
185 * pud_alloc_one - allocate memory for a PUD-level page table
186 * @mm: the mm_struct of the current context
187 *
188 * Allocate memory for a page table using %GFP_PGTABLE_USER for user context
189 * and %GFP_PGTABLE_KERNEL for kernel context.
190 *
191 * Return: pointer to the allocated memory or %NULL on error
192 */
193static inline pud_t *pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
194{
195 return __pud_alloc_one_noprof(mm, addr);
196}
197#define pud_alloc_one(...) alloc_hooks(pud_alloc_one_noprof(__VA_ARGS__))
198#endif
199
200static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
201{
202 struct ptdesc *ptdesc = virt_to_ptdesc(pud);
203
204 BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
205 pagetable_pud_dtor(ptdesc);
206 pagetable_free(ptdesc);
207}
208
209#ifndef __HAVE_ARCH_PUD_FREE
210static inline void pud_free(struct mm_struct *mm, pud_t *pud)
211{
212 __pud_free(mm, pud);
213}
214#endif
215
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
217
218#ifndef __HAVE_ARCH_PGD_FREE
219static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
220{
221 pagetable_free(virt_to_ptdesc(pgd));
222}
223#endif
224
#endif /* CONFIG_MMU */

#endif /* __ASM_GENERIC_PGALLOC_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_PGALLOC_H
#define __ASM_GENERIC_PGALLOC_H

#ifdef CONFIG_MMU

#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)

10/**
11 * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
12 * @mm: the mm_struct of the current context
13 *
14 * This function is intended for architectures that need
15 * anything beyond simple page allocation.
16 *
17 * Return: pointer to the allocated memory or %NULL on error
18 */
19static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
20{
21 return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
22}
23
24#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
25/**
26 * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
27 * @mm: the mm_struct of the current context
28 *
29 * Return: pointer to the allocated memory or %NULL on error
30 */
31static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
32{
33 return __pte_alloc_one_kernel(mm);
34}
35#endif
36
37/**
38 * pte_free_kernel - free PTE-level kernel page table page
39 * @mm: the mm_struct of the current context
40 * @pte: pointer to the memory containing the page table
41 */
42static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
43{
44 free_page((unsigned long)pte);
45}
46
47/**
48 * __pte_alloc_one - allocate a page for PTE-level user page table
49 * @mm: the mm_struct of the current context
50 * @gfp: GFP flags to use for the allocation
51 *
52 * Allocates a page and runs the pgtable_pte_page_ctor().
53 *
54 * This function is intended for architectures that need
55 * anything beyond simple page allocation or must have custom GFP flags.
56 *
57 * Return: `struct page` initialized as page table or %NULL on error
58 */
59static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
60{
61 struct page *pte;
62
63 pte = alloc_page(gfp);
64 if (!pte)
65 return NULL;
66 if (!pgtable_pte_page_ctor(pte)) {
67 __free_page(pte);
68 return NULL;
69 }
70
71 return pte;
72}
73
74#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
75/**
76 * pte_alloc_one - allocate a page for PTE-level user page table
77 * @mm: the mm_struct of the current context
78 *
79 * Allocates a page and runs the pgtable_pte_page_ctor().
80 *
81 * Return: `struct page` initialized as page table or %NULL on error
82 */
83static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
84{
85 return __pte_alloc_one(mm, GFP_PGTABLE_USER);
86}
87#endif
88
/*
 * Should really implement gc for free page table pages. This could be
 * done with a reference count in struct page.
 */
93
/**
 * pte_free - free PTE-level user page table page
 * @mm: the mm_struct of the current context
 * @pte_page: the `struct page` representing the page table
 */
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
	/* Tear down what pgtable_pte_page_ctor() set up, then free the page. */
	pgtable_pte_page_dtor(pte_page);
	__free_page(pte_page);
}
104
#endif /* CONFIG_MMU */

#endif /* __ASM_GENERIC_PGALLOC_H */