/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_PGALLOC_H
#define _ALPHA_PGALLOC_H

#include <linux/mm.h>
#include <linux/mmzone.h>

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
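
/*
 * Illustrative sketch only (nothing below is defined in this header): how
 * the generic mm code is expected, roughly, to combine these helpers when a
 * fault needs a new user pte page under an empty pmd.  Local names are
 * hypothetical; the real logic is __pte_alloc() in mm/memory.c.
 *
 *	pgtable_t new = pte_alloc_one(mm, addr);
 *	if (!new)
 *		return -ENOMEM;
 *
 *	spin_lock(&mm->page_table_lock);
 *	if (pmd_none(*pmd))
 *		pmd_populate(mm, pmd, new);	// hand the page to the pmd
 *	else
 *		pte_free(mm, new);		// lost the race; drop our copy
 *	spin_unlock(&mm->page_table_lock);
 */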

/*
 * pmd_populate() gets the pte page as a struct page (pgtable_t); pmd_set()
 * wants the kernel-virtual address of the pte array, so convert via the
 * page's physical address plus PAGE_OFFSET (the direct mapping).
 */
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
{
	pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
}
#define pmd_pgtable(pmd) pmd_page(pmd)

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
	pmd_set(pmd, pte);
}

static inline void
pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_set(pgd, pmd);
}

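/*
 * pgd_alloc() is out of line (arch/alpha/mm/init.c): a new pgd is not just
 * a zeroed page, it also has to be seeded with the kernel's own top-level
 * mappings before it can be handed to a new mm.
 */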
extern pgd_t *pgd_alloc(struct mm_struct *mm);

static inline void
pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

/*
 * pmd and pte pages are single pages from the normal allocator.  __GFP_ZERO
 * matters: a zero-filled table is one whose entries all read as "none".
 */
static inline pmd_t *
pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	return ret;
}

static inline void
pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

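/*
 * Illustrative sketch only: the xxx_kernel() variants take and return plain
 * pte_t pointers and are driven against &init_mm, roughly as the generic
 * __pte_alloc_kernel() in mm/memory.c does (details elided here):
 *
 *	pte_t *new = pte_alloc_one_kernel(&init_mm, addr);
 *	if (!new)
 *		return -ENOMEM;
 *	spin_lock(&init_mm.page_table_lock);
 *	if (pmd_none(*pmd))
 *		pmd_populate_kernel(&init_mm, pmd, new);
 *	else
 *		pte_free_kernel(&init_mm, new);	// someone else won the race
 *	spin_unlock(&init_mm.page_table_lock);
 */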
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	return pte;
}

static inline void
pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

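/*
 * The user-pte variants deal in struct pages (pgtable_t) rather than kernel
 * pointers.  pgtable_page_ctor() registers the page as a page-table page
 * (split ptlock initialisation and accounting); it can fail, in which case
 * the freshly allocated page must be given back.  pte_free() is the matching
 * teardown: destructor first, then release the page.
 */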
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = pte_alloc_one_kernel(mm, address);
	struct page *page;

	if (!pte)
		return NULL;
	page = virt_to_page(pte);
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return page;
}

static inline void
pte_free(struct mm_struct *mm, pgtable_t page)
{
	pgtable_page_dtor(page);
	__free_page(page);
}

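/*
 * No cache of preconstructed page tables is kept on Alpha, so the
 * check_pgt_cache() hook has nothing to do.
 */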
#define check_pgt_cache() do { } while (0)

#endif /* _ALPHA_PGALLOC_H */