/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/pgalloc.h
 *
 *  Copyright (C) 2000-2001 Russell King
 */
#ifndef _ASMARM_PGALLOC_H
#define _ASMARM_PGALLOC_H

#include <linux/pagemap.h>

#include <asm/domain.h>
#include <asm/pgtable-hwdef.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_MMU

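/*
 * First-level descriptor templates used when pointing a PMD entry at a
 * second-level page table: the table descriptor type, bit 4 (which some
 * CPU generations require to be set in table descriptors), and the
 * access domain the table belongs to.
 */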
#define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))

#ifdef CONFIG_ARM_LPAE
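/*
 * With LPAE there are three levels: the top level holds PTRS_PER_PGD
 * (four) 64-bit entries, each covering 1GiB of address space.
 */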
#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
}

#else	/* !CONFIG_ARM_LPAE */
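/*
 * A classic (short-descriptor) first-level table is 4096 32-bit
 * entries, i.e. 16KiB: four pages with a 4KiB PAGE_SIZE.
 */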
#define PGD_SIZE	(PAGE_SIZE << 2)

/*
 * Since we have only two-level page tables, these are trivial
 */
#define pmd_alloc_one(mm,addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, pmd)		do { } while (0)
#ifdef CONFIG_KASAN
/* The KASan core unconditionally calls pud_populate() on all architectures */
#define pud_populate(mm,pmd,pte)	do { } while (0)
#else
#define pud_populate(mm,pmd,pte)	BUG()
#endif

#endif	/* CONFIG_ARM_LPAE */

extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

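/*
 * Clean the D-cache lines covering the hardware half of a newly
 * allocated PTE page, so that table walkers which do not snoop the
 * D-cache observe the new entries.
 */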
static inline void clean_pte_table(pte_t *pte)
{
	clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE);
}

/*
 * Allocate one PTE table.
 *
 * This actually allocates two hardware PTE tables, but we wrap this up
 * into one table thus:
 *
 *  +------------+
 *  | Linux pt 0 |
 *  +------------+
 *  | Linux pt 1 |
 *  +------------+
 *  |  h/w pt 0  |
 *  +------------+
 *  |  h/w pt 1  |
 *  +------------+
 */
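
/*
 * On classic (non-LPAE) ARM each region above is 256 entries (1KiB).
 * The h/w halves are what the MMU actually walks (each maps 1MiB); the
 * Linux halves shadow them with full pte_t values carrying the software
 * state (e.g. young/dirty) that the short-descriptor format lacks.
 * PTE_HWTABLE_OFF and PTE_HWTABLE_PTRS locate the h/w half within the
 * page.
 */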

#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
#define __HAVE_ARCH_PTE_ALLOC_ONE
#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte = __pte_alloc_one_kernel(mm);

	if (pte)
		clean_pte_table(pte);

	return pte;
}
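
/*
 * Illustrative only (not part of this header): the generic MM code
 * pairs the two kernel-side helpers roughly like this when it needs a
 * new kernel PTE table (see __pte_alloc_kernel() in mm/memory.c):
 *
 *	pte_t *new = pte_alloc_one_kernel(&init_mm);
 *	if (!new)
 *		return -ENOMEM;
 *	...
 *	pmd_populate_kernel(&init_mm, pmd, new);
 */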

#ifdef CONFIG_HIGHPTE
#define PGTABLE_HIGHMEM __GFP_HIGHMEM
#else
#define PGTABLE_HIGHMEM 0
#endif

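/*
 * User PTE tables may be allocated from highmem when CONFIG_HIGHPTE is
 * enabled; such pages have no permanent kernel mapping, so the D-cache
 * clean is skipped for them below.
 */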
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm)
{
	struct page *pte;

	pte = __pte_alloc_one(mm, GFP_PGTABLE_USER | PGTABLE_HIGHMEM);
	if (!pte)
		return NULL;
	if (!PageHighMem(pte))
		clean_pte_table(page_address(pte));
	return pte;
}

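/*
 * Point the (one or two) hardware first-level entries at the h/w half
 * of the PTE page.  On classic ARM a Linux PMD covers 2MiB while each
 * 256-entry h/w table maps only 1MiB, so two consecutive entries are
 * set, 1KiB (256 * sizeof(pte_t)) apart; LPAE needs just pmdp[0].
 * flush_pmd_entry() then cleans the entries for the table walker.
 */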
static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
				  pmdval_t prot)
{
	pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot;
	pmdp[0] = __pmd(pmdval);
#ifndef CONFIG_ARM_LPAE
	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
#endif
	flush_pmd_entry(pmdp);
}

/*
 * Populate the pmdp entry with a pointer to the pte.  This pmd is part
 * of the mm address space.
 *
 * Ensure that we always set both PMD entries.
 */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	/*
	 * The pmd must be loaded with the physical address of the PTE table
	 */
	__pmd_populate(pmdp, __pa(ptep), _PAGE_KERNEL_TABLE);
}

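/*
 * For user tables on ARMv6+ (non-LPAE) the protection bits come from
 * the boot-time variable user_pmd_table, since some bits (e.g. the PXN
 * table bit) depend on CPU features only known at runtime; older
 * architectures use the static _PAGE_USER_TABLE template.
 */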
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
	extern pmdval_t user_pmd_table;
	pmdval_t prot;

	if (__LINUX_ARM_ARCH__ >= 6 && !IS_ENABLED(CONFIG_ARM_LPAE))
		prot = user_pmd_table;
	else
		prot = _PAGE_USER_TABLE;

	__pmd_populate(pmdp, page_to_phys(ptep), prot);
}
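
/*
 * Illustrative only: the user-side counterpart in the generic MM code
 * (see __pte_alloc() in mm/memory.c) looks roughly like:
 *
 *	pgtable_t new = pte_alloc_one(mm);
 *	if (!new)
 *		return -ENOMEM;
 *	...
 *	pmd_populate(mm, pmd, new);
 */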

#endif /* CONFIG_MMU */

#endif