1/*
2 * linux/arch/arm/mm/pgd.c
3 *
4 * Copyright (C) 1998-2005 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/mm.h>
11#include <linux/gfp.h>
12#include <linux/highmem.h>
13
14#include <asm/pgalloc.h>
15#include <asm/page.h>
16#include <asm/tlbflush.h>
17
18#include "mm.h"
19
/*
 * need to get a 16k page for level 1
 *
 * Allocate and initialise a new first-level (PGD) table for @mm:
 * the user entries are zeroed, the kernel and IO entries are copied
 * from the init tables, and, when the vectors live at address 0
 * (!vectors_high()), a private mapping of the vectors page is
 * installed.  Returns the new table, or NULL if any allocation fails.
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	/* Order-2 allocation: 4 pages = the 16K level-1 table. */
	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	/* User half starts out empty; the kernel half is copied below. */
	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	/* Clean the D-cache over the whole new table. */
	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		/* Copy the kernel's PTE for address 0 (the vectors page)
		 * into the new mm's tables. */
		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

	/* Error unwind: free in reverse order of allocation. */
no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
81
/*
 * Free the tables installed by pgd_alloc() for @pgd_base: tear down
 * the address-0 (vectors) pte/pmd/pud chain, if present, then release
 * the order-2 (16K) PGD itself.  Safe to call with a NULL pgd_base.
 */
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	/* Walk down the entry covering address 0; at each level a
	 * none/bad entry means there is nothing further to free, so
	 * fall through the labels below to release what does exist. */
	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	pud = pud_offset(pgd, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	/* Clear each level before freeing the table it pointed at. */
	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
no_pud:
	pgd_clear(pgd);
	pud_free(mm, pud);
no_pgd:
	free_pages((unsigned long) pgd_base, 2);
}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/arch/arm/mm/pgd.c
4 *
5 * Copyright (C) 1998-2005 Russell King
6 */
7#include <linux/mm.h>
8#include <linux/gfp.h>
9#include <linux/highmem.h>
10#include <linux/slab.h>
11
12#include <asm/cp15.h>
13#include <asm/pgalloc.h>
14#include <asm/page.h>
15#include <asm/tlbflush.h>
16
17#include "mm.h"
18
/*
 * How the level-1 table is obtained: with LPAE it is a
 * PTRS_PER_PGD-entry array from the slab; classic ARM takes an
 * order-2 (4-page, 16K) block from the page allocator.
 */
#ifdef CONFIG_ARM_LPAE
#define __pgd_alloc() kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL)
#define __pgd_free(pgd) kfree(pgd)
#else
#define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL, 2)
#define __pgd_free(pgd) free_pages((unsigned long)pgd, 2)
#endif
26
/*
 * need to get a 16k page for level 1
 *
 * Allocate and initialise a new first-level (PGD) table for @mm:
 * zero the user entries, copy the kernel and IO entries from the init
 * tables, pre-allocate the modules/pkmap PMD on LPAE, and, when the
 * vectors live at address 0 (!vectors_high()), install a private
 * mapping of the vectors pages.  Returns NULL if any allocation fails.
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	p4d_t *new_p4d, *init_p4d;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = __pgd_alloc();
	if (!new_pgd)
		goto no_pgd;

	/* User half starts out empty; the kernel half is copied below. */
	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	/* Clean the D-cache over the whole new table. */
	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	/*
	 * Allocate PMD table for modules and pkmap mappings.
	 */
	new_p4d = p4d_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_p4d)
		goto no_p4d;

	new_pud = pud_alloc(mm, new_p4d, MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors. The vectors are always high
		 * with LPAE.
		 */
		new_p4d = p4d_alloc(mm, new_pgd, 0);
		if (!new_p4d)
			goto no_p4d;

		new_pud = pud_alloc(mm, new_p4d, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

#ifndef CONFIG_ARM_LPAE
		/*
		 * Modify the PTE pointer to have the correct domain. This
		 * needs to be the vectors domain to avoid the low vectors
		 * being unmapped.
		 */
		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
#endif

		/* Copy the kernel's two PTEs covering address 0 (the
		 * vectors pages) into the new mm's tables. */
		init_p4d = p4d_offset(init_pgd, 0);
		init_pud = pud_offset(init_p4d, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte + 0, init_pte[0], 0);
		set_pte_ext(new_pte + 1, init_pte[1], 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

	/* Error unwind: free in reverse order of allocation. */
no_pte:
	pmd_free(mm, new_pmd);
	mm_dec_nr_pmds(mm);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	p4d_free(mm, new_p4d);
no_p4d:
	__pgd_free(new_pgd);
no_pgd:
	return NULL;
}
127
/*
 * Free the tables installed by pgd_alloc() for @pgd_base: tear down
 * the address-0 (vectors) pte/pmd/pud/p4d chain if present, release
 * any per-mm LPAE pmd tables (e.g. modules/pkmap), then free the PGD
 * itself via __pgd_free().  Safe to call with a NULL pgd_base.
 */
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	/* Walk down the entry covering address 0; at each level a
	 * none/bad entry means nothing further to free, so fall through
	 * the labels below to release what does exist. */
	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	p4d = p4d_offset(pgd, 0);
	if (p4d_none_or_clear_bad(p4d))
		goto no_p4d;

	pud = pud_offset(p4d, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	/* Clear each level before freeing the table it pointed at,
	 * keeping mm's page-table accounting in step. */
	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	mm_dec_nr_ptes(mm);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
	mm_dec_nr_pmds(mm);
no_pud:
	p4d_clear(p4d);
	pud_free(mm, pud);
no_p4d:
	pgd_clear(pgd);
	p4d_free(mm, p4d);
no_pgd:
#ifdef CONFIG_ARM_LPAE
	/*
	 * Free modules/pkmap or identity pmd tables.
	 */
	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
		if (pgd_none_or_clear_bad(pgd))
			continue;
		/* NOTE(review): L_PGD_SWAPPER appears to mark entries
		 * copied from the init tables — skipped, not freed. */
		if (pgd_val(*pgd) & L_PGD_SWAPPER)
			continue;
		p4d = p4d_offset(pgd, 0);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pud = pud_offset(p4d, 0);
		if (pud_none_or_clear_bad(pud))
			continue;
		pmd = pmd_offset(pud, 0);
		pud_clear(pud);
		pmd_free(mm, pmd);
		mm_dec_nr_pmds(mm);
		p4d_clear(p4d);
		pud_free(mm, pud);
		mm_dec_nr_puds(mm);
		pgd_clear(pgd);
		p4d_free(mm, p4d);
	}
#endif
	__pgd_free(pgd_base);
}