// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mm/pgd.c
 *
 * Copyright (C) 1998-2005 Russell King
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <asm/cp15.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

#include "mm.h"

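/*
 * With LPAE the top level of the page tables has only four entries, so a
 * small kmalloc is enough; without LPAE the first level is a full 16K
 * table and needs an order-2 page allocation.
 */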
#ifdef CONFIG_ARM_LPAE
#define __pgd_alloc()	kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL)
#define __pgd_free(pgd)	kfree(pgd)
#else
#define __pgd_alloc()	(pgd_t *)__get_free_pages(GFP_KERNEL, 2)
#define __pgd_free(pgd)	free_pages((unsigned long)pgd, 2)
#endif

/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = __pgd_alloc();
	if (!new_pgd)
		goto no_pgd;

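	/*
	 * Clear the user part of the new table; the kernel and IO entries
	 * are copied from the init page tables below.
	 */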
	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

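	/*
	 * The MMU's table walker fetches entries from memory and may not
	 * snoop the D-cache, so write the new table back before it is used.
	 */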
	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	/*
	 * Allocate PMD table for modules and pkmap mappings.
	 */
	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors. The vectors are always high
		 * with LPAE.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

#ifndef CONFIG_ARM_LPAE
		/*
		 * Modify the PTE pointer to have the correct domain. This
		 * needs to be the vectors domain to avoid the low vectors
		 * being unmapped.
		 */
		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
#endif

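		/*
		 * Copy the first two ptes (the low vectors mapping) from the
		 * init page tables into the new table.
		 */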
		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte + 0, init_pte[0], 0);
		set_pte_ext(new_pte + 1, init_pte[1], 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
	mm_dec_nr_pmds(mm);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	__pgd_free(new_pgd);
no_pgd:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

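	/*
	 * Free the page table levels that pgd_alloc() may have set up for
	 * the first entry (the low vectors mapping).
	 */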
	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	pud = pud_offset(pgd, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	mm_dec_nr_ptes(mm);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
	mm_dec_nr_pmds(mm);
no_pud:
	pgd_clear(pgd);
	pud_free(mm, pud);
no_pgd:
#ifdef CONFIG_ARM_LPAE
	/*
	 * Free modules/pkmap or identity pmd tables.
	 */
	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
		if (pgd_none_or_clear_bad(pgd))
			continue;
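		/*
		 * Entries marked L_PGD_SWAPPER were copied from the kernel's
		 * swapper_pg_dir and must not be freed here.
		 */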
		if (pgd_val(*pgd) & L_PGD_SWAPPER)
			continue;
		pud = pud_offset(pgd, 0);
		if (pud_none_or_clear_bad(pud))
			continue;
		pmd = pmd_offset(pud, 0);
		pud_clear(pud);
		pmd_free(mm, pmd);
		mm_dec_nr_pmds(mm);
		pgd_clear(pgd);
		pud_free(mm, pud);
	}
#endif
	__pgd_free(pgd_base);
}