// SPDX-License-Identifier: GPL-2.0
/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */

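/*
 * Fill the PMD entries covering [addr, end) with 2M leaf mappings,
 * skipping entries that are already present.  The physical address is
 * recovered by subtracting info->offset, so virt == phys + offset.
 */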
static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (pmd_present(*pmd))
			continue;

		set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
	}
}

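/*
 * Populate the PUD entries covering [addr, end).  A 1G leaf entry is
 * installed directly when gbpages are enabled, the entry is free, and the
 * mapping would not extend past the requested region at either edge;
 * otherwise a PMD page is allocated (or reused) and filled by
 * ident_pmd_init().
 */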
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;
		bool use_gbpage;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		/* if this is already a gbpage, this portion is already mapped */
		if (pud_large(*pud))
			continue;

		/* Is using a gbpage allowed? */
		use_gbpage = info->direct_gbpages;

		/* Don't use gbpage if it maps more than the requested region. */
		/* at the beginning: */
		use_gbpage &= ((addr & ~PUD_MASK) == 0);
		/* ... or at the end: */
		use_gbpage &= ((next & ~PUD_MASK) == 0);

		/* Never overwrite existing mappings */
		use_gbpage &= !pud_present(*pud);

		if (use_gbpage) {
			pud_t pudval;

			pudval = __pud((addr - info->offset) | info->page_flag);
			set_pud(pud, pudval);
			continue;
		}

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info, pmd, addr, next);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag));
	}

	return 0;
}

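/*
 * Populate the P4D entries covering [addr, end), allocating a new PUD
 * page table for each entry that is not already present and descending
 * into ident_pud_init() to fill it.
 */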
static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		p4d_t *p4d = p4d_page + p4d_index(addr);
		pud_t *pud;

		next = (addr & P4D_MASK) + P4D_SIZE;
		if (next > end)
			next = end;

		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;

			continue;
		}
		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;

		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;

		set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
	}

	return 0;
}

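/*
 * Build page tables under pgd_page mapping the physical range
 * [pstart, pend) at virtual offset info->offset (zero for a pure
 * identity mapping).  Page-table pages come from info->alloc_pgt_page();
 * entries that already exist are reused rather than overwritten.
 * Returns 0 on success, -ENOMEM if a page-table page cannot be
 * allocated.
 */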
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
	unsigned long next;
	int result;

	/* Set the default pagetable flags if not supplied */
	if (!info->kernpg_flag)
		info->kernpg_flag = _KERNPG_TABLE;

	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	info->kernpg_flag &= __default_kernel_pte_mask;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		p4d_t *p4d;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			p4d = p4d_offset(pgd, 0);
			result = ident_p4d_init(info, p4d, addr, next);
			if (result)
				return result;
			continue;
		}

		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
		if (!p4d)
			return -ENOMEM;
		result = ident_p4d_init(info, p4d, addr, next);
		if (result)
			return result;
		if (pgtable_l5_enabled()) {
			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);
			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
		}
	}

	return 0;
}
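
/*
 * Illustrative usage sketch, not part of this file: a caller supplies an
 * allocator for page-table pages plus the mapping flags, then maps a
 * physical range 1:1.  The example_* names are hypothetical; the
 * x86_mapping_info fields and kernel_ident_mapping_init() are the ones
 * defined above (compare the kexec and compressed-kernel callers).
 */
#if 0
#include <linux/gfp.h>
#include <asm/init.h>
#include <asm/pgtable_types.h>

/* Hand out one zeroed page whenever a new page-table page is needed. */
static void *example_alloc_pgt_page(void *context)
{
	return (void *)get_zeroed_page(GFP_KERNEL);
}

/* Identity-map the physical range [start, end) under @pgd. */
static int example_ident_map(pgd_t *pgd, unsigned long start, unsigned long end)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= example_alloc_pgt_page,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= 0,	/* virt == phys */
	};

	return kernel_ident_mapping_init(&info, pgd, start, end);
}
#endif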