// SPDX-License-Identifier: GPL-2.0
/*
 * Common EFI memory map functions.
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <asm/early_ioremap.h>
#include <linux/memblock.h>
#include <linux/slab.h>

static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
{
	return memblock_phys_alloc(size, SMP_CACHE_BYTES);
}

static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
{
	unsigned int order = get_order(size);
	struct page *p = alloc_pages(GFP_KERNEL, order);

	if (!p)
		return 0;

	return PFN_PHYS(page_to_pfn(p));
}

void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
{
	if (flags & EFI_MEMMAP_MEMBLOCK) {
		if (slab_is_available())
			memblock_free_late(phys, size);
		else
			memblock_free(phys, size);
	} else if (flags & EFI_MEMMAP_SLAB) {
		struct page *p = pfn_to_page(PHYS_PFN(phys));
		unsigned int order = get_order(size);

		free_pages((unsigned long) page_address(p), order);
	}
}

static void __init efi_memmap_free(void)
{
	__efi_memmap_free(efi.memmap.phys_map,
			  efi.memmap.desc_size * efi.memmap.nr_map,
			  efi.memmap.flags);
}

/**
 * efi_memmap_alloc - Allocate memory for the EFI memory map
 * @num_entries: Number of entries in the allocated map.
 * @data: efi memmap installation parameters
 *
 * Depending on whether mm_init() has already been invoked or not,
 * either memblock or "normal" page allocation is used.
 *
 * Returns zero on success, a negative error code on failure. The
 * physical address of the allocated map is recorded in @data->phys_map.
 */
int __init efi_memmap_alloc(unsigned int num_entries,
			    struct efi_memory_map_data *data)
{
	/* Expect allocation parameters are zero initialized */
	WARN_ON(data->phys_map || data->size);

	data->size = num_entries * efi.memmap.desc_size;
	data->desc_version = efi.memmap.desc_version;
	data->desc_size = efi.memmap.desc_size;
	data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK);
	data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE;

	if (slab_is_available()) {
		data->flags |= EFI_MEMMAP_SLAB;
		data->phys_map = __efi_memmap_alloc_late(data->size);
	} else {
		data->flags |= EFI_MEMMAP_MEMBLOCK;
		data->phys_map = __efi_memmap_alloc_early(data->size);
	}

	if (!data->phys_map)
		return -ENOMEM;
	return 0;
}
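
/*
 * Illustrative usage sketch (not part of the original file): a caller
 * growing the map by one entry might allocate the backing storage like
 * this. The helper name and the extra-entry count are hypothetical; a
 * real caller would go on to fill the buffer and install it (see
 * efi_memmap_insert() and efi_memmap_install() below).
 */
static int __init example_memmap_alloc(void)
{
	struct efi_memory_map_data data = {};
	unsigned int num_entries = efi.memmap.nr_map + 1;
	int ret;

	ret = efi_memmap_alloc(num_entries, &data);
	if (ret)
		return ret;

	/* data.phys_map now holds the physical address of the buffer */
	return 0;
}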

/**
 * __efi_memmap_init - Common code for mapping the EFI memory map
 * @data: EFI memory map data
 *
 * This function takes care of figuring out which function to use to
 * map the EFI memory map in efi.memmap based on how far into the boot
 * we are.
 *
 * During bootup EFI_MEMMAP_LATE in data->flags should be clear since we
 * only have access to the early_memremap*() functions as the vmalloc
 * space isn't set up. Once the kernel is fully booted we can switch to
 * the more robust memremap*() API.
 *
 * Returns zero on success, a negative error code on failure.
 */
static int __init __efi_memmap_init(struct efi_memory_map_data *data)
{
	struct efi_memory_map map;
	phys_addr_t phys_map;

	if (efi_enabled(EFI_PARAVIRT))
		return 0;

	phys_map = data->phys_map;

	if (data->flags & EFI_MEMMAP_LATE)
		map.map = memremap(phys_map, data->size, MEMREMAP_WB);
	else
		map.map = early_memremap(phys_map, data->size);

	if (!map.map) {
		pr_err("Could not map the memory map!\n");
		return -ENOMEM;
	}

	/* NOP if data->flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB) == 0 */
	efi_memmap_free();

	map.phys_map = data->phys_map;
	map.nr_map = data->size / data->desc_size;
	map.map_end = map.map + data->size;

	map.desc_version = data->desc_version;
	map.desc_size = data->desc_size;
	map.flags = data->flags;

	set_bit(EFI_MEMMAP, &efi.flags);

	efi.memmap = map;

	return 0;
}

/**
 * efi_memmap_init_early - Map the EFI memory map data structure
 * @data: EFI memory map data
 *
 * Use early_memremap() to map the passed in EFI memory map and assign
 * it to efi.memmap.
 */
int __init efi_memmap_init_early(struct efi_memory_map_data *data)
{
	/* Cannot go backwards */
	WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);

	data->flags = 0;
	return __efi_memmap_init(data);
}
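
/*
 * Illustrative usage sketch (not part of the original file): early boot
 * code that received the memory map from the EFI stub or boot loader
 * could wire it up like this. The function and parameter names are
 * hypothetical; the real values come from the architecture's boot data.
 */
static int __init example_memmap_init_early(phys_addr_t memmap_phys,
					    unsigned long memmap_size,
					    unsigned long desc_size,
					    unsigned long desc_version)
{
	struct efi_memory_map_data data = {
		.phys_map	= memmap_phys,
		.size		= memmap_size,
		.desc_size	= desc_size,
		.desc_version	= desc_version,
	};

	return efi_memmap_init_early(&data);
}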

void __init efi_memmap_unmap(void)
{
	if (!efi_enabled(EFI_MEMMAP))
		return;

	if (!(efi.memmap.flags & EFI_MEMMAP_LATE)) {
		unsigned long size;

		size = efi.memmap.desc_size * efi.memmap.nr_map;
		early_memunmap(efi.memmap.map, size);
	} else {
		memunmap(efi.memmap.map);
	}

	efi.memmap.map = NULL;
	clear_bit(EFI_MEMMAP, &efi.flags);
}

/**
 * efi_memmap_init_late - Map efi.memmap with memremap()
 * @addr: Physical address of the new EFI memory map
 * @size: Size in bytes of the new EFI memory map
 *
 * Set up a mapping of the EFI memory map using memremap(). This
 * function should only be called once the vmalloc space has been
 * set up and is therefore not suitable for calling during early EFI
 * initialisation, e.g. in efi_init(). Additionally, it expects
 * efi_memmap_init_early() to have already been called.
 *
 * The reason there are two EFI memmap initialisation functions
 * (efi_memmap_init_early() and this late version) is that the
 * early EFI memmap should be explicitly unmapped once EFI
 * initialisation is complete as the fixmap space used to map the EFI
 * memmap (via early_memremap()) is a scarce resource.
 *
 * This late mapping is intended to persist for the duration of
 * runtime so that things like efi_mem_desc_lookup() and
 * efi_mem_attributes() always work.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
{
	struct efi_memory_map_data data = {
		.phys_map = addr,
		.size = size,
		.flags = EFI_MEMMAP_LATE,
	};

	/* Did we forget to unmap the early EFI memmap? */
	WARN_ON(efi.memmap.map);

	/* Were we already called? */
	WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);

	/*
	 * It makes no sense to allow callers to register different
	 * values for the following fields. Copy them out of the
	 * existing early EFI memmap.
	 */
	data.desc_version = efi.memmap.desc_version;
	data.desc_size = efi.memmap.desc_size;

	return __efi_memmap_init(&data);
}
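
/*
 * Illustrative usage sketch (not part of the original file): once the
 * kernel is fully up, architecture code typically tears down the
 * scarce early mapping and re-maps the same physical map for runtime
 * use. The helper name is hypothetical; the flow mirrors what the
 * runtime-services setup code does.
 */
static int __init example_switch_to_late_map(void)
{
	phys_addr_t phys = efi.memmap.phys_map;
	unsigned long size = efi.memmap.desc_size * efi.memmap.nr_map;

	/* release the early_memremap() mapping first */
	efi_memmap_unmap();

	return efi_memmap_init_late(phys, size);
}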

/**
 * efi_memmap_install - Install a new EFI memory map in efi.memmap
 * @data: map allocation parameters (address, size, flags)
 *
 * Unlike efi_memmap_init_*(), this function does not allow the caller
 * to switch from early to late mappings. It simply uses the existing
 * mapping function and installs the new memmap.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_install(struct efi_memory_map_data *data)
{
	efi_memmap_unmap();

	return __efi_memmap_init(data);
}

/**
 * efi_memmap_split_count - Count number of additional EFI memmap entries
 * @md: EFI memory descriptor to split
 * @range: Address range (start, end) to split around
 *
 * Returns the number of additional EFI memmap entries required to
 * accommodate @range.
 */
int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
{
	u64 m_start, m_end;
	u64 start, end;
	int count = 0;

	start = md->phys_addr;
	end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;

	/* modifying range */
	m_start = range->start;
	m_end = range->end;

	if (m_start <= start) {
		/* split into 2 parts */
		if (start < m_end && m_end < end)
			count++;
	}

	if (start < m_start && m_start < end) {
		/* split into 3 parts */
		if (m_end < end)
			count += 2;
		/* split into 2 parts */
		if (end <= m_end)
			count++;
	}

	return count;
}
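
/*
 * Illustrative worked example (not part of the original file): a 16K
 * descriptor at 0x1000 split around the page [0x2000, 0x2fff] leaves a
 * piece before, inside and after the range, so two additional entries
 * are needed. The addresses and the helper name are made up for the
 * example.
 */
static int __init example_split_count(void)
{
	efi_memory_desc_t md = {
		.phys_addr	= 0x1000,
		.num_pages	= 4,	/* covers [0x1000, 0x4fff] */
	};
	struct range r = {
		.start	= 0x2000,
		.end	= 0x2fff,
	};

	return efi_memmap_split_count(&md, &r);	/* 2 */
}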

/**
 * efi_memmap_insert - Insert a memory region in an EFI memmap
 * @old_memmap: The existing EFI memory map structure
 * @buf: Address of buffer to store new map
 * @mem: Memory map entry to insert
 *
 * It is suggested that you call efi_memmap_split_count() first
 * to see how large @buf needs to be.
 */
void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
			      struct efi_mem_range *mem)
{
	u64 m_start, m_end, m_attr;
	efi_memory_desc_t *md;
	u64 start, end;
	void *old, *new;

	/* modifying range */
	m_start = mem->range.start;
	m_end = mem->range.end;
	m_attr = mem->attribute;

	/*
	 * The EFI memory map deals with regions in EFI_PAGE_SIZE
	 * units. Ensure that the region described by 'mem' is aligned
	 * correctly.
	 */
	if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) ||
	    !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) {
		WARN_ON(1);
		return;
	}

	for (old = old_memmap->map, new = buf;
	     old < old_memmap->map_end;
	     old += old_memmap->desc_size, new += old_memmap->desc_size) {

		/* copy original EFI memory descriptor */
		memcpy(new, old, old_memmap->desc_size);
		md = new;
		start = md->phys_addr;
		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;

		if (m_start <= start && end <= m_end)
			md->attribute |= m_attr;

		if (m_start <= start &&
		    (start < m_end && m_end < end)) {
			/* first part */
			md->attribute |= m_attr;
			md->num_pages = (m_end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
		}

		if ((start < m_start && m_start < end) && m_end < end) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* middle part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->attribute |= m_attr;
			md->phys_addr = m_start;
			md->num_pages = (m_end - m_start + 1) >>
				EFI_PAGE_SHIFT;
			/* last part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - m_end) >>
				EFI_PAGE_SHIFT;
		}

		if ((start < m_start && m_start < end) &&
		    (end <= m_end)) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_start;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			md->attribute |= m_attr;
		}
	}
}
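
/*
 * Illustrative usage sketch (not part of the original file): marking a
 * page-aligned region as EFI_MEMORY_RUNTIME, roughly the way the x86
 * efi_arch_mem_reserve() path does it - find the descriptor, count the
 * extra entries, allocate a larger map, build it with the new range
 * inserted and install the result. The helper name, the attribute and
 * the use of memremap() (i.e. a late caller) are assumptions.
 */
static int __init example_memmap_reserve(phys_addr_t addr, u64 size)
{
	struct efi_mem_range mr = {
		.range.start	= addr,
		.range.end	= addr + size - 1,
		.attribute	= EFI_MEMORY_RUNTIME,
	};
	struct efi_memory_map_data data = {};
	efi_memory_desc_t *md, *target = NULL;
	unsigned int num_entries;
	void *new;
	int ret;

	/* find the existing descriptor that contains the region */
	for_each_efi_memory_desc(md) {
		u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);

		if (addr >= md->phys_addr && addr < end) {
			target = md;
			break;
		}
	}
	if (!target)
		return -EINVAL;

	num_entries = efi.memmap.nr_map +
		      efi_memmap_split_count(target, &mr.range);

	ret = efi_memmap_alloc(num_entries, &data);
	if (ret)
		return ret;

	new = memremap(data.phys_map, data.size, MEMREMAP_WB);
	if (!new)
		return -ENOMEM;

	efi_memmap_insert(&efi.memmap, new, &mr);
	memunmap(new);

	return efi_memmap_install(&data);
}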