// SPDX-License-Identifier: GPL-2.0
/*
 * Common EFI memory map functions.
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <asm/early_ioremap.h>
#include <asm/efi.h>
#include <linux/memblock.h>
#include <linux/slab.h>

static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
{
	return memblock_phys_alloc(size, SMP_CACHE_BYTES);
}

static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
{
	unsigned int order = get_order(size);
	struct page *p = alloc_pages(GFP_KERNEL, order);

	if (!p)
		return 0;

	return PFN_PHYS(page_to_pfn(p));
}

void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
{
	if (flags & EFI_MEMMAP_MEMBLOCK) {
		if (slab_is_available())
			memblock_free_late(phys, size);
		else
			memblock_phys_free(phys, size);
	} else if (flags & EFI_MEMMAP_SLAB) {
		struct page *p = pfn_to_page(PHYS_PFN(phys));
		unsigned int order = get_order(size);

		free_pages((unsigned long) page_address(p), order);
	}
}

/**
 * efi_memmap_alloc - Allocate memory for the EFI memory map
 * @num_entries: Number of entries in the allocated map.
 * @data: efi memmap installation parameters
 *
 * Depending on whether mm_init() has already been invoked or not,
 * either memblock or "normal" page allocation is used.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_alloc(unsigned int num_entries,
		struct efi_memory_map_data *data)
{
	/* Expect allocation parameters are zero initialized */
	WARN_ON(data->phys_map || data->size);

	data->size = num_entries * efi.memmap.desc_size;
	data->desc_version = efi.memmap.desc_version;
	data->desc_size = efi.memmap.desc_size;
	data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK);
	data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE;

	if (slab_is_available()) {
		data->flags |= EFI_MEMMAP_SLAB;
		data->phys_map = __efi_memmap_alloc_late(data->size);
	} else {
		data->flags |= EFI_MEMMAP_MEMBLOCK;
		data->phys_map = __efi_memmap_alloc_early(data->size);
	}

	if (!data->phys_map)
		return -ENOMEM;
	return 0;
}
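
/*
 * Illustrative sketch only (not part of the original file): a caller that
 * wants a writable copy of the map typically pairs efi_memmap_alloc() with
 * a mapping of data->phys_map, since the allocation above only returns a
 * physical address.  The helper name and error handling below are
 * hypothetical; whether memremap() or early_memremap() is appropriate
 * depends on how early the caller runs.
 */
static void *__init efi_memmap_alloc_example(unsigned int num_entries,
					     struct efi_memory_map_data *data)
{
	if (efi_memmap_alloc(num_entries, data))
		return NULL;

	/* Map the physical buffer so the new descriptors can be written. */
	if (slab_is_available())
		return memremap(data->phys_map, data->size, MEMREMAP_WB);

	return early_memremap(data->phys_map, data->size);
}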

/**
 * efi_memmap_install - Install a new EFI memory map in efi.memmap
 * @data: efi memmap installation parameters
 *
 * Unlike efi_memmap_init_*(), this function does not allow the caller
 * to switch from early to late mappings. It simply uses the existing
 * mapping function and installs the new memmap.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_install(struct efi_memory_map_data *data)
{
	efi_memmap_unmap();

	if (efi_enabled(EFI_PARAVIRT))
		return 0;

	return __efi_memmap_init(data);
}
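
/*
 * Illustrative sketch only (not part of the original file): once the new
 * descriptors have been written through the scratch mapping obtained above,
 * the caller drops that mapping and makes the new map current.  The helper
 * name is hypothetical.
 */
static int __init efi_memmap_install_example(struct efi_memory_map_data *data,
					     void *new)
{
	if (slab_is_available())
		memunmap(new);
	else
		early_memunmap(new, data->size);

	/* efi.memmap is re-initialised from data->phys_map. */
	return efi_memmap_install(data);
}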

/**
 * efi_memmap_split_count - Count number of additional EFI memmap entries
 * @md: EFI memory descriptor to split
 * @range: Address range (start, end) to split around
 *
 * Returns the number of additional EFI memmap entries required to
 * accommodate @range.
 */
int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
{
	u64 m_start, m_end;
	u64 start, end;
	int count = 0;

	start = md->phys_addr;
	end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;

	/* modifying range */
	m_start = range->start;
	m_end = range->end;

	if (m_start <= start) {
		/* split into 2 parts */
		if (start < m_end && m_end < end)
			count++;
	}

	if (start < m_start && m_start < end) {
		/* split into 3 parts */
		if (m_end < end)
			count += 2;
		/* split into 2 parts */
		if (end <= m_end)
			count++;
	}

	return count;
}
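
/*
 * Worked example (illustrative only, not part of the original file): for a
 * descriptor covering [0x1000, 0x4fff] and a modifying range of
 * [0x2000, 0x2fff], both range boundaries fall strictly inside the
 * descriptor, so it splits into three parts and the function returns 2.
 * For a range of [0x0, 0x2fff] only the tail is carved off and the return
 * value is 1, while a range covering the whole descriptor needs no extra
 * entries and returns 0.
 */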

/**
 * efi_memmap_insert - Insert a memory region in an EFI memmap
 * @old_memmap: The existing EFI memory map structure
 * @buf: Address of buffer to store new map
 * @mem: Memory map entry to insert
 *
 * It is suggested that you call efi_memmap_split_count() first
 * to see how large @buf needs to be.
 */
void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
			      struct efi_mem_range *mem)
{
	u64 m_start, m_end, m_attr;
	efi_memory_desc_t *md;
	u64 start, end;
	void *old, *new;

	/* modifying range */
	m_start = mem->range.start;
	m_end = mem->range.end;
	m_attr = mem->attribute;

	/*
	 * The EFI memory map deals with regions in EFI_PAGE_SIZE
	 * units. Ensure that the region described by 'mem' is aligned
	 * correctly.
	 */
	if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) ||
	    !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) {
		WARN_ON(1);
		return;
	}

	for (old = old_memmap->map, new = buf;
	     old < old_memmap->map_end;
	     old += old_memmap->desc_size, new += old_memmap->desc_size) {

		/* copy original EFI memory descriptor */
		memcpy(new, old, old_memmap->desc_size);
		md = new;
		start = md->phys_addr;
		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;

		if (m_start <= start && end <= m_end)
			md->attribute |= m_attr;

		if (m_start <= start &&
		    (start < m_end && m_end < end)) {
			/* first part */
			md->attribute |= m_attr;
			md->num_pages = (m_end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
		}

		if ((start < m_start && m_start < end) && m_end < end) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* middle part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->attribute |= m_attr;
			md->phys_addr = m_start;
			md->num_pages = (m_end - m_start + 1) >>
				EFI_PAGE_SHIFT;
			/* last part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - m_end) >>
				EFI_PAGE_SHIFT;
		}

		if ((start < m_start && m_start < end) &&
		    (end <= m_end)) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_start;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			md->attribute |= m_attr;
		}
	}
}
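
/*
 * Illustrative sketch only (not part of the original file): sizing the
 * destination buffer for efi_memmap_insert() by walking the current map
 * with efi_memmap_split_count(), as the kernel-doc above suggests.  The
 * helper name is hypothetical.
 */
static unsigned int __init efi_memmap_count_entries_example(struct efi_mem_range *mem)
{
	unsigned int num_entries = efi.memmap.nr_map;
	efi_memory_desc_t *md;

	for_each_efi_memory_desc(md)
		num_entries += efi_memmap_split_count(md, &mem->range);

	return num_entries;
}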