v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Virtual Memory Map support
  4 *
  5 * (C) 2007 sgi. Christoph Lameter.
  6 *
  7 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
  8 * virt_to_page, page_address() to be implemented as a base offset
  9 * calculation without memory access.
 10 *
 11 * However, virtual mappings need a page table and TLBs. Many Linux
 12 * architectures already map their physical space using 1-1 mappings
 13 * via TLBs. For those arches the virtual memory map is essentially
 14 * for free if we use the same page size as the 1-1 mappings. In that
 15 * case the overhead consists of a few additional pages that are
 16 * allocated to create a view of memory for vmemmap.
 17 *
 18 * The architecture is expected to provide a vmemmap_populate() function
 19 * to instantiate the mapping.
 20 */
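/*
 * A minimal sketch (not part of this file) of the base + offset arithmetic
 * described above: with CONFIG_SPARSEMEM_VMEMMAP the generic memory model
 * reduces the pfn <-> page conversions to pointer arithmetic against the
 * architecture-provided vmemmap base, roughly:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 *
 * so no memory access is needed beyond the (TLB-cached) translation of the
 * vmemmap virtual range that this file populates.
 */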
 21#include <linux/mm.h>
 22#include <linux/mmzone.h>
 23#include <linux/memblock.h>
 24#include <linux/memremap.h>
 25#include <linux/highmem.h>
 26#include <linux/slab.h>
 27#include <linux/spinlock.h>
 28#include <linux/vmalloc.h>
 29#include <linux/sched.h>
 30#include <asm/dma.h>
 31#include <asm/pgalloc.h>
 32#include <asm/pgtable.h>
 33
 34/*
 35 * Allocate a block of memory to be used to back the virtual memory map
 36 * or to back the page tables that are used to create the mapping.
 37 * Uses the main allocators if they are available, else bootmem.
 38 */
 39
 40static void * __ref __earlyonly_bootmem_alloc(int node,
 41				unsigned long size,
 42				unsigned long align,
 43				unsigned long goal)
 44{
 45	return memblock_alloc_try_nid_raw(size, align, goal,
 46					       MEMBLOCK_ALLOC_ACCESSIBLE, node);
 47}
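/*
 * Note that the _raw memblock variant returns memory that is not zeroed;
 * callers that need cleared pages (the page table helpers further down) go
 * through vmemmap_alloc_block_zero() instead.
 */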
 48
 49void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 50{
  51	/* If the main allocator is up use that, fall back to bootmem. */
 52	if (slab_is_available()) {
 53		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
 54		int order = get_order(size);
 55		static bool warned;
 56		struct page *page;
 57
 58		page = alloc_pages_node(node, gfp_mask, order);
 59		if (page)
 60			return page_address(page);
 61
 62		if (!warned) {
 63			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
 64				   "vmemmap alloc failure: order:%u", order);
 65			warned = true;
 66		}
 67		return NULL;
 68	} else
 69		return __earlyonly_bootmem_alloc(node, size, size,
 70				__pa(MAX_DMA_ADDRESS));
 71}
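/*
 * The gfp mask above retries hard but gives up instead of invoking the OOM
 * killer (__GFP_RETRY_MAYFAIL) and suppresses the page allocator's own
 * failure message (__GFP_NOWARN); the one-shot warn_alloc() keeps a single
 * failure visible without flooding the log when many sections fail in a row.
 */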
 72
  73	/* All allocations during the early (buffered) stage need to be the same size */
 74void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
 75{
 76	void *ptr = sparse_buffer_alloc(size);
 77
 78	if (!ptr)
 79		ptr = vmemmap_alloc_block(size, node);
 80	return ptr;
 81}
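/*
 * sparse_buffer_alloc() hands out chunks of a large per-node buffer that
 * sparse_init() sets up in advance (see mm/sparse.c); the fallback to
 * vmemmap_alloc_block() covers memory hotplug and the case where that
 * buffer has been exhausted.
 */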
 82
 83static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
 84{
 85	return altmap->base_pfn + altmap->reserve + altmap->alloc
 86		+ altmap->align;
 87}
 88
 89static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
 90{
 91	unsigned long allocated = altmap->alloc + altmap->align;
 92
 93	if (altmap->free > allocated)
 94		return altmap->free - allocated;
 95	return 0;
 96}
 97
 98/**
 99 * altmap_alloc_block_buf - allocate pages from the device page map
100 * @altmap:	device page map
101 * @size:	size (in bytes) of the allocation
102 *
103 * Allocations are aligned to the size of the request.
104 */
105void * __meminit altmap_alloc_block_buf(unsigned long size,
106		struct vmem_altmap *altmap)
107{
108	unsigned long pfn, nr_pfns, nr_align;
109
110	if (size & ~PAGE_MASK) {
111		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
112				__func__, size);
113		return NULL;
114	}
115
116	pfn = vmem_altmap_next_pfn(altmap);
117	nr_pfns = size >> PAGE_SHIFT;
118	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
119	nr_align = ALIGN(pfn, nr_align) - pfn;
120	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
121		return NULL;
122
123	altmap->alloc += nr_pfns;
124	altmap->align += nr_align;
125	pfn += nr_align;
126
127	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
128			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
129	return __va(__pfn_to_phys(pfn));
130}
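/*
 * Worked example for the alignment logic above (illustrative numbers): a
 * 2MB request with 4K pages gives nr_pfns = 512.  find_first_bit() returns
 * the lowest set bit (9), so nr_align starts as 512, the largest power of
 * two dividing nr_pfns, and the second assignment turns it into the number
 * of pfns needed to round the current device pfn up to a 512-pfn boundary.
 * Both the padding and the allocation itself are charged to the altmap, so
 * the next caller starts from the pfn right after this block.
 */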
131
132void __meminit vmemmap_verify(pte_t *pte, int node,
133				unsigned long start, unsigned long end)
134{
135	unsigned long pfn = pte_pfn(*pte);
136	int actual_node = early_pfn_to_nid(pfn);
137
138	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
139		pr_warn("[%lx-%lx] potential offnode page_structs\n",
140			start, end - 1);
141}
142
143pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
144{
145	pte_t *pte = pte_offset_kernel(pmd, addr);
146	if (pte_none(*pte)) {
147		pte_t entry;
148		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
149		if (!p)
150			return NULL;
151		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
152		set_pte_at(&init_mm, addr, pte, entry);
153	}
154	return pte;
155}
156
157static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
158{
159	void *p = vmemmap_alloc_block(size, node);
160
161	if (!p)
162		return NULL;
163	memset(p, 0, size);
164
165	return p;
166}
167
168pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
169{
170	pmd_t *pmd = pmd_offset(pud, addr);
171	if (pmd_none(*pmd)) {
172		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
173		if (!p)
174			return NULL;
175		pmd_populate_kernel(&init_mm, pmd, p);
176	}
177	return pmd;
178}
179
180pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
181{
182	pud_t *pud = pud_offset(p4d, addr);
183	if (pud_none(*pud)) {
184		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
185		if (!p)
186			return NULL;
187		pud_populate(&init_mm, pud, p);
188	}
189	return pud;
190}
191
192p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
193{
194	p4d_t *p4d = p4d_offset(pgd, addr);
195	if (p4d_none(*p4d)) {
196		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
197		if (!p)
198			return NULL;
199		p4d_populate(&init_mm, p4d, p);
200	}
201	return p4d;
202}
203
204pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
205{
206	pgd_t *pgd = pgd_offset_k(addr);
207	if (pgd_none(*pgd)) {
208		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
209		if (!p)
210			return NULL;
211		pgd_populate(&init_mm, pgd, p);
212	}
213	return pgd;
214}
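/*
 * The pgd/p4d/pud/pmd helpers above all follow the same pattern: if the
 * entry at this level is still empty, allocate one zeroed page to act as
 * the next-level table and hook it in; otherwise reuse whatever an earlier
 * call (or the architecture's initial page tables) already installed.
 * Only the pte level maps actual vmemmap pages.
 */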
215
216int __meminit vmemmap_populate_basepages(unsigned long start,
217					 unsigned long end, int node)
218{
219	unsigned long addr = start;
220	pgd_t *pgd;
221	p4d_t *p4d;
222	pud_t *pud;
223	pmd_t *pmd;
224	pte_t *pte;
225
226	for (; addr < end; addr += PAGE_SIZE) {
227		pgd = vmemmap_pgd_populate(addr, node);
228		if (!pgd)
229			return -ENOMEM;
230		p4d = vmemmap_p4d_populate(pgd, addr, node);
231		if (!p4d)
232			return -ENOMEM;
233		pud = vmemmap_pud_populate(p4d, addr, node);
234		if (!pud)
235			return -ENOMEM;
236		pmd = vmemmap_pmd_populate(pud, addr, node);
237		if (!pmd)
238			return -ENOMEM;
239		pte = vmemmap_pte_populate(pmd, addr, node);
240		if (!pte)
241			return -ENOMEM;
242		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
243	}
244
245	return 0;
246}
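/*
 * A minimal sketch of the vmemmap_populate() hook the header comment asks
 * the architecture to provide, assuming an architecture that maps the
 * memmap with base pages and ignores the altmap (arm64 without section
 * mappings does essentially this in v5.4); illustrative only, this lives
 * in arch code rather than in this file:
 */
#if 0
int __meminit vmemmap_populate(unsigned long start, unsigned long end,
			       int node, struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node);
}
#endif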
247
248struct page * __meminit __populate_section_memmap(unsigned long pfn,
249		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
250{
251	unsigned long start;
252	unsigned long end;
253
254	/*
255	 * The minimum granularity of memmap extensions is
256	 * PAGES_PER_SUBSECTION as allocations are tracked in the
257	 * 'subsection_map' bitmap of the section.
258	 */
259	end = ALIGN(pfn + nr_pages, PAGES_PER_SUBSECTION);
260	pfn &= PAGE_SUBSECTION_MASK;
261	nr_pages = end - pfn;
262
263	start = (unsigned long) pfn_to_page(pfn);
264	end = start + nr_pages * sizeof(struct page);
265
266	if (vmemmap_populate(start, end, nid, altmap))
267		return NULL;
268
269	return pfn_to_page(pfn);
270}
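/*
 * Worked example for the rounding above (illustrative numbers, assuming 4K
 * pages and 2MB subsections, i.e. PAGES_PER_SUBSECTION == 512): a request
 * for pfn = 0x1100, nr_pages = 100 is widened to the enclosing subsection,
 * pfn = 0x1000 and nr_pages = 512, so the memmap is always populated in
 * whole 'subsection_map' units even when the caller's range starts or ends
 * in the middle of a subsection.
 */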
v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Virtual Memory Map support
  4 *
  5 * (C) 2007 sgi. Christoph Lameter.
  6 *
  7 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
  8 * virt_to_page, page_address() to be implemented as a base offset
  9 * calculation without memory access.
 10 *
 11 * However, virtual mappings need a page table and TLBs. Many Linux
 12 * architectures already map their physical space using 1-1 mappings
 13 * via TLBs. For those arches the virtual memory map is essentially
 14 * for free if we use the same page size as the 1-1 mappings. In that
 15 * case the overhead consists of a few additional pages that are
 16 * allocated to create a view of memory for vmemmap.
 17 *
 18 * The architecture is expected to provide a vmemmap_populate() function
 19 * to instantiate the mapping.
 20 */
 21#include <linux/mm.h>
 22#include <linux/mmzone.h>
 23#include <linux/bootmem.h>
 24#include <linux/memremap.h>
 25#include <linux/highmem.h>
 26#include <linux/slab.h>
 27#include <linux/spinlock.h>
 28#include <linux/vmalloc.h>
 29#include <linux/sched.h>
 30#include <asm/dma.h>
 31#include <asm/pgalloc.h>
 32#include <asm/pgtable.h>
 33
 34/*
 35 * Allocate a block of memory to be used to back the virtual memory map
 36 * or to back the page tables that are used to create the mapping.
 37 * Uses the main allocators if they are available, else bootmem.
 38 */
 39
 40static void * __ref __earlyonly_bootmem_alloc(int node,
 41				unsigned long size,
 42				unsigned long align,
 43				unsigned long goal)
 44{
 45	return memblock_virt_alloc_try_nid_raw(size, align, goal,
 46					    BOOTMEM_ALLOC_ACCESSIBLE, node);
 47}
 48
 49static void *vmemmap_buf;
 50static void *vmemmap_buf_end;
 51
 52void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 53{
  54	/* If the main allocator is up use that, fall back to bootmem. */
 55	if (slab_is_available()) {
 56		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
 57		int order = get_order(size);
 58		static bool warned;
 59		struct page *page;
 60
 61		page = alloc_pages_node(node, gfp_mask, order);
 62		if (page)
 63			return page_address(page);
 64
 65		if (!warned) {
 66			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
 67				   "vmemmap alloc failure: order:%u", order);
 68			warned = true;
 69		}
 70		return NULL;
 71	} else
 72		return __earlyonly_bootmem_alloc(node, size, size,
 73				__pa(MAX_DMA_ADDRESS));
 74}
 75
  76	/* All allocations during the early (buffered) stage need to be the same size */
 77void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
 78{
 79	void *ptr;
 80
 81	if (!vmemmap_buf)
 82		return vmemmap_alloc_block(size, node);
 83
  84	/* take it from the buffer */
 85	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
 86	if (ptr + size > vmemmap_buf_end)
 87		return vmemmap_alloc_block(size, node);
 88
 89	vmemmap_buf = ptr + size;
 90
 91	return ptr;
 92}
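/*
 * In this kernel the early buffering is done with the file-local
 * vmemmap_buf/vmemmap_buf_end pointers: sparse_mem_maps_populate_node()
 * below grabs one large PMD-aligned region per node from memblock, and the
 * function above simply bumps vmemmap_buf through it (aligned to the request
 * size), falling back to vmemmap_alloc_block() once the buffer runs out.
 */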
 93
 94static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
 95{
 96	return altmap->base_pfn + altmap->reserve + altmap->alloc
 97		+ altmap->align;
 98}
 99
100static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
101{
102	unsigned long allocated = altmap->alloc + altmap->align;
103
104	if (altmap->free > allocated)
105		return altmap->free - allocated;
106	return 0;
107}
108
109/**
110 * altmap_alloc_block_buf - allocate pages from the device page map
111 * @altmap:	device page map
112 * @size:	size (in bytes) of the allocation
113 *
114 * Allocations are aligned to the size of the request.
115 */
116void * __meminit altmap_alloc_block_buf(unsigned long size,
117		struct vmem_altmap *altmap)
118{
119	unsigned long pfn, nr_pfns, nr_align;
120
121	if (size & ~PAGE_MASK) {
122		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
123				__func__, size);
124		return NULL;
125	}
126
127	pfn = vmem_altmap_next_pfn(altmap);
128	nr_pfns = size >> PAGE_SHIFT;
129	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
130	nr_align = ALIGN(pfn, nr_align) - pfn;
131	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
132		return NULL;
133
134	altmap->alloc += nr_pfns;
135	altmap->align += nr_align;
136	pfn += nr_align;
137
138	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
139			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
140	return __va(__pfn_to_phys(pfn));
141}
142
143void __meminit vmemmap_verify(pte_t *pte, int node,
144				unsigned long start, unsigned long end)
145{
146	unsigned long pfn = pte_pfn(*pte);
147	int actual_node = early_pfn_to_nid(pfn);
148
149	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
150		pr_warn("[%lx-%lx] potential offnode page_structs\n",
151			start, end - 1);
152}
153
154pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
155{
156	pte_t *pte = pte_offset_kernel(pmd, addr);
157	if (pte_none(*pte)) {
158		pte_t entry;
159		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
160		if (!p)
161			return NULL;
162		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
163		set_pte_at(&init_mm, addr, pte, entry);
164	}
165	return pte;
166}
167
168static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
169{
170	void *p = vmemmap_alloc_block(size, node);
171
172	if (!p)
173		return NULL;
174	memset(p, 0, size);
175
176	return p;
177}
178
179pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
180{
181	pmd_t *pmd = pmd_offset(pud, addr);
182	if (pmd_none(*pmd)) {
183		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
184		if (!p)
185			return NULL;
186		pmd_populate_kernel(&init_mm, pmd, p);
187	}
188	return pmd;
189}
190
191pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
192{
193	pud_t *pud = pud_offset(p4d, addr);
194	if (pud_none(*pud)) {
195		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
196		if (!p)
197			return NULL;
198		pud_populate(&init_mm, pud, p);
199	}
200	return pud;
201}
202
203p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
204{
205	p4d_t *p4d = p4d_offset(pgd, addr);
206	if (p4d_none(*p4d)) {
207		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
208		if (!p)
209			return NULL;
210		p4d_populate(&init_mm, p4d, p);
211	}
212	return p4d;
213}
214
215pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
216{
217	pgd_t *pgd = pgd_offset_k(addr);
218	if (pgd_none(*pgd)) {
219		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
220		if (!p)
221			return NULL;
222		pgd_populate(&init_mm, pgd, p);
223	}
224	return pgd;
225}
226
227int __meminit vmemmap_populate_basepages(unsigned long start,
228					 unsigned long end, int node)
229{
230	unsigned long addr = start;
231	pgd_t *pgd;
232	p4d_t *p4d;
233	pud_t *pud;
234	pmd_t *pmd;
235	pte_t *pte;
236
237	for (; addr < end; addr += PAGE_SIZE) {
238		pgd = vmemmap_pgd_populate(addr, node);
239		if (!pgd)
240			return -ENOMEM;
241		p4d = vmemmap_p4d_populate(pgd, addr, node);
242		if (!p4d)
243			return -ENOMEM;
244		pud = vmemmap_pud_populate(p4d, addr, node);
245		if (!pud)
246			return -ENOMEM;
247		pmd = vmemmap_pmd_populate(pud, addr, node);
248		if (!pmd)
249			return -ENOMEM;
250		pte = vmemmap_pte_populate(pmd, addr, node);
251		if (!pte)
252			return -ENOMEM;
253		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
254	}
255
256	return 0;
257}
258
259struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
260		struct vmem_altmap *altmap)
261{
262	unsigned long start;
263	unsigned long end;
264	struct page *map;
265
266	map = pfn_to_page(pnum * PAGES_PER_SECTION);
267	start = (unsigned long)map;
268	end = (unsigned long)(map + PAGES_PER_SECTION);
269
270	if (vmemmap_populate(start, end, nid, altmap))
271		return NULL;
272
273	return map;
274}
275
276void __init sparse_mem_maps_populate_node(struct page **map_map,
277					  unsigned long pnum_begin,
278					  unsigned long pnum_end,
279					  unsigned long map_count, int nodeid)
280{
281	unsigned long pnum;
282	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
283	void *vmemmap_buf_start;
284
285	size = ALIGN(size, PMD_SIZE);
286	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
287			 PMD_SIZE, __pa(MAX_DMA_ADDRESS));
288
289	if (vmemmap_buf_start) {
290		vmemmap_buf = vmemmap_buf_start;
291		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
292	}
293
294	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
295		struct mem_section *ms;
296
297		if (!present_section_nr(pnum))
298			continue;
299
300		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
301		if (map_map[pnum])
302			continue;
303		ms = __nr_to_section(pnum);
304		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
305		       __func__);
306		ms->section_mem_map = 0;
307	}
308
309	if (vmemmap_buf_start) {
310		/* need to free left buf */
311		memblock_free_early(__pa(vmemmap_buf),
312				    vmemmap_buf_end - vmemmap_buf);
313		vmemmap_buf = NULL;
314		vmemmap_buf_end = NULL;
315	}
316}
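/*
 * The leftover-buffer bookkeeping above is what the v5.4 listing at the top
 * of this page replaces: the per-file vmemmap_buf pointers are gone and
 * vmemmap_alloc_block_buf() instead draws from the generic
 * sparse_buffer_alloc() pool managed in mm/sparse.c.
 */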