mm/nobootmem.c (v4.17)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  bootmem - A boot-time physical memory allocator and configurator
  4 *
  5 *  Copyright (C) 1999 Ingo Molnar
  6 *                1999 Kanoj Sarcar, SGI
  7 *                2008 Johannes Weiner
  8 *
  9 * Access to this subsystem has to be serialized externally (which is true
 10 * for the boot process anyway).
 11 */
 12#include <linux/init.h>
 13#include <linux/pfn.h>
 14#include <linux/slab.h>
 15#include <linux/export.h>
 16#include <linux/kmemleak.h>
 17#include <linux/range.h>
 18#include <linux/memblock.h>
 19#include <linux/bootmem.h>
 20
 21#include <asm/bug.h>
 22#include <asm/io.h>
 23
 24#include "internal.h"
 25
 26#ifndef CONFIG_HAVE_MEMBLOCK
 27#error CONFIG_HAVE_MEMBLOCK not defined
 28#endif
 29
 30#ifndef CONFIG_NEED_MULTIPLE_NODES
 31struct pglist_data __refdata contig_page_data;
 32EXPORT_SYMBOL(contig_page_data);
 33#endif
 34
 35unsigned long max_low_pfn;
 36unsigned long min_low_pfn;
 37unsigned long max_pfn;
 38unsigned long long max_possible_pfn;
 39
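/*
 * Find a free range with memblock, reserve it, zero it and return its
 * virtual address.  If no mirrored memory can be found, the MEMBLOCK_MIRROR
 * flag is dropped and the search is retried from ordinary memory.
 */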
 40static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
 41					u64 goal, u64 limit)
 42{
 43	void *ptr;
 44	u64 addr;
 45	ulong flags = choose_memblock_flags();
 46
 47	if (limit > memblock.current_limit)
 48		limit = memblock.current_limit;
 49
 50again:
 51	addr = memblock_find_in_range_node(size, align, goal, limit, nid,
 52					   flags);
 53	if (!addr && (flags & MEMBLOCK_MIRROR)) {
 54		flags &= ~MEMBLOCK_MIRROR;
 55		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
 56			&size);
 57		goto again;
 58	}
 59	if (!addr)
 60		return NULL;
 61
 62	if (memblock_reserve(addr, size))
 63		return NULL;
 64
 65	ptr = phys_to_virt(addr);
 66	memset(ptr, 0, size);
 67	/*
 68	 * The min_count is set to 0 so that bootmem allocated blocks
 69	 * are never reported as leaks.
 70	 */
 71	kmemleak_alloc(ptr, size, 0, 0);
 72	return ptr;
 73}
 74
 75/*
 76 * free_bootmem_late - free bootmem pages directly to page allocator
 77 * @addr: starting address of the range
 78 * @size: size of the range in bytes
 79 *
 80 * This is only useful when the bootmem allocator has already been torn
 81 * down, but we are still initializing the system.  Pages are given directly
 82 * to the page allocator, no bootmem metadata is updated because it is gone.
 83 */
 84void __init free_bootmem_late(unsigned long addr, unsigned long size)
 85{
 86	unsigned long cursor, end;
 87
 88	kmemleak_free_part_phys(addr, size);
 89
 90	cursor = PFN_UP(addr);
 91	end = PFN_DOWN(addr + size);
 92
 93	for (; cursor < end; cursor++) {
 94		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
 95		totalram_pages++;
 96	}
 97}
 98
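/*
 * Hand the PFN range [start, end) to the page allocator in the largest
 * naturally aligned power-of-two blocks possible, capped at MAX_ORDER - 1.
 */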
 99static void __init __free_pages_memory(unsigned long start, unsigned long end)
100{
101	int order;
102
103	while (start < end) {
104		order = min(MAX_ORDER - 1UL, __ffs(start));
105
106		while (start + (1UL << order) > end)
107			order--;
108
109		__free_pages_bootmem(pfn_to_page(start), start, order);
110
111		start += (1UL << order);
112	}
113}
114
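/*
 * Free the page frames covered by [start, end), clamped to max_low_pfn,
 * and return the number of pages released.
 */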
115static unsigned long __init __free_memory_core(phys_addr_t start,
116				 phys_addr_t end)
117{
118	unsigned long start_pfn = PFN_UP(start);
119	unsigned long end_pfn = min_t(unsigned long,
120				      PFN_DOWN(end), max_low_pfn);
121
122	if (start_pfn >= end_pfn)
123		return 0;
124
125	__free_pages_memory(start_pfn, end_pfn);
126
127	return end_pfn - start_pfn;
128}
129
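/*
 * Mark the pages of all reserved regions as reserved, then release every
 * free memblock range to the buddy allocator (__free_memory_core() clamps
 * to max_low_pfn).  Returns the total number of pages freed.
 */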
130static unsigned long __init free_low_memory_core_early(void)
131{
132	unsigned long count = 0;
133	phys_addr_t start, end;
134	u64 i;
135
136	memblock_clear_hotplug(0, -1);
137
138	for_each_reserved_mem_region(i, &start, &end)
139		reserve_bootmem_region(start, end);
140
141	/*
142	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
 143	 * because in some cases (e.g. when node 0 has no RAM installed)
 144	 * the low RAM will be on node 1.
145	 */
146	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
147				NULL)
148		count += __free_memory_core(start, end);
149
150	return count;
151}
152
153static int reset_managed_pages_done __initdata;
154
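/*
 * Zero the managed_pages counter of every zone on @pgdat; the counters are
 * re-accumulated as boot memory is released to the buddy allocator.
 */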
155void reset_node_managed_pages(pg_data_t *pgdat)
156{
157	struct zone *z;
158
159	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
160		z->managed_pages = 0;
161}
162
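/*
 * Reset managed_pages on every online node.  The reset_managed_pages_done
 * flag makes sure the reset happens only once.
 */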
163void __init reset_all_zones_managed_pages(void)
164{
165	struct pglist_data *pgdat;
166
167	if (reset_managed_pages_done)
168		return;
169
170	for_each_online_pgdat(pgdat)
171		reset_node_managed_pages(pgdat);
172
173	reset_managed_pages_done = 1;
174}
175
176/**
177 * free_all_bootmem - release free pages to the buddy allocator
178 *
179 * Returns the number of pages actually released.
180 */
181unsigned long __init free_all_bootmem(void)
182{
183	unsigned long pages;
184
185	reset_all_zones_managed_pages();
186
187	pages = free_low_memory_core_early();
188	totalram_pages += pages;
189
190	return pages;
191}
192
193/**
194 * free_bootmem_node - mark a page range as usable
195 * @pgdat: node the range resides on
196 * @physaddr: starting address of the range
197 * @size: size of the range in bytes
198 *
199 * Partial pages will be considered reserved and left as they are.
200 *
201 * The range must reside completely on the specified node.
202 */
203void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
204			      unsigned long size)
205{
206	memblock_free(physaddr, size);
207}
208
209/**
210 * free_bootmem - mark a page range as usable
211 * @addr: starting address of the range
212 * @size: size of the range in bytes
213 *
214 * Partial pages will be considered reserved and left as they are.
215 *
216 * The range must be contiguous but may span node boundaries.
217 */
218void __init free_bootmem(unsigned long addr, unsigned long size)
219{
220	memblock_free(addr, size);
221}
222
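/*
 * Allocate boot memory from anywhere in the system.  Falls back to
 * kzalloc() (with a warning) once the slab allocator is up, and retries
 * with the goal dropped before giving up.  Returns NULL on failure.
 */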
223static void * __init ___alloc_bootmem_nopanic(unsigned long size,
224					unsigned long align,
225					unsigned long goal,
226					unsigned long limit)
227{
228	void *ptr;
229
230	if (WARN_ON_ONCE(slab_is_available()))
231		return kzalloc(size, GFP_NOWAIT);
232
233restart:
234
235	ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align, goal, limit);
236
237	if (ptr)
238		return ptr;
239
240	if (goal != 0) {
241		goal = 0;
242		goto restart;
243	}
244
245	return NULL;
246}
247
248/**
249 * __alloc_bootmem_nopanic - allocate boot memory without panicking
250 * @size: size of the request in bytes
251 * @align: alignment of the region
252 * @goal: preferred starting address of the region
253 *
254 * The goal is dropped if it can not be satisfied and the allocation will
255 * fall back to memory below @goal.
256 *
257 * Allocation may happen on any node in the system.
258 *
259 * Returns NULL on failure.
260 */
261void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
262					unsigned long goal)
263{
264	unsigned long limit = -1UL;
265
266	return ___alloc_bootmem_nopanic(size, align, goal, limit);
267}
268
269static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
270					unsigned long goal, unsigned long limit)
271{
272	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);
273
274	if (mem)
275		return mem;
276	/*
277	 * Whoops, we cannot satisfy the allocation request.
278	 */
279	pr_alert("bootmem alloc of %lu bytes failed!\n", size);
280	panic("Out of memory");
281	return NULL;
282}
283
284/**
285 * __alloc_bootmem - allocate boot memory
286 * @size: size of the request in bytes
287 * @align: alignment of the region
288 * @goal: preferred starting address of the region
289 *
290 * The goal is dropped if it can not be satisfied and the allocation will
291 * fall back to memory below @goal.
292 *
293 * Allocation may happen on any node in the system.
294 *
295 * The function panics if the request can not be satisfied.
296 */
297void * __init __alloc_bootmem(unsigned long size, unsigned long align,
298			      unsigned long goal)
299{
300	unsigned long limit = -1UL;
301
302	return ___alloc_bootmem(size, align, goal, limit);
303}
304
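/*
 * Node-affine allocation: try the requested node first, then any node,
 * then retry both with the goal dropped.  Returns NULL on failure.
 */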
305void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
306						   unsigned long size,
307						   unsigned long align,
308						   unsigned long goal,
309						   unsigned long limit)
310{
311	void *ptr;
312
313again:
314	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
315					goal, limit);
316	if (ptr)
317		return ptr;
318
319	ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align,
320					goal, limit);
321	if (ptr)
322		return ptr;
323
324	if (goal) {
325		goal = 0;
326		goto again;
327	}
328
329	return NULL;
330}
331
332void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
333				   unsigned long align, unsigned long goal)
334{
335	if (WARN_ON_ONCE(slab_is_available()))
336		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
337
338	return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
339}
340
341static void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
342				    unsigned long align, unsigned long goal,
343				    unsigned long limit)
344{
345	void *ptr;
346
347	ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
348	if (ptr)
349		return ptr;
350
351	pr_alert("bootmem alloc of %lu bytes failed!\n", size);
352	panic("Out of memory");
353	return NULL;
354}
355
356/**
357 * __alloc_bootmem_node - allocate boot memory from a specific node
358 * @pgdat: node to allocate from
359 * @size: size of the request in bytes
360 * @align: alignment of the region
361 * @goal: preferred starting address of the region
362 *
363 * The goal is dropped if it can not be satisfied and the allocation will
364 * fall back to memory below @goal.
365 *
366 * Allocation may fall back to any node in the system if the specified node
367 * can not hold the requested memory.
368 *
369 * The function panics if the request can not be satisfied.
370 */
371void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
372				   unsigned long align, unsigned long goal)
373{
374	if (WARN_ON_ONCE(slab_is_available()))
375		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
376
377	return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
378}
379
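/*
 * With memblock-backed (no-bootmem) allocation there is no separate
 * highmem path, so this simply forwards to __alloc_bootmem_node().
 */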
380void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
381				   unsigned long align, unsigned long goal)
382{
383	return __alloc_bootmem_node(pgdat, size, align, goal);
384}
385
386
387/**
388 * __alloc_bootmem_low - allocate low boot memory
389 * @size: size of the request in bytes
390 * @align: alignment of the region
391 * @goal: preferred starting address of the region
392 *
393 * The goal is dropped if it can not be satisfied and the allocation will
394 * fall back to memory below @goal.
395 *
396 * Allocation may happen on any node in the system.
397 *
398 * The function panics if the request can not be satisfied.
399 */
400void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
401				  unsigned long goal)
402{
403	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
404}
405
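/*
 * Like __alloc_bootmem_low(), but returns NULL instead of panicking when
 * the request cannot be satisfied.
 */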
406void * __init __alloc_bootmem_low_nopanic(unsigned long size,
407					  unsigned long align,
408					  unsigned long goal)
409{
410	return ___alloc_bootmem_nopanic(size, align, goal,
411					ARCH_LOW_ADDRESS_LIMIT);
412}
413
414/**
415 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
416 * @pgdat: node to allocate from
417 * @size: size of the request in bytes
418 * @align: alignment of the region
419 * @goal: preferred starting address of the region
420 *
421 * The goal is dropped if it can not be satisfied and the allocation will
422 * fall back to memory below @goal.
423 *
424 * Allocation may fall back to any node in the system if the specified node
425 * can not hold the requested memory.
426 *
427 * The function panics if the request can not be satisfied.
428 */
429void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
430				       unsigned long align, unsigned long goal)
431{
432	if (WARN_ON_ONCE(slab_is_available()))
433		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
434
435	return ___alloc_bootmem_node(pgdat, size, align, goal,
436				     ARCH_LOW_ADDRESS_LIMIT);
437}
mm/nobootmem.c (v4.6)
 
  1/*
  2 *  bootmem - A boot-time physical memory allocator and configurator
  3 *
  4 *  Copyright (C) 1999 Ingo Molnar
  5 *                1999 Kanoj Sarcar, SGI
  6 *                2008 Johannes Weiner
  7 *
  8 * Access to this subsystem has to be serialized externally (which is true
  9 * for the boot process anyway).
 10 */
 11#include <linux/init.h>
 12#include <linux/pfn.h>
 13#include <linux/slab.h>
 14#include <linux/bootmem.h>
 15#include <linux/export.h>
 16#include <linux/kmemleak.h>
 17#include <linux/range.h>
 18#include <linux/memblock.h>
 19
 20#include <asm/bug.h>
 21#include <asm/io.h>
 22#include <asm/processor.h>
 23
 24#include "internal.h"
 25
 26#ifndef CONFIG_NEED_MULTIPLE_NODES
 27struct pglist_data __refdata contig_page_data;
 28EXPORT_SYMBOL(contig_page_data);
 29#endif
 30
 31unsigned long max_low_pfn;
 32unsigned long min_low_pfn;
 33unsigned long max_pfn;
 34unsigned long long max_possible_pfn;
 35
 36static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
 37					u64 goal, u64 limit)
 38{
 39	void *ptr;
 40	u64 addr;
 41	ulong flags = choose_memblock_flags();
 42
 43	if (limit > memblock.current_limit)
 44		limit = memblock.current_limit;
 45
 46again:
 47	addr = memblock_find_in_range_node(size, align, goal, limit, nid,
 48					   flags);
 49	if (!addr && (flags & MEMBLOCK_MIRROR)) {
 50		flags &= ~MEMBLOCK_MIRROR;
 51		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
 52			&size);
 53		goto again;
 54	}
 55	if (!addr)
 56		return NULL;
 57
 58	if (memblock_reserve(addr, size))
 59		return NULL;
 60
 61	ptr = phys_to_virt(addr);
 62	memset(ptr, 0, size);
 63	/*
 64	 * The min_count is set to 0 so that bootmem allocated blocks
 65	 * are never reported as leaks.
 66	 */
 67	kmemleak_alloc(ptr, size, 0, 0);
 68	return ptr;
 69}
 70
 71/*
 72 * free_bootmem_late - free bootmem pages directly to page allocator
 73 * @addr: starting address of the range
 74 * @size: size of the range in bytes
 75 *
 76 * This is only useful when the bootmem allocator has already been torn
 77 * down, but we are still initializing the system.  Pages are given directly
 78 * to the page allocator, no bootmem metadata is updated because it is gone.
 79 */
 80void __init free_bootmem_late(unsigned long addr, unsigned long size)
 81{
 82	unsigned long cursor, end;
 83
 84	kmemleak_free_part(__va(addr), size);
 85
 86	cursor = PFN_UP(addr);
 87	end = PFN_DOWN(addr + size);
 88
 89	for (; cursor < end; cursor++) {
 90		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
 91		totalram_pages++;
 92	}
 93}
 94
 95static void __init __free_pages_memory(unsigned long start, unsigned long end)
 96{
 97	int order;
 98
 99	while (start < end) {
100		order = min(MAX_ORDER - 1UL, __ffs(start));
101
102		while (start + (1UL << order) > end)
103			order--;
104
105		__free_pages_bootmem(pfn_to_page(start), start, order);
106
107		start += (1UL << order);
108	}
109}
110
111static unsigned long __init __free_memory_core(phys_addr_t start,
112				 phys_addr_t end)
113{
114	unsigned long start_pfn = PFN_UP(start);
115	unsigned long end_pfn = min_t(unsigned long,
116				      PFN_DOWN(end), max_low_pfn);
117
118	if (start_pfn > end_pfn)
119		return 0;
120
121	__free_pages_memory(start_pfn, end_pfn);
122
123	return end_pfn - start_pfn;
124}
125
126static unsigned long __init free_low_memory_core_early(void)
127{
128	unsigned long count = 0;
129	phys_addr_t start, end;
130	u64 i;
131
132	memblock_clear_hotplug(0, -1);
133
134	for_each_reserved_mem_region(i, &start, &end)
135		reserve_bootmem_region(start, end);
136
137	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
138				NULL)
139		count += __free_memory_core(start, end);
140
141#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
142	{
143		phys_addr_t size;
144
145		/* Free memblock.reserved array if it was allocated */
146		size = get_allocated_memblock_reserved_regions_info(&start);
147		if (size)
148			count += __free_memory_core(start, start + size);
149
150		/* Free memblock.memory array if it was allocated */
151		size = get_allocated_memblock_memory_regions_info(&start);
152		if (size)
153			count += __free_memory_core(start, start + size);
154	}
155#endif
156
157	return count;
158}
159
160static int reset_managed_pages_done __initdata;
161
162void reset_node_managed_pages(pg_data_t *pgdat)
163{
164	struct zone *z;
165
166	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
167		z->managed_pages = 0;
168}
169
170void __init reset_all_zones_managed_pages(void)
171{
172	struct pglist_data *pgdat;
173
174	if (reset_managed_pages_done)
175		return;
176
177	for_each_online_pgdat(pgdat)
178		reset_node_managed_pages(pgdat);
179
180	reset_managed_pages_done = 1;
181}
182
183/**
184 * free_all_bootmem - release free pages to the buddy allocator
185 *
186 * Returns the number of pages actually released.
187 */
188unsigned long __init free_all_bootmem(void)
189{
190	unsigned long pages;
191
192	reset_all_zones_managed_pages();
193
194	/*
195	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
 196	 * because in some cases (e.g. when node 0 has no RAM installed)
 197	 * the low RAM will be on node 1.
198	 */
199	pages = free_low_memory_core_early();
200	totalram_pages += pages;
201
202	return pages;
203}
204
205/**
206 * free_bootmem_node - mark a page range as usable
207 * @pgdat: node the range resides on
208 * @physaddr: starting address of the range
209 * @size: size of the range in bytes
210 *
211 * Partial pages will be considered reserved and left as they are.
212 *
213 * The range must reside completely on the specified node.
214 */
215void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
216			      unsigned long size)
217{
218	memblock_free(physaddr, size);
219}
220
221/**
222 * free_bootmem - mark a page range as usable
223 * @addr: starting address of the range
224 * @size: size of the range in bytes
225 *
226 * Partial pages will be considered reserved and left as they are.
227 *
228 * The range must be contiguous but may span node boundaries.
229 */
230void __init free_bootmem(unsigned long addr, unsigned long size)
231{
232	memblock_free(addr, size);
233}
234
235static void * __init ___alloc_bootmem_nopanic(unsigned long size,
236					unsigned long align,
237					unsigned long goal,
238					unsigned long limit)
239{
240	void *ptr;
241
242	if (WARN_ON_ONCE(slab_is_available()))
243		return kzalloc(size, GFP_NOWAIT);
244
245restart:
246
247	ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align, goal, limit);
248
249	if (ptr)
250		return ptr;
251
252	if (goal != 0) {
253		goal = 0;
254		goto restart;
255	}
256
257	return NULL;
258}
259
260/**
261 * __alloc_bootmem_nopanic - allocate boot memory without panicking
262 * @size: size of the request in bytes
263 * @align: alignment of the region
264 * @goal: preferred starting address of the region
265 *
266 * The goal is dropped if it can not be satisfied and the allocation will
267 * fall back to memory below @goal.
268 *
269 * Allocation may happen on any node in the system.
270 *
271 * Returns NULL on failure.
272 */
273void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
274					unsigned long goal)
275{
276	unsigned long limit = -1UL;
277
278	return ___alloc_bootmem_nopanic(size, align, goal, limit);
279}
280
281static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
282					unsigned long goal, unsigned long limit)
283{
284	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);
285
286	if (mem)
287		return mem;
288	/*
289	 * Whoops, we cannot satisfy the allocation request.
290	 */
291	pr_alert("bootmem alloc of %lu bytes failed!\n", size);
292	panic("Out of memory");
293	return NULL;
294}
295
296/**
297 * __alloc_bootmem - allocate boot memory
298 * @size: size of the request in bytes
299 * @align: alignment of the region
300 * @goal: preferred starting address of the region
301 *
302 * The goal is dropped if it can not be satisfied and the allocation will
303 * fall back to memory below @goal.
304 *
305 * Allocation may happen on any node in the system.
306 *
307 * The function panics if the request can not be satisfied.
308 */
309void * __init __alloc_bootmem(unsigned long size, unsigned long align,
310			      unsigned long goal)
311{
312	unsigned long limit = -1UL;
313
314	return ___alloc_bootmem(size, align, goal, limit);
315}
316
317void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
318						   unsigned long size,
319						   unsigned long align,
320						   unsigned long goal,
321						   unsigned long limit)
322{
323	void *ptr;
324
325again:
326	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
327					goal, limit);
328	if (ptr)
329		return ptr;
330
331	ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align,
332					goal, limit);
333	if (ptr)
334		return ptr;
335
336	if (goal) {
337		goal = 0;
338		goto again;
339	}
340
341	return NULL;
342}
343
344void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
345				   unsigned long align, unsigned long goal)
346{
347	if (WARN_ON_ONCE(slab_is_available()))
348		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
349
350	return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
351}
352
353static void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
354				    unsigned long align, unsigned long goal,
355				    unsigned long limit)
356{
357	void *ptr;
358
359	ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
360	if (ptr)
361		return ptr;
362
363	pr_alert("bootmem alloc of %lu bytes failed!\n", size);
364	panic("Out of memory");
365	return NULL;
366}
367
368/**
369 * __alloc_bootmem_node - allocate boot memory from a specific node
370 * @pgdat: node to allocate from
371 * @size: size of the request in bytes
372 * @align: alignment of the region
373 * @goal: preferred starting address of the region
374 *
375 * The goal is dropped if it can not be satisfied and the allocation will
376 * fall back to memory below @goal.
377 *
378 * Allocation may fall back to any node in the system if the specified node
379 * can not hold the requested memory.
380 *
381 * The function panics if the request can not be satisfied.
382 */
383void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
384				   unsigned long align, unsigned long goal)
385{
386	if (WARN_ON_ONCE(slab_is_available()))
387		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
388
389	return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
390}
391
392void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
393				   unsigned long align, unsigned long goal)
394{
395	return __alloc_bootmem_node(pgdat, size, align, goal);
396}
397
398#ifndef ARCH_LOW_ADDRESS_LIMIT
399#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
400#endif
401
402/**
403 * __alloc_bootmem_low - allocate low boot memory
404 * @size: size of the request in bytes
405 * @align: alignment of the region
406 * @goal: preferred starting address of the region
407 *
408 * The goal is dropped if it can not be satisfied and the allocation will
409 * fall back to memory below @goal.
410 *
411 * Allocation may happen on any node in the system.
412 *
413 * The function panics if the request can not be satisfied.
414 */
415void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
416				  unsigned long goal)
417{
418	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
419}
420
421void * __init __alloc_bootmem_low_nopanic(unsigned long size,
422					  unsigned long align,
423					  unsigned long goal)
424{
425	return ___alloc_bootmem_nopanic(size, align, goal,
426					ARCH_LOW_ADDRESS_LIMIT);
427}
428
429/**
430 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
431 * @pgdat: node to allocate from
432 * @size: size of the request in bytes
433 * @align: alignment of the region
434 * @goal: preferred starting address of the region
435 *
436 * The goal is dropped if it can not be satisfied and the allocation will
437 * fall back to memory below @goal.
438 *
439 * Allocation may fall back to any node in the system if the specified node
440 * can not hold the requested memory.
441 *
442 * The function panics if the request can not be satisfied.
443 */
444void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
445				       unsigned long align, unsigned long goal)
446{
447	if (WARN_ON_ONCE(slab_is_available()))
448		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
449
450	return ___alloc_bootmem_node(pgdat, size, align, goal,
451				     ARCH_LOW_ADDRESS_LIMIT);
452}