// SPDX-License-Identifier: GPL-2.0
/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/bootmem.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data = {
	.bdata = &bootmem_node_data[0]
};
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		pr_info("bootmem::%s " fmt,		\
			__func__, ## args);		\
})

static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = DIV_ROUND_UP(pages, BITS_PER_BYTE);

	return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long bytes = bootmap_bytes(pages);

	return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}

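/*
 * Worked example (illustrative, not part of the original file): with
 * 4 KiB pages, 1 GiB of RAM is 262144 page frames, so the bitmap needs
 * DIV_ROUND_UP(262144, 8) = 32768 bytes, which PAGE_ALIGNs to 8 pages:
 *
 *	map_pages = bootmem_bootmap_pages(1UL << (30 - PAGE_SHIFT));
 *	// map_pages == 8 when PAGE_SHIFT == 12
 */
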
/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	bootmem_data_t *ent;

	list_for_each_entry(ent, &bdata_list, list) {
		if (bdata->node_min_pfn < ent->node_min_pfn) {
			list_add_tail(&bdata->list, &ent->list);
			return;
		}
	}

	list_add_tail(&bdata->list, &bdata_list);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_min_pfn = start;
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}

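/*
 * Usage sketch (illustrative, not part of the original file; bitmap_pfn,
 * start_pfn and ram_size are placeholders for arch-specific values): a
 * UMA architecture's setup_arch() typically places the bitmap right
 * after the kernel image, frees the usable RAM, and then re-reserves
 * the bitmap itself, since init_bootmem() starts with every page
 * marked reserved:
 *
 *	bitmap_size = init_bootmem(bitmap_pfn, max_low_pfn);
 *	free_bootmem(PFN_PHYS(start_pfn), ram_size);
 *	reserve_bootmem(PFN_PHYS(bitmap_pfn), bitmap_size, BOOTMEM_DEFAULT);
 */
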
/*
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part_phys(physaddr, size);

	cursor = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}

static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	struct page *page;
	unsigned long *map, start, end, pages, cur, count = 0;

	if (!bdata->node_bootmem_map)
		return 0;

	map = bdata->node_bootmem_map;
	start = bdata->node_min_pfn;
	end = bdata->node_low_pfn;

	bdebug("nid=%td start=%lx end=%lx\n",
		bdata - bootmem_node_data, start, end);

	while (start < end) {
		unsigned long idx, vec;
		unsigned shift;

		idx = start - bdata->node_min_pfn;
		shift = idx & (BITS_PER_LONG - 1);
		/*
		 * vec holds at most BITS_PER_LONG map bits,
		 * bit 0 corresponds to start.
		 */
		vec = ~map[idx / BITS_PER_LONG];

		if (shift) {
			vec >>= shift;
			if (end - start >= BITS_PER_LONG)
				vec |= ~map[idx / BITS_PER_LONG + 1] <<
					(BITS_PER_LONG - shift);
		}
		/*
		 * If we have a properly aligned and fully unreserved
		 * BITS_PER_LONG block of pages in front of us, free
		 * it in one go.
		 */
		if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), start, order);
			count += BITS_PER_LONG;
			start += BITS_PER_LONG;
		} else {
			cur = start;

			start = ALIGN(start + 1, BITS_PER_LONG);
			while (vec && cur != start) {
				if (vec & 1) {
					page = pfn_to_page(cur);
					__free_pages_bootmem(page, cur, 0);
					count++;
				}
				vec >>= 1;
				++cur;
			}
		}
	}

	cur = bdata->node_min_pfn;
	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - bdata->node_min_pfn;
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, cur++, 0);
	bdata->node_bootmem_map = NULL;

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->managed_pages = 0;
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	unsigned long total_pages = 0;
	bootmem_data_t *bdata;

	reset_all_zones_managed_pages();

	list_for_each_entry(bdata, &bdata_list, list)
		total_pages += free_all_bootmem_core(bdata);

	totalram_pages += total_pages;

	return total_pages;
}

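/*
 * Usage sketch (illustrative, not part of the original file): an
 * architecture's mem_init() makes this handover exactly once; after it
 * returns, the bitmap itself has been freed and all further allocations
 * must go through the page allocator or slab:
 *
 *	void __init mem_init(void)
 *	{
 *		free_all_bootmem();
 *		mem_init_print_info(NULL);
 *	}
 */
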
static void __init __free(bootmem_data_t *bdata,
			unsigned long sidx, unsigned long eidx)
{
	unsigned long idx;

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn);

	if (WARN_ON(bdata->node_bootmem_map == NULL))
		return;

	if (bdata->hint_idx > sidx)
		bdata->hint_idx = sidx;

	for (idx = sidx; idx < eidx; idx++)
		if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
			BUG();
}

static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
			unsigned long eidx, int flags)
{
	unsigned long idx;
	int exclusive = flags & BOOTMEM_EXCLUSIVE;

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn,
		flags);

	if (WARN_ON(bdata->node_bootmem_map == NULL))
		return 0;

	for (idx = sidx; idx < eidx; idx++)
		if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
			if (exclusive) {
				__free(bdata, sidx, idx);
				return -EBUSY;
			}
			bdebug("silent double reserve of PFN %lx\n",
				idx + bdata->node_min_pfn);
		}
	return 0;
}

static int __init mark_bootmem_node(bootmem_data_t *bdata,
				unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long sidx, eidx;

	bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
		bdata - bootmem_node_data, start, end, reserve, flags);

	BUG_ON(start < bdata->node_min_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	sidx = start - bdata->node_min_pfn;
	eidx = end - bdata->node_min_pfn;

	if (reserve)
		return __reserve(bdata, sidx, eidx, flags);
	else
		__free(bdata, sidx, eidx);
	return 0;
}

static int __init mark_bootmem(unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long pos;
	bootmem_data_t *bdata;

	pos = start;
	list_for_each_entry(bdata, &bdata_list, list) {
		int err;
		unsigned long max;

		if (pos < bdata->node_min_pfn ||
		    pos >= bdata->node_low_pfn) {
			BUG_ON(pos != start);
			continue;
		}

		max = min(bdata->node_low_pfn, end);

		err = mark_bootmem_node(bdata, pos, max, reserve, flags);
		if (reserve && err) {
			mark_bootmem(start, pos, 0, 0);
			return err;
		}

		if (max == end)
			return 0;
		pos = bdata->node_low_pfn;
	}
	BUG();
}

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part_phys(physaddr, size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}

/**
 * free_bootmem - mark a page range as usable
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long physaddr, unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part_phys(physaddr, size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem(start, end, 0, 0);
}

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				 unsigned long size, int flags)
{
	unsigned long start, end;

	start = PFN_DOWN(physaddr);
	end = PFN_UP(physaddr + size);

	return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			    int flags)
{
	unsigned long start, end;

	start = PFN_DOWN(addr);
	end = PFN_UP(addr + size);

	return mark_bootmem(start, end, 1, flags);
}

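/*
 * Usage sketch (illustrative, not part of the original file; table_phys
 * and table_len are placeholder values): early code can claim a
 * firmware-described region exclusively and back off if it was already
 * taken:
 *
 *	if (reserve_bootmem(table_phys, table_len, BOOTMEM_EXCLUSIVE))
 *		return -EBUSY;
 *
 * With BOOTMEM_DEFAULT instead, a double reservation is logged by
 * bdebug() but succeeds silently.
 */
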
static unsigned long __init align_idx(struct bootmem_data *bdata,
				      unsigned long idx, unsigned long step)
{
	unsigned long base = bdata->node_min_pfn;

	/*
	 * Align the index with respect to the node start so that the
	 * combination of both satisfies the requested alignment.
	 */

	return ALIGN(base + idx, step) - base;
}

static unsigned long __init align_off(struct bootmem_data *bdata,
				      unsigned long off, unsigned long align)
{
	unsigned long base = PFN_PHYS(bdata->node_min_pfn);

	/* Same as align_idx for byte offsets */

	return ALIGN(base + off, align) - base;
}

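/*
 * Worked example (illustrative, not part of the original file): bitmap
 * indices are relative to node_min_pfn, so aligning the bare index is
 * not enough. With node_min_pfn == 5 and step == 4, index 1 (pfn 6)
 * must advance to index 3 (pfn 8):
 *
 *	align_idx(bdata, 1, 4) == ALIGN(5 + 1, 4) - 5 == 3
 */
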
static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	unsigned long fallback = 0;
	unsigned long min, max, start, sidx, midx, step;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	BUG_ON(!size);
	BUG_ON(align & (align - 1));
	BUG_ON(limit && goal + size > limit);

	if (!bdata->node_bootmem_map)
		return NULL;

	min = bdata->node_min_pfn;
	max = bdata->node_low_pfn;

	goal >>= PAGE_SHIFT;
	limit >>= PAGE_SHIFT;

	if (limit && max > limit)
		max = limit;
	if (max <= min)
		return NULL;

	step = max(align >> PAGE_SHIFT, 1UL);

	if (goal && min < goal && goal < max)
		start = ALIGN(goal, step);
	else
		start = ALIGN(min, step);

	sidx = start - bdata->node_min_pfn;
	midx = max - bdata->node_min_pfn;

	if (bdata->hint_idx > sidx) {
		/*
		 * Handle the valid case of sidx being zero and still
		 * catch the fallback below.
		 */
		fallback = sidx + 1;
		sidx = align_idx(bdata, bdata->hint_idx, step);
	}

	while (1) {
		int merge;
		void *region;
		unsigned long eidx, i, start_off, end_off;
find_block:
		sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
		sidx = align_idx(bdata, sidx, step);
		eidx = sidx + PFN_UP(size);

		if (sidx >= midx || eidx > midx)
			break;

		for (i = sidx; i < eidx; i++)
			if (test_bit(i, bdata->node_bootmem_map)) {
				sidx = align_idx(bdata, i, step);
				if (sidx == i)
					sidx += step;
				goto find_block;
			}

		if (bdata->last_end_off & (PAGE_SIZE - 1) &&
				PFN_DOWN(bdata->last_end_off) + 1 == sidx)
			start_off = align_off(bdata, bdata->last_end_off, align);
		else
			start_off = PFN_PHYS(sidx);

		merge = PFN_DOWN(start_off) < sidx;
		end_off = start_off + size;

		bdata->last_end_off = end_off;
		bdata->hint_idx = PFN_UP(end_off);

		/*
		 * Reserve the area now:
		 */
		if (__reserve(bdata, PFN_DOWN(start_off) + merge,
				PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
			BUG();

		region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
				start_off);
		memset(region, 0, size);
		/*
		 * The min_count is set to 0 so that bootmem allocated blocks
		 * are never reported as leaks.
		 */
		kmemleak_alloc(region, size, 0, 0);
		return region;
	}

	if (fallback) {
		sidx = align_idx(bdata, fallback - 1, step);
		fallback = 0;
		goto find_block;
	}

	return NULL;
}

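/*
 * Worked example (illustrative, not part of the original file): the
 * last_end_off/merge machinery lets small allocations share a page.
 * If a previous allocation ended at byte offset 0x1064, a following
 * request with align <= 4 gets start_off == 0x1064 instead of a fresh
 * page; merge is then 1 because PFN_DOWN(0x1064) == 1 lies below the
 * free index found (2), so __reserve() starts at index 2 and skips the
 * partially used page that is already marked reserved.
 */
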
static void * __init alloc_bootmem_core(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	bootmem_data_t *bdata;
	void *region;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

	list_for_each_entry(bdata, &bdata_list, list) {
		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
			continue;
		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
			break;

		region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
		if (region)
			return region;
	}

	return NULL;
}

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					      unsigned long align,
					      unsigned long goal,
					      unsigned long limit)
{
	void *ptr;

restart:
	ptr = alloc_bootmem_core(size, align, goal, limit);
	if (ptr)
		return ptr;
	if (goal) {
		goal = 0;
		goto restart;
	}

	return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = 0;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

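/*
 * Usage sketch (illustrative, not part of the original file): callers
 * of the nopanic variant must check for failure themselves. A goal of
 * 0 expresses no placement preference:
 *
 *	buf = __alloc_bootmem_nopanic(size, SMP_CACHE_BYTES, 0);
 *	if (!buf)
 *		return -ENOMEM;
 */
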
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	pr_alert("bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = 0;

	return ___alloc_bootmem(size, align, goal, limit);
}

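/*
 * Usage sketch (illustrative, not part of the original file; table_bytes
 * is a placeholder size): early setup code that cannot continue without
 * the memory simply relies on the panic path, e.g. for a page-aligned
 * hash table sized before the buddy allocator is up:
 *
 *	table = __alloc_bootmem(table_bytes, PAGE_SIZE, 0);
 */
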
void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
again:

	/* do not panic in alloc_bootmem_bdata() */
	if (limit && goal + size > limit)
		limit = 0;

	ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(size, align, goal, limit);
	if (ptr)
		return ptr;

	if (goal) {
		goal = 0;
		goto again;
	}

	return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				    unsigned long align, unsigned long goal,
				    unsigned long limit)
{
	void *ptr;

	ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
	if (ptr)
		return ptr;

	pr_alert("bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
	unsigned long end_pfn;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* update goal according to MAX_DMA32_PFN */
	end_pfn = pgdat_end_pfn(pgdat);

	if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
		void *ptr;
		unsigned long new_goal;

		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
		ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
						 new_goal, 0);
		if (ptr)
			return ptr;
	}
#endif

	return __alloc_bootmem_node(pgdat, size, align, goal);
}

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

void * __init __alloc_bootmem_low_nopanic(unsigned long size,
					  unsigned long align,
					  unsigned long goal)
{
	return ___alloc_bootmem_nopanic(size, align, goal,
					ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align,
				     goal, ARCH_LOW_ADDRESS_LIMIT);
}
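
/*
 * Usage sketch (illustrative, not part of the original file; buf_bytes
 * is a placeholder size): the _low variants serve devices that can only
 * address memory below ARCH_LOW_ADDRESS_LIMIT, e.g. an early bounce
 * buffer for legacy DMA:
 *
 *	buf = __alloc_bootmem_low(buf_bytes, PAGE_SIZE, 0);
 */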