/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,
	.reserved.name		= "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
	.physmem.name		= "physmem",
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
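
/*
 * Illustrative example (hypothetical values, not part of the original file):
 * with a 64-bit phys_addr_t, a region whose end would wrap past the top of
 * the physical address space gets trimmed by memblock_cap_size():
 *
 *	phys_addr_t size = 0x2000;
 *
 *	memblock_cap_size(0xfffffffffffff000ULL, &size);
 *	// size is now 0xfff, i.e. ULLONG_MAX - 0xfffffffffffff000
 */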

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}
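
/*
 * Illustrative example (hypothetical regions, not from the original file):
 * ranges are treated as half-open, so regions that merely touch do not
 * overlap.  With @type holding 0:[0x1000,0x2000) and 1:[0x3000,0x4000):
 *
 *	memblock_overlaps_region(type, 0x1800, 0x1000);	// true, hits region 0
 *	memblock_overlaps_region(type, 0x2000, 0x1000);	// false, only abuts it
 */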

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation fails, it will try to allocate memory top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * a failure happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	ulong flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					    NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}
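
/*
 * Usage sketch (illustrative, not from the original file; SZ_* constants
 * from <linux/sizes.h> assumed): find 1MiB below 4GiB at 2MiB alignment,
 * then reserve it.  memblock_find_in_range() only locates a range, it
 * does not reserve it:
 *
 *	phys_addr_t pa = memblock_find_in_range(0, SZ_4G, SZ_1M, SZ_2M);
 *
 *	if (pa)
 *		BUG_ON(memblock_reserve(pa, SZ_1M));
 */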

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
/**
 * memblock_discard - discard the memory and reserved arrays if they were
 * allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() is true and we
	 * use the slab allocator, or we use MEMBLOCK for allocations. That
	 * means that this is unsafe to use when bootmem is currently active
	 * (unless bootmem itself is implemented on top of MEMBLOCK which
	 * isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			type->name, type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise, we
	 * needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_add: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
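
/*
 * Usage sketch (illustrative, not from the original file): an early arch
 * setup path that has parsed one RAM bank from firmware registers it with
 *
 *	memblock_add(bank_base, bank_size);
 *
 * Overlapping or adjacent calls are fine; memblock_add_range() merges
 * compatible neighbours, so the "memory" type stays minimal.
 */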

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
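
/*
 * Illustrative example (hypothetical region, not from the original file):
 * with @type holding the single region 0:[0x0,0x10000), isolating
 * [0x4000,0x8000) splits it into
 *
 *	0:[0x0,0x4000)  1:[0x4000,0x8000)  2:[0x8000,0x10000)
 *
 * and returns *start_rgn = 1, *end_rgn = 2, so callers can walk the
 * regions inside the range with for (i = start_rgn; i < end_rgn; i++).
 */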

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("   memblock_free: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}
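
/*
 * Usage sketch (illustrative, not from the original file; SZ_64K from
 * <linux/sizes.h> assumed): carve out a 64KiB scratch buffer during boot,
 * for some phys_addr_t pa previously found e.g. via
 * memblock_find_in_range(), and hand it back when done:
 *
 *	BUG_ON(memblock_reserve(pa, SZ_64K));
 *	... use the buffer ...
 *	memblock_free(pa, SZ_64K);
 */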

/**
 * memblock_setclr_flag - set or clear a flag on a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set (1) or clear (0) the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag.
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}
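
/*
 * Usage sketch (illustrative, not from the original file): firmware
 * descriptions of special ranges are typically translated into these
 * flag updates during early boot, e.g.
 *
 *	memblock_mark_hotplug(base, size);	// SRAT says hot-pluggable
 *	memblock_mark_mirror(base, size);	// EFI says more reliable
 *	memblock_mark_nomap(base, size);	// firmware-owned, don't map
 */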

/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.	For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : (phys_addr_t)ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
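
/*
 * Usage sketch (illustrative, not from the original file): callers
 * normally reach this through the for_each_free_mem_range() wrapper,
 * which walks memory that is not covered by reserved regions:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free: [%pa-%pa]\n", &start, &end);
 */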

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : (phys_addr_t)ULLONG_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */

			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
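
/*
 * Usage sketch (illustrative, not from the original file): a NUMA init
 * path that has matched a physical range to an SRAT/ACPI affinity entry
 * records the node like this:
 *
 *	memblock_set_node(base, size, &memblock.memory, nid);
 */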

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}

phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, ulong flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	ulong flags = choose_memblock_flags();
	phys_addr_t ret;

again:
	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				      nid, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}
	return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate %pa bytes below %pa.\n",
		      &size, &max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
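
/*
 * Usage sketch (illustrative, not from the original file): grab a
 * page-aligned page for an early page table; memblock_alloc_base()
 * panics on failure, so no error check is needed here:
 *
 *	phys_addr_t pt = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 */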

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, the function sets the min_count to 0 using kmemleak_alloc for
 * the allocated boot memory block, so that it is never reported as a leak.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;
	ulong flags = choose_memblock_flags();

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;
again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, flags);
	if (alloc && !memblock_reserve(alloc, size))
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    flags);
		if (alloc && !memblock_reserve(alloc, size))
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return NULL;
done:
	ptr = phys_to_virt(alloc);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}

/**
 * memblock_virt_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);

	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
#ifdef CONFIG_DEBUG_VM
	if (ptr && size > 0)
		memset(ptr, PAGE_POISON_PATTERN, size);
#endif
	return ptr;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);

	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		memset(ptr, 0, size);
	return ptr;
}
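
/*
 * Usage sketch (illustrative, not from the original file): allocate a
 * zeroed, preferably node-local table and degrade gracefully instead of
 * panicking when early memory is exhausted:
 *
 *	table = memblock_virt_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
 *						    0, BOOTMEM_ALLOC_ACCESSIBLE,
 *						    nid);
 *	if (!table)
 *		return -ENOMEM;	// caller falls back or disables the feature
 */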

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr) {
		memset(ptr, 0, size);
		return ptr;
	}

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	memblock_remove_range(&memblock.reserved, base, size);
}

/*
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep original value ULLONG_MAX
	 */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == (phys_addr_t)ULLONG_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
						&start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			base + size, (phys_addr_t)ULLONG_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == (phys_addr_t)ULLONG_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
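
/*
 * Illustrative example (hypothetical regions, not from the original file):
 * with @type holding 0:[0x1000,0x2000) and 1:[0x8000,0x9000), the binary
 * search behaves as follows:
 *
 *	memblock_search(type, 0x1800);	// returns 0
 *	memblock_search(type, 0x5000);	// returns -1, hole between regions
 */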

bool __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * true if the region is a subset of a memory block, false otherwise.
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}
1692
1693void __init_memblock memblock_trim_memory(phys_addr_t align)
1694{
1695	phys_addr_t start, end, orig_start, orig_end;
1696	struct memblock_region *r;
1697
1698	for_each_memblock(memory, r) {
1699		orig_start = r->base;
1700		orig_end = r->base + r->size;
1701		start = round_up(orig_start, align);
1702		end = round_down(orig_end, align);
1703
1704		if (start == orig_start && end == orig_end)
1705			continue;
1706
1707		if (start < end) {
1708			r->base = start;
1709			r->size = end - start;
1710		} else {
1711			memblock_remove_region(&memblock.memory,
1712					       r - memblock.memory.regions);
1713			r--;
1714		}
1715	}
1716}
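
/*
 * Illustrative call (the alignment value is an example): architectures
 * that cannot map partial pages trim ragged bank edges after registering
 * memory.
 *
 *	memblock_trim_memory(PAGE_SIZE);
 */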
1717
1718void __init_memblock memblock_set_current_limit(phys_addr_t limit)
1719{
1720	memblock.current_limit = limit;
1721}
1722
1723phys_addr_t __init_memblock memblock_get_current_limit(void)
1724{
1725	return memblock.current_limit;
1726}
1727
1728static void __init_memblock memblock_dump(struct memblock_type *type)
1729{
1730	phys_addr_t base, end, size;
1731	unsigned long flags;
1732	int idx;
1733	struct memblock_region *rgn;
1734
1735	pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);
1736
1737	for_each_memblock_type(idx, type, rgn) {
1738		char nid_buf[32] = "";
1739
1740		base = rgn->base;
1741		size = rgn->size;
1742		end = base + size - 1;
1743		flags = rgn->flags;
1744#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1745		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
1746			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
1747				 memblock_get_region_node(rgn));
1748#endif
1749		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#lx\n",
1750			type->name, idx, &base, &end, &size, nid_buf, flags);
1751	}
1752}
1753
1754void __init_memblock __memblock_dump_all(void)
1755{
1756	pr_info("MEMBLOCK configuration:\n");
1757	pr_info(" memory size = %pa reserved size = %pa\n",
1758		&memblock.memory.total_size,
1759		&memblock.reserved.total_size);
1760
1761	memblock_dump(&memblock.memory);
1762	memblock_dump(&memblock.reserved);
1763#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1764	memblock_dump(&memblock.physmem);
1765#endif
1766}
1767
1768void __init memblock_allow_resize(void)
1769{
1770	memblock_can_resize = 1;
1771}
1772
1773static int __init early_memblock(char *p)
1774{
1775	if (p && strstr(p, "debug"))
1776		memblock_debug = 1;
1777	return 0;
1778}
1779early_param("memblock", early_memblock);
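
/*
 * Usage: booting with "memblock=debug" on the kernel command line sets
 * memblock_debug above, which makes the memblock_dbg() calls in this file
 * print region operations during early boot.
 */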
1780
1781#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
1782
1783static int memblock_debug_show(struct seq_file *m, void *private)
1784{
1785	struct memblock_type *type = m->private;
1786	struct memblock_region *reg;
1787	int i;
1788	phys_addr_t end;
1789
1790	for (i = 0; i < type->cnt; i++) {
1791		reg = &type->regions[i];
1792		end = reg->base + reg->size - 1;
1793
1794		seq_printf(m, "%4d: ", i);
1795		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
1796	}
1797	return 0;
1798}
1799DEFINE_SHOW_ATTRIBUTE(memblock_debug);
1800
1801static int __init memblock_init_debugfs(void)
1802{
1803	struct dentry *root = debugfs_create_dir("memblock", NULL);
1804	if (!root)
1805		return -ENXIO;
1806	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
1807	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
1808#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1809	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
1810#endif
1811
1812	return 0;
1813}
1814__initcall(memblock_init_debugfs);
1815
1816#endif /* CONFIG_DEBUG_FS */
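
/*
 * With CONFIG_DEBUG_FS enabled (and debugfs mounted at the usual
 * /sys/kernel/debug), the region tables can be inspected at runtime;
 * the output below is illustrative:
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000080000000..0x00000000ffffffff
 */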
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Procedures for maintaining information about logical memory blocks.
   4 *
   5 * Peter Bergner, IBM Corp.	June 2001.
   6 * Copyright (C) 2001 Peter Bergner.
   7 */
   8
   9#include <linux/kernel.h>
  10#include <linux/slab.h>
  11#include <linux/init.h>
  12#include <linux/bitops.h>
  13#include <linux/poison.h>
  14#include <linux/pfn.h>
  15#include <linux/debugfs.h>
  16#include <linux/kmemleak.h>
  17#include <linux/seq_file.h>
  18#include <linux/memblock.h>
  19
  20#include <asm/sections.h>
  21#include <linux/io.h>
  22
  23#include "internal.h"
  24
  25#define INIT_MEMBLOCK_REGIONS			128
  26#define INIT_PHYSMEM_REGIONS			4
  27
  28#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
  29# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
  30#endif
  31
  32/**
  33 * DOC: memblock overview
  34 *
  35 * Memblock is a method of managing memory regions during the early
  36 * boot period when the usual kernel memory allocators are not up and
  37 * running.
  38 *
  39 * Memblock views the system memory as collections of contiguous
  40 * regions. There are several types of these collections:
  41 *
  42 * * ``memory`` - describes the physical memory available to the
  43 *   kernel; this may differ from the actual physical memory installed
  44 *   in the system, for instance when the memory is restricted with
  45 *   ``mem=`` command line parameter
  46 * * ``reserved`` - describes the regions that were allocated
  47 * * ``physmem`` - describes the actual physical memory available during
  48 *   boot regardless of the possible restrictions and memory hot(un)plug;
  49 *   the ``physmem`` type is only available on some architectures.
  50 *
  51 * Each region is represented by struct memblock_region that
  52 * defines the region extents, its attributes and NUMA node id on NUMA
  53 * systems. Every memory type is described by the struct memblock_type
  54 * which contains an array of memory regions along with
  55 * the allocator metadata. The "memory" and "reserved" types are nicely
  56 * wrapped with struct memblock. This structure is statically
  57 * initialized at build time. The region arrays are initially sized to
  58 * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS
  59 * for "reserved". The region array for "physmem" is initially sized to
  60 * %INIT_PHYSMEM_REGIONS.
  61 * The memblock_allow_resize() enables automatic resizing of the region
  62 * arrays during addition of new regions. This feature should be used
  63 * with care so that memory allocated for the region array will not
  64 * overlap with areas that should be reserved, for example initrd.
  65 *
  66 * The early architecture setup should tell memblock what the physical
  67 * memory layout is by using memblock_add() or memblock_add_node()
  68 * functions. The first function does not assign the region to a NUMA
  69 * node and it is appropriate for UMA systems. Yet, it is possible to
  70 * use it on NUMA systems as well and assign the region to a NUMA node
  71 * later in the setup process using memblock_set_node(). The
  72 * memblock_add_node() performs such an assignment directly.
  73 *
  74 * Once memblock is setup the memory can be allocated using one of the
  75 * API variants:
  76 *
  77 * * memblock_phys_alloc*() - these functions return the **physical**
  78 *   address of the allocated memory
  79 * * memblock_alloc*() - these functions return the **virtual** address
  80 *   of the allocated memory.
  81 *
  82 * Note that both API variants use implicit assumptions about allowed
  83 * memory ranges and the fallback methods. Consult the documentation
  84 * of the memblock_alloc_internal() and memblock_alloc_range_nid()
  85 * functions for a more elaborate description.
  86 *
  87 * As the system boot progresses, the architecture specific mem_init()
  88 * function frees all the memory to the buddy page allocator.
  89 *
  90 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
  91 * memblock data structures (except "physmem") will be discarded after the
  92 * system initialization completes.
  93 */
  94
  95#ifndef CONFIG_NUMA
  96struct pglist_data __refdata contig_page_data;
  97EXPORT_SYMBOL(contig_page_data);
  98#endif
  99
 100unsigned long max_low_pfn;
 101unsigned long min_low_pfn;
 102unsigned long max_pfn;
 103unsigned long long max_possible_pfn;
 104
 105static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
 106static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
 107#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
 108static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
 109#endif
 110
 111struct memblock memblock __initdata_memblock = {
 112	.memory.regions		= memblock_memory_init_regions,
 113	.memory.cnt		= 1,	/* empty dummy entry */
 114	.memory.max		= INIT_MEMBLOCK_REGIONS,
 115	.memory.name		= "memory",
 116
 117	.reserved.regions	= memblock_reserved_init_regions,
 118	.reserved.cnt		= 1,	/* empty dummy entry */
 119	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
 120	.reserved.name		= "reserved",
 121
 122	.bottom_up		= false,
 123	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
 124};
 125
 126#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
 127struct memblock_type physmem = {
 128	.regions		= memblock_physmem_init_regions,
 129	.cnt			= 1,	/* empty dummy entry */
 130	.max			= INIT_PHYSMEM_REGIONS,
 131	.name			= "physmem",
 132};
 133#endif
 134
 135/*
 136 * keep a pointer to &memblock.memory in the text section to use it in
 137 * __next_mem_range() and its helpers.
 138 * For architectures that do not keep memblock data after init, this
 139 * pointer will be reset to NULL at memblock_discard().
 140 */
 141static __refdata struct memblock_type *memblock_memory = &memblock.memory;
 142
 143#define for_each_memblock_type(i, memblock_type, rgn)			\
 144	for (i = 0, rgn = &memblock_type->regions[0];			\
 145	     i < memblock_type->cnt;					\
 146	     i++, rgn = &memblock_type->regions[i])
 147
 148#define memblock_dbg(fmt, ...)						\
 149	do {								\
 150		if (memblock_debug)					\
 151			pr_info(fmt, ##__VA_ARGS__);			\
 152	} while (0)
 153
 154static int memblock_debug __initdata_memblock;
 155static bool system_has_some_mirror __initdata_memblock = false;
 156static int memblock_can_resize __initdata_memblock;
 157static int memblock_memory_in_slab __initdata_memblock = 0;
 158static int memblock_reserved_in_slab __initdata_memblock = 0;
 159
 160static enum memblock_flags __init_memblock choose_memblock_flags(void)
 161{
 162	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
 163}
 164
 165/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
 166static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
 167{
 168	return *size = min(*size, PHYS_ADDR_MAX - base);
 169}
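
/*
 * Worked example (hypothetical values): with base = PHYS_ADDR_MAX - 0xfff
 * and *size = 0x4000, memblock_cap_size() clamps *size to 0xfff so that
 * base + *size cannot wrap past PHYS_ADDR_MAX.
 */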
 170
 171/*
 172 * Address comparison utilities
 173 */
 174static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
 175				       phys_addr_t base2, phys_addr_t size2)
 176{
 177	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
 178}
 179
 180bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
 181					phys_addr_t base, phys_addr_t size)
 182{
 183	unsigned long i;
 184
 185	memblock_cap_size(base, &size);
 186
 187	for (i = 0; i < type->cnt; i++)
 188		if (memblock_addrs_overlap(base, size, type->regions[i].base,
 189					   type->regions[i].size))
 190			break;
 191	return i < type->cnt;
 192}
 193
 194/**
 195 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 196 * @start: start of candidate range
 197 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 198 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 199 * @size: size of free area to find
 200 * @align: alignment of free area to find
 201 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 202 * @flags: pick from blocks based on memory attributes
 203 *
 204 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 205 *
 206 * Return:
 207 * Found address on success, 0 on failure.
 208 */
 209static phys_addr_t __init_memblock
 210__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
 211				phys_addr_t size, phys_addr_t align, int nid,
 212				enum memblock_flags flags)
 213{
 214	phys_addr_t this_start, this_end, cand;
 215	u64 i;
 216
 217	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
 218		this_start = clamp(this_start, start, end);
 219		this_end = clamp(this_end, start, end);
 220
 221		cand = round_up(this_start, align);
 222		if (cand < this_end && this_end - cand >= size)
 223			return cand;
 224	}
 225
 226	return 0;
 227}
 228
 229/**
 230 * __memblock_find_range_top_down - find free area utility, in top-down
 231 * @start: start of candidate range
 232 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 233 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 234 * @size: size of free area to find
 235 * @align: alignment of free area to find
 236 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 237 * @flags: pick from blocks based on memory attributes
 238 *
 239 * Utility called from memblock_find_in_range_node(), find free area top-down.
 240 *
 241 * Return:
 242 * Found address on success, 0 on failure.
 243 */
 244static phys_addr_t __init_memblock
 245__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
 246			       phys_addr_t size, phys_addr_t align, int nid,
 247			       enum memblock_flags flags)
 248{
 249	phys_addr_t this_start, this_end, cand;
 250	u64 i;
 251
 252	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
 253					NULL) {
 254		this_start = clamp(this_start, start, end);
 255		this_end = clamp(this_end, start, end);
 256
 257		if (this_end < size)
 258			continue;
 259
 260		cand = round_down(this_end - size, align);
 261		if (cand >= this_start)
 262			return cand;
 263	}
 264
 265	return 0;
 266}
 267
 268/**
 269 * memblock_find_in_range_node - find free area in given range and node
 270 * @size: size of free area to find
 271 * @align: alignment of free area to find
 272 * @start: start of candidate range
 273 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 274 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 275 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 276 * @flags: pick from blocks based on memory attributes
 277 *
 278 * Find @size free area aligned to @align in the specified range and node.
 279 *
 280 * Return:
 281 * Found address on success, 0 on failure.
 282 */
 283static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 284					phys_addr_t align, phys_addr_t start,
 285					phys_addr_t end, int nid,
 286					enum memblock_flags flags)
 287{
 288	/* pump up @end */
 289	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
 290	    end == MEMBLOCK_ALLOC_KASAN)
 291		end = memblock.current_limit;
 292
 293	/* avoid allocating the first page */
 294	start = max_t(phys_addr_t, start, PAGE_SIZE);
 295	end = max(start, end);
 296
 297	if (memblock_bottom_up())
 298		return __memblock_find_range_bottom_up(start, end, size, align,
 299						       nid, flags);
 300	else
 301		return __memblock_find_range_top_down(start, end, size, align,
 302						      nid, flags);
 303}
 304
 305/**
 306 * memblock_find_in_range - find free area in given range
 307 * @start: start of candidate range
 308 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 309 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 310 * @size: size of free area to find
 311 * @align: alignment of free area to find
 312 *
 313 * Find @size free area aligned to @align in the specified range.
 314 *
 315 * Return:
 316 * Found address on success, 0 on failure.
 317 */
 318phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
 319					phys_addr_t end, phys_addr_t size,
 320					phys_addr_t align)
 321{
 322	phys_addr_t ret;
 323	enum memblock_flags flags = choose_memblock_flags();
 324
 325again:
 326	ret = memblock_find_in_range_node(size, align, start, end,
 327					    NUMA_NO_NODE, flags);
 328
 329	if (!ret && (flags & MEMBLOCK_MIRROR)) {
 330		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
 331			&size);
 332		flags &= ~MEMBLOCK_MIRROR;
 333		goto again;
 334	}
 335
 336	return ret;
 337}
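
/*
 * Sketch (sizes illustrative): find a page-aligned 1MiB window below
 * 4GiB, then claim it; memblock_find_in_range() only locates space, it
 * does not reserve it.
 *
 *	phys_addr_t addr = memblock_find_in_range(0, SZ_4G, SZ_1M, PAGE_SIZE);
 *
 *	if (addr)
 *		memblock_reserve(addr, SZ_1M);
 */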
 338
 339static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
 340{
 341	type->total_size -= type->regions[r].size;
 342	memmove(&type->regions[r], &type->regions[r + 1],
 343		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
 344	type->cnt--;
 345
 346	/* Special case for empty arrays */
 347	if (type->cnt == 0) {
 348		WARN_ON(type->total_size != 0);
 349		type->cnt = 1;
 350		type->regions[0].base = 0;
 351		type->regions[0].size = 0;
 352		type->regions[0].flags = 0;
 353		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
 354	}
 355}
 356
 357#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
 358/**
 359 * memblock_discard - discard memory and reserved arrays if they were allocated
 360 */
 361void __init memblock_discard(void)
 362{
 363	phys_addr_t addr, size;
 364
 365	if (memblock.reserved.regions != memblock_reserved_init_regions) {
 366		addr = __pa(memblock.reserved.regions);
 367		size = PAGE_ALIGN(sizeof(struct memblock_region) *
 368				  memblock.reserved.max);
 369		__memblock_free_late(addr, size);
 370	}
 371
 372	if (memblock.memory.regions != memblock_memory_init_regions) {
 373		addr = __pa(memblock.memory.regions);
 374		size = PAGE_ALIGN(sizeof(struct memblock_region) *
 375				  memblock.memory.max);
 376		__memblock_free_late(addr, size);
 377	}
 378
 379	memblock_memory = NULL;
 380}
 381#endif
 382
 383/**
 384 * memblock_double_array - double the size of the memblock regions array
 385 * @type: memblock type of the regions array being doubled
 386 * @new_area_start: starting address of memory range to avoid overlap with
 387 * @new_area_size: size of memory range to avoid overlap with
 388 *
 389 * Double the size of the @type regions array. If memblock is being used to
 390 * allocate memory for a new reserved regions array and there is a previously
 391 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 392 * waiting to be reserved, ensure the memory used by the new array does
 393 * not overlap.
 394 *
 395 * Return:
 396 * 0 on success, -1 on failure.
 397 */
 398static int __init_memblock memblock_double_array(struct memblock_type *type,
 399						phys_addr_t new_area_start,
 400						phys_addr_t new_area_size)
 401{
 402	struct memblock_region *new_array, *old_array;
 403	phys_addr_t old_alloc_size, new_alloc_size;
 404	phys_addr_t old_size, new_size, addr, new_end;
 405	int use_slab = slab_is_available();
 406	int *in_slab;
 407
 408	/* We don't allow resizing until we know about the reserved regions
 409	 * of memory that aren't suitable for allocation
 410	 */
 411	if (!memblock_can_resize)
 412		return -1;
 413
 414	/* Calculate new doubled size */
 415	old_size = type->max * sizeof(struct memblock_region);
 416	new_size = old_size << 1;
 417	/*
 418	 * We need to allocate the new array aligned to PAGE_SIZE,
 419	 *   so we can free it completely later.
 420	 */
 421	old_alloc_size = PAGE_ALIGN(old_size);
 422	new_alloc_size = PAGE_ALIGN(new_size);
 423
 424	/* Retrieve the slab flag */
 425	if (type == &memblock.memory)
 426		in_slab = &memblock_memory_in_slab;
 427	else
 428		in_slab = &memblock_reserved_in_slab;
 429
 430	/* Try to find some space for it */
 431	if (use_slab) {
 432		new_array = kmalloc(new_size, GFP_KERNEL);
 433		addr = new_array ? __pa(new_array) : 0;
 434	} else {
 435		/* only exclude range when trying to double reserved.regions */
 436		if (type != &memblock.reserved)
 437			new_area_start = new_area_size = 0;
 438
 439		addr = memblock_find_in_range(new_area_start + new_area_size,
 440						memblock.current_limit,
 441						new_alloc_size, PAGE_SIZE);
 442		if (!addr && new_area_size)
 443			addr = memblock_find_in_range(0,
 444				min(new_area_start, memblock.current_limit),
 445				new_alloc_size, PAGE_SIZE);
 446
 447		new_array = addr ? __va(addr) : NULL;
 448	}
 449	if (!addr) {
 450		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
 451		       type->name, type->max, type->max * 2);
 452		return -1;
 453	}
 454
 455	new_end = addr + new_size - 1;
 456	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
 457			type->name, type->max * 2, &addr, &new_end);
 458
 459	/*
 460	 * Found space, we now need to move the array over before we add the
 461	 * reserved region since it may be our reserved array itself that is
 462	 * full.
 463	 */
 464	memcpy(new_array, type->regions, old_size);
 465	memset(new_array + type->max, 0, old_size);
 466	old_array = type->regions;
 467	type->regions = new_array;
 468	type->max <<= 1;
 469
 470	/* Free old array. We needn't free it if the array is the static one */
 471	if (*in_slab)
 472		kfree(old_array);
 473	else if (old_array != memblock_memory_init_regions &&
 474		 old_array != memblock_reserved_init_regions)
 475		memblock_free(__pa(old_array), old_alloc_size);
 476
 477	/*
 478	 * Reserve the new array if that comes from the memblock.  Otherwise, we
 479	 * needn't do it
 480	 */
 481	if (!use_slab)
 482		BUG_ON(memblock_reserve(addr, new_alloc_size));
 483
 484	/* Update slab flag */
 485	*in_slab = use_slab;
 486
 487	return 0;
 488}
 489
 490/**
 491 * memblock_merge_regions - merge neighboring compatible regions
 492 * @type: memblock type to scan
 493 *
 494 * Scan @type and merge neighboring compatible regions.
 495 */
 496static void __init_memblock memblock_merge_regions(struct memblock_type *type)
 497{
 498	int i = 0;
 499
 500	/* cnt never goes below 1 */
 501	while (i < type->cnt - 1) {
 502		struct memblock_region *this = &type->regions[i];
 503		struct memblock_region *next = &type->regions[i + 1];
 504
 505		if (this->base + this->size != next->base ||
 506		    memblock_get_region_node(this) !=
 507		    memblock_get_region_node(next) ||
 508		    this->flags != next->flags) {
 509			BUG_ON(this->base + this->size > next->base);
 510			i++;
 511			continue;
 512		}
 513
 514		this->size += next->size;
 515		/* move forward from next + 1, index of which is i + 2 */
 516		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
 517		type->cnt--;
 518	}
 519}
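
/*
 * Example of the merge rule above (addresses illustrative): adjacent
 * regions [0x1000-0x2000) and [0x2000-0x3000) with identical node ids and
 * flags collapse into [0x1000-0x3000); if only one of them carries, say,
 * MEMBLOCK_MIRROR, they stay separate.
 */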
 520
 521/**
 522 * memblock_insert_region - insert new memblock region
 523 * @type:	memblock type to insert into
 524 * @idx:	index for the insertion point
 525 * @base:	base address of the new region
 526 * @size:	size of the new region
 527 * @nid:	node id of the new region
 528 * @flags:	flags of the new region
 529 *
 530 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 531 * @type must already have extra room to accommodate the new region.
 532 */
 533static void __init_memblock memblock_insert_region(struct memblock_type *type,
 534						   int idx, phys_addr_t base,
 535						   phys_addr_t size,
 536						   int nid,
 537						   enum memblock_flags flags)
 538{
 539	struct memblock_region *rgn = &type->regions[idx];
 540
 541	BUG_ON(type->cnt >= type->max);
 542	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
 543	rgn->base = base;
 544	rgn->size = size;
 545	rgn->flags = flags;
 546	memblock_set_region_node(rgn, nid);
 547	type->cnt++;
 548	type->total_size += size;
 549}
 550
 551/**
 552 * memblock_add_range - add new memblock region
 553 * @type: memblock type to add new region into
 554 * @base: base address of the new region
 555 * @size: size of the new region
 556 * @nid: nid of the new region
 557 * @flags: flags of the new region
 558 *
 559 * Add new memblock region [@base, @base + @size) into @type.  The new region
 560 * is allowed to overlap with existing ones - overlaps don't affect already
 561 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 562 * compatible regions are merged) after the addition.
 563 *
 564 * Return:
 565 * 0 on success, -errno on failure.
 566 */
 567static int __init_memblock memblock_add_range(struct memblock_type *type,
 568				phys_addr_t base, phys_addr_t size,
 569				int nid, enum memblock_flags flags)
 570{
 571	bool insert = false;
 572	phys_addr_t obase = base;
 573	phys_addr_t end = base + memblock_cap_size(base, &size);
 574	int idx, nr_new;
 575	struct memblock_region *rgn;
 576
 577	if (!size)
 578		return 0;
 579
 580	/* special case for empty array */
 581	if (type->regions[0].size == 0) {
 582		WARN_ON(type->cnt != 1 || type->total_size);
 583		type->regions[0].base = base;
 584		type->regions[0].size = size;
 585		type->regions[0].flags = flags;
 586		memblock_set_region_node(&type->regions[0], nid);
 587		type->total_size = size;
 588		return 0;
 589	}
 590repeat:
 591	/*
 592	 * The following is executed twice.  Once with %false @insert and
 593	 * then with %true.  The first counts the number of regions needed
 594	 * to accommodate the new area.  The second actually inserts them.
 595	 */
 596	base = obase;
 597	nr_new = 0;
 598
 599	for_each_memblock_type(idx, type, rgn) {
 600		phys_addr_t rbase = rgn->base;
 601		phys_addr_t rend = rbase + rgn->size;
 602
 603		if (rbase >= end)
 604			break;
 605		if (rend <= base)
 606			continue;
 607		/*
 608		 * @rgn overlaps.  If it separates the lower part of new
 609		 * area, insert that portion.
 610		 */
 611		if (rbase > base) {
 612#ifdef CONFIG_NUMA
 613			WARN_ON(nid != memblock_get_region_node(rgn));
 614#endif
 615			WARN_ON(flags != rgn->flags);
 616			nr_new++;
 617			if (insert)
 618				memblock_insert_region(type, idx++, base,
 619						       rbase - base, nid,
 620						       flags);
 621		}
 622		/* area below @rend is dealt with, forget about it */
 623		base = min(rend, end);
 624	}
 625
 626	/* insert the remaining portion */
 627	if (base < end) {
 628		nr_new++;
 629		if (insert)
 630			memblock_insert_region(type, idx, base, end - base,
 631					       nid, flags);
 632	}
 633
 634	if (!nr_new)
 635		return 0;
 636
 637	/*
 638	 * If this was the first round, resize array and repeat for actual
 639	 * insertions; otherwise, merge and return.
 640	 */
 641	if (!insert) {
 642		while (type->cnt + nr_new > type->max)
 643			if (memblock_double_array(type, obase, size) < 0)
 644				return -ENOMEM;
 645		insert = true;
 646		goto repeat;
 647	} else {
 648		memblock_merge_regions(type);
 649		return 0;
 650	}
 651}
 652
 653/**
 654 * memblock_add_node - add new memblock region within a NUMA node
 655 * @base: base address of the new region
 656 * @size: size of the new region
 657 * @nid: nid of the new region
 658 *
 659 * Add new memblock region [@base, @base + @size) to the "memory"
 660 * type. See memblock_add_range() description for more details.
 661 *
 662 * Return:
 663 * 0 on success, -errno on failure.
 664 */
 665int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
 666				       int nid)
 667{
 668	return memblock_add_range(&memblock.memory, base, size, nid, 0);
 669}
 670
 671/**
 672 * memblock_add - add new memblock region
 673 * @base: base address of the new region
 674 * @size: size of the new region
 675 *
 676 * Add new memblock region [@base, @base + @size) to the "memory"
 677 * type. See memblock_add_range() description for more details.
 678 *
 679 * Return:
 680 * 0 on success, -errno on failure.
 681 */
 682int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
 683{
 684	phys_addr_t end = base + size - 1;
 685
 686	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
 687		     &base, &end, (void *)_RET_IP_);
 688
 689	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
 690}
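
/*
 * Sketch of early arch setup (addresses and sizes hypothetical): register
 * a 512MiB bank at 1GiB, then carve out a firmware area so the allocator
 * never hands it out.
 *
 *	memblock_add(SZ_1G, SZ_512M);
 *	memblock_reserve(SZ_1G, SZ_1M);
 */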
 691
 692/**
 693 * memblock_isolate_range - isolate given range into disjoint memblocks
 694 * @type: memblock type to isolate range for
 695 * @base: base of range to isolate
 696 * @size: size of range to isolate
 697 * @start_rgn: out parameter for the start of isolated region
 698 * @end_rgn: out parameter for the end of isolated region
 699 *
 700 * Walk @type and ensure that regions don't cross the boundaries defined by
 701 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 702 * which may create at most two more regions.  The index of the first
 703 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 704 *
 705 * Return:
 706 * 0 on success, -errno on failure.
 707 */
 708static int __init_memblock memblock_isolate_range(struct memblock_type *type,
 709					phys_addr_t base, phys_addr_t size,
 710					int *start_rgn, int *end_rgn)
 711{
 712	phys_addr_t end = base + memblock_cap_size(base, &size);
 713	int idx;
 714	struct memblock_region *rgn;
 715
 716	*start_rgn = *end_rgn = 0;
 717
 718	if (!size)
 719		return 0;
 720
 721	/* we'll create at most two more regions */
 722	while (type->cnt + 2 > type->max)
 723		if (memblock_double_array(type, base, size) < 0)
 724			return -ENOMEM;
 725
 726	for_each_memblock_type(idx, type, rgn) {
 727		phys_addr_t rbase = rgn->base;
 728		phys_addr_t rend = rbase + rgn->size;
 729
 730		if (rbase >= end)
 731			break;
 732		if (rend <= base)
 733			continue;
 734
 735		if (rbase < base) {
 736			/*
 737			 * @rgn intersects from below.  Split and continue
 738			 * to process the next region - the new top half.
 739			 */
 740			rgn->base = base;
 741			rgn->size -= base - rbase;
 742			type->total_size -= base - rbase;
 743			memblock_insert_region(type, idx, rbase, base - rbase,
 744					       memblock_get_region_node(rgn),
 745					       rgn->flags);
 746		} else if (rend > end) {
 747			/*
 748			 * @rgn intersects from above.  Split and redo the
 749			 * current region - the new bottom half.
 750			 */
 751			rgn->base = end;
 752			rgn->size -= end - rbase;
 753			type->total_size -= end - rbase;
 754			memblock_insert_region(type, idx--, rbase, end - rbase,
 755					       memblock_get_region_node(rgn),
 756					       rgn->flags);
 757		} else {
 758			/* @rgn is fully contained, record it */
 759			if (!*end_rgn)
 760				*start_rgn = idx;
 761			*end_rgn = idx + 1;
 762		}
 763	}
 764
 765	return 0;
 766}
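
/*
 * Worked example (illustrative): with a single region [0x0-0x10000) in
 * @type, isolating [0x4000-0x8000) leaves three regions [0x0-0x4000),
 * [0x4000-0x8000) and [0x8000-0x10000); *start_rgn/*end_rgn then bracket
 * exactly the middle one.
 */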
 767
 768static int __init_memblock memblock_remove_range(struct memblock_type *type,
 769					  phys_addr_t base, phys_addr_t size)
 770{
 771	int start_rgn, end_rgn;
 772	int i, ret;
 773
 774	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
 775	if (ret)
 776		return ret;
 777
 778	for (i = end_rgn - 1; i >= start_rgn; i--)
 779		memblock_remove_region(type, i);
 780	return 0;
 781}
 782
 783int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
 784{
 785	phys_addr_t end = base + size - 1;
 786
 787	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
 788		     &base, &end, (void *)_RET_IP_);
 789
 790	return memblock_remove_range(&memblock.memory, base, size);
 791}
 792
 793/**
 794 * memblock_free - free boot memory block
 795 * @base: phys starting address of the  boot memory block
 796 * @size: size of the boot memory block in bytes
 797 *
 798 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 799 * The freed memory will not be released to the buddy allocator.
 800 */
 801int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
 802{
 803	phys_addr_t end = base + size - 1;
 804
 805	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
 806		     &base, &end, (void *)_RET_IP_);
 807
 808	kmemleak_free_part_phys(base, size);
 809	return memblock_remove_range(&memblock.reserved, base, size);
 810}
 811
 812int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
 813{
 814	phys_addr_t end = base + size - 1;
 815
 816	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
 817		     &base, &end, (void *)_RET_IP_);
 818
 819	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
 820}
 821
 822#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
 823int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
 824{
 825	phys_addr_t end = base + size - 1;
 826
 827	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
 828		     &base, &end, (void *)_RET_IP_);
 829
 830	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
 831}
 832#endif
 833
 834/**
 835 * memblock_setclr_flag - set or clear flag for a memory region
 836 * @base: base address of the region
 837 * @size: size of the region
 838 * @set: set or clear the flag
 839 * @flag: the flag to update
 840 *
 841 * This function isolates region [@base, @base + @size), and sets/clears flag
 842 *
 843 * Return: 0 on success, -errno on failure.
 844 */
 845static int __init_memblock memblock_setclr_flag(phys_addr_t base,
 846				phys_addr_t size, int set, int flag)
 847{
 848	struct memblock_type *type = &memblock.memory;
 849	int i, ret, start_rgn, end_rgn;
 850
 851	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
 852	if (ret)
 853		return ret;
 854
 855	for (i = start_rgn; i < end_rgn; i++) {
 856		struct memblock_region *r = &type->regions[i];
 857
 858		if (set)
 859			r->flags |= flag;
 860		else
 861			r->flags &= ~flag;
 862	}
 863
 864	memblock_merge_regions(type);
 865	return 0;
 866}
 867
 868/**
 869 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 870 * @base: the base phys addr of the region
 871 * @size: the size of the region
 872 *
 873 * Return: 0 on success, -errno on failure.
 874 */
 875int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
 876{
 877	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
 878}
 879
 880/**
 881 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 882 * @base: the base phys addr of the region
 883 * @size: the size of the region
 884 *
 885 * Return: 0 on success, -errno on failure.
 886 */
 887int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
 888{
 889	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
 890}
 891
 892/**
 893 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 894 * @base: the base phys addr of the region
 895 * @size: the size of the region
 896 *
 897 * Return: 0 on success, -errno on failure.
 898 */
 899int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
 900{
 901	system_has_some_mirror = true;
 902
 903	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
 904}
 905
 906/**
 907 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 908 * @base: the base phys addr of the region
 909 * @size: the size of the region
 910 *
 911 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
 912 * direct mapping of the physical memory. These regions will still be
 913 * covered by the memory map. The struct page representing NOMAP memory
 914 * frames in the memory map will be PageReserved()
 915 *
 916 * Return: 0 on success, -errno on failure.
 917 */
 918int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
 919{
 920	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
 921}
 922
 923/**
 924 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 925 * @base: the base phys addr of the region
 926 * @size: the size of the region
 927 *
 928 * Return: 0 on success, -errno on failure.
 929 */
 930int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
 931{
 932	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
 933}
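
/*
 * Sketch (ranges hypothetical): keep firmware tables out of the linear
 * map and tag a removable bank for hotplug-aware allocation.
 *
 *	memblock_mark_nomap(fw_base, fw_size);
 *	memblock_mark_hotplug(hot_base, hot_size);
 */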
 934
 935static bool should_skip_region(struct memblock_type *type,
 936			       struct memblock_region *m,
 937			       int nid, int flags)
 938{
 939	int m_nid = memblock_get_region_node(m);
 940
 941	/* we never skip regions when iterating memblock.reserved or physmem */
 942	if (type != memblock_memory)
 943		return false;
 944
 945	/* only memory regions are associated with nodes, check it */
 946	if (nid != NUMA_NO_NODE && nid != m_nid)
 947		return true;
 948
 949	/* skip hotpluggable memory regions if needed */
 950	if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
 951	    !(flags & MEMBLOCK_HOTPLUG))
 952		return true;
 953
 954	/* if we want mirror memory skip non-mirror memory regions */
 955	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
 956		return true;
 957
 958	/* skip nomap memory unless we were asked for it explicitly */
 959	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
 960		return true;
 961
 962	return false;
 963}
 964
 965/**
 966 * __next_mem_range - next function for for_each_free_mem_range() etc.
 967 * @idx: pointer to u64 loop variable
 968 * @nid: node selector, %NUMA_NO_NODE for all nodes
 969 * @flags: pick from blocks based on memory attributes
 970 * @type_a: pointer to memblock_type from where the range is taken
 971 * @type_b: pointer to memblock_type which excludes memory from being taken
 972 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 973 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 974 * @out_nid: ptr to int for nid of the range, can be %NULL
 975 *
 976 * Find the first area from *@idx which matches @nid, fill the out
 977 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 978 * *@idx contains index into type_a and the upper 32bit indexes the
 979 * areas before each region in type_b.	For example, if type_b regions
 980 * look like the following,
 981 *
 982 *	0:[0-16), 1:[32-48), 2:[128-130)
 983 *
 984 * The upper 32bit indexes the following regions.
 985 *
 986 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 987 *
 988 * As both region arrays are sorted, the function advances the two indices
 989 * in lockstep and returns each intersection.
 990 */
 991void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
 992		      struct memblock_type *type_a,
 993		      struct memblock_type *type_b, phys_addr_t *out_start,
 994		      phys_addr_t *out_end, int *out_nid)
 995{
 996	int idx_a = *idx & 0xffffffff;
 997	int idx_b = *idx >> 32;
 998
 999	if (WARN_ONCE(nid == MAX_NUMNODES,
1000	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1001		nid = NUMA_NO_NODE;
1002
1003	for (; idx_a < type_a->cnt; idx_a++) {
1004		struct memblock_region *m = &type_a->regions[idx_a];
1005
1006		phys_addr_t m_start = m->base;
1007		phys_addr_t m_end = m->base + m->size;
1008		int	    m_nid = memblock_get_region_node(m);
1009
1010		if (should_skip_region(type_a, m, nid, flags))
1011			continue;
1012
1013		if (!type_b) {
1014			if (out_start)
1015				*out_start = m_start;
1016			if (out_end)
1017				*out_end = m_end;
1018			if (out_nid)
1019				*out_nid = m_nid;
1020			idx_a++;
1021			*idx = (u32)idx_a | (u64)idx_b << 32;
1022			return;
1023		}
1024
1025		/* scan areas before each reservation */
1026		for (; idx_b < type_b->cnt + 1; idx_b++) {
1027			struct memblock_region *r;
1028			phys_addr_t r_start;
1029			phys_addr_t r_end;
1030
1031			r = &type_b->regions[idx_b];
1032			r_start = idx_b ? r[-1].base + r[-1].size : 0;
1033			r_end = idx_b < type_b->cnt ?
1034				r->base : PHYS_ADDR_MAX;
1035
1036			/*
1037			 * if idx_b advanced past idx_a,
1038			 * break out to advance idx_a
1039			 */
1040			if (r_start >= m_end)
1041				break;
1042			/* if the two regions intersect, we're done */
1043			if (m_start < r_end) {
1044				if (out_start)
1045					*out_start =
1046						max(m_start, r_start);
1047				if (out_end)
1048					*out_end = min(m_end, r_end);
1049				if (out_nid)
1050					*out_nid = m_nid;
1051				/*
1052				 * The region which ends first is
1053				 * advanced for the next iteration.
1054				 */
1055				if (m_end <= r_end)
1056					idx_a++;
1057				else
1058					idx_b++;
1059				*idx = (u32)idx_a | (u64)idx_b << 32;
1060				return;
1061			}
1062		}
1063	}
1064
1065	/* signal end of iteration */
1066	*idx = ULLONG_MAX;
1067}
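
/*
 * Callers normally reach this through the iteration macros; a sketch that
 * walks every free (memory minus reserved) range on any node:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free: [%pa-%pa]\n", &start, &end);
 */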
1068
1069/**
1070 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
1071 *
1072 * @idx: pointer to u64 loop variable
1073 * @nid: node selector, %NUMA_NO_NODE for all nodes
1074 * @flags: pick from blocks based on memory attributes
1075 * @type_a: pointer to memblock_type from where the range is taken
1076 * @type_b: pointer to memblock_type which excludes memory from being taken
1077 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
1078 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
1079 * @out_nid: ptr to int for nid of the range, can be %NULL
1080 *
1081 * Finds the next range from type_a which is not marked as unsuitable
1082 * in type_b.
1083 *
1084 * Reverse of __next_mem_range().
1085 */
1086void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
1087					  enum memblock_flags flags,
1088					  struct memblock_type *type_a,
1089					  struct memblock_type *type_b,
1090					  phys_addr_t *out_start,
1091					  phys_addr_t *out_end, int *out_nid)
1092{
1093	int idx_a = *idx & 0xffffffff;
1094	int idx_b = *idx >> 32;
1095
1096	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1097		nid = NUMA_NO_NODE;
1098
1099	if (*idx == (u64)ULLONG_MAX) {
1100		idx_a = type_a->cnt - 1;
1101		if (type_b != NULL)
1102			idx_b = type_b->cnt;
1103		else
1104			idx_b = 0;
1105	}
1106
1107	for (; idx_a >= 0; idx_a--) {
1108		struct memblock_region *m = &type_a->regions[idx_a];
1109
1110		phys_addr_t m_start = m->base;
1111		phys_addr_t m_end = m->base + m->size;
1112		int m_nid = memblock_get_region_node(m);
1113
1114		if (should_skip_region(type_a, m, nid, flags))
1115			continue;
1116
1117		if (!type_b) {
1118			if (out_start)
1119				*out_start = m_start;
1120			if (out_end)
1121				*out_end = m_end;
1122			if (out_nid)
1123				*out_nid = m_nid;
1124			idx_a--;
1125			*idx = (u32)idx_a | (u64)idx_b << 32;
1126			return;
1127		}
1128
1129		/* scan areas before each reservation */
1130		for (; idx_b >= 0; idx_b--) {
1131			struct memblock_region *r;
1132			phys_addr_t r_start;
1133			phys_addr_t r_end;
1134
1135			r = &type_b->regions[idx_b];
1136			r_start = idx_b ? r[-1].base + r[-1].size : 0;
1137			r_end = idx_b < type_b->cnt ?
1138				r->base : PHYS_ADDR_MAX;
1139			/*
1140			 * if idx_b advanced past idx_a,
1141			 * break out to advance idx_a
1142			 */
1143
1144			if (r_end <= m_start)
1145				break;
1146			/* if the two regions intersect, we're done */
1147			if (m_end > r_start) {
1148				if (out_start)
1149					*out_start = max(m_start, r_start);
1150				if (out_end)
1151					*out_end = min(m_end, r_end);
1152				if (out_nid)
1153					*out_nid = m_nid;
1154				if (m_start >= r_start)
1155					idx_a--;
1156				else
1157					idx_b--;
1158				*idx = (u32)idx_a | (u64)idx_b << 32;
1159				return;
1160			}
1161		}
1162	}
1163	/* signal end of iteration */
1164	*idx = ULLONG_MAX;
1165}
1166
1167/*
1168 * Common iterator interface used to define for_each_mem_pfn_range().
1169 */
1170void __init_memblock __next_mem_pfn_range(int *idx, int nid,
1171				unsigned long *out_start_pfn,
1172				unsigned long *out_end_pfn, int *out_nid)
1173{
1174	struct memblock_type *type = &memblock.memory;
1175	struct memblock_region *r;
1176	int r_nid;
1177
1178	while (++*idx < type->cnt) {
1179		r = &type->regions[*idx];
1180		r_nid = memblock_get_region_node(r);
1181
1182		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
1183			continue;
1184		if (nid == MAX_NUMNODES || nid == r_nid)
1185			break;
1186	}
1187	if (*idx >= type->cnt) {
1188		*idx = -1;
1189		return;
1190	}
1191
1192	if (out_start_pfn)
1193		*out_start_pfn = PFN_UP(r->base);
1194	if (out_end_pfn)
1195		*out_end_pfn = PFN_DOWN(r->base + r->size);
1196	if (out_nid)
1197		*out_nid = r_nid;
1198}
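
/*
 * Sketch of the matching for_each_mem_pfn_range() macro in use (the loop
 * body is illustrative):
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfn %lx-%lx\n", nid, start_pfn, end_pfn);
 */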
1199
1200/**
1201 * memblock_set_node - set node ID on memblock regions
1202 * @base: base of area to set node ID for
1203 * @size: size of area to set node ID for
1204 * @type: memblock type to set node ID for
1205 * @nid: node ID to set
1206 *
1207 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
1208 * Regions which cross the area boundaries are split as necessary.
1209 *
1210 * Return:
1211 * 0 on success, -errno on failure.
1212 */
1213int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
1214				      struct memblock_type *type, int nid)
1215{
1216#ifdef CONFIG_NUMA
1217	int start_rgn, end_rgn;
1218	int i, ret;
1219
1220	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
1221	if (ret)
1222		return ret;
1223
1224	for (i = start_rgn; i < end_rgn; i++)
1225		memblock_set_region_node(&type->regions[i], nid);
1226
1227	memblock_merge_regions(type);
1228#endif
1229	return 0;
1230}
1231
1232#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1233/**
1234 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
1235 *
1236 * @idx: pointer to u64 loop variable
1237 * @zone: zone in which all of the memory blocks reside
1238 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
1239 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
1240 *
 1241 * This function is a zone/pfn specific wrapper for the
 1242 * for_each_mem_range type iterators. It is used by the deferred
 1243 * memory init routines, which previously duplicated much of this
 1244 * logic at each call site; centralizing it here keeps the logic in
 1245 * a single iterator that gives those routines everything they need
 1246 * in one call.
1247 */
1248void __init_memblock
1249__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
1250			     unsigned long *out_spfn, unsigned long *out_epfn)
1251{
1252	int zone_nid = zone_to_nid(zone);
1253	phys_addr_t spa, epa;
1254	int nid;
1255
1256	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
1257			 &memblock.memory, &memblock.reserved,
1258			 &spa, &epa, &nid);
1259
1260	while (*idx != U64_MAX) {
1261		unsigned long epfn = PFN_DOWN(epa);
1262		unsigned long spfn = PFN_UP(spa);
1263
1264		/*
1265		 * Verify the end is at least past the start of the zone and
1266		 * that we have at least one PFN to initialize.
1267		 */
1268		if (zone->zone_start_pfn < epfn && spfn < epfn) {
1269			/* if we went too far just stop searching */
1270			if (zone_end_pfn(zone) <= spfn) {
1271				*idx = U64_MAX;
1272				break;
1273			}
1274
1275			if (out_spfn)
1276				*out_spfn = max(zone->zone_start_pfn, spfn);
1277			if (out_epfn)
1278				*out_epfn = min(zone_end_pfn(zone), epfn);
1279
1280			return;
1281		}
1282
1283		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
1284				 &memblock.memory, &memblock.reserved,
1285				 &spa, &epa, &nid);
1286	}
1287
1288	/* signal end of iteration */
1289	if (out_spfn)
1290		*out_spfn = ULONG_MAX;
1291	if (out_epfn)
1292		*out_epfn = 0;
1293}
1294
1295#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1296
1297/**
1298 * memblock_alloc_range_nid - allocate boot memory block
1299 * @size: size of memory block to be allocated in bytes
1300 * @align: alignment of the region and block's size
1301 * @start: the lower bound of the memory region to allocate (phys address)
1302 * @end: the upper bound of the memory region to allocate (phys address)
1303 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1304 * @exact_nid: control the allocation fall back to other nodes
1305 *
1306 * The allocation is performed from memory region limited by
1307 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
1308 *
1309 * If the specified node can not hold the requested memory and @exact_nid
1310 * is false, the allocation falls back to any node in the system.
1311 *
1312 * For systems with memory mirroring, the allocation is attempted first
1313 * from the regions with mirroring enabled and then retried from any
1314 * memory region.
1315 *
 1316 * In addition, the function sets min_count to 0 using kmemleak_alloc_phys()
 1317 * for the allocated boot memory block, so that it is never reported as a leak.
1318 *
1319 * Return:
1320 * Physical address of allocated memory block on success, %0 on failure.
1321 */
1322phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
1323					phys_addr_t align, phys_addr_t start,
1324					phys_addr_t end, int nid,
1325					bool exact_nid)
1326{
1327	enum memblock_flags flags = choose_memblock_flags();
1328	phys_addr_t found;
1329
1330	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1331		nid = NUMA_NO_NODE;
1332
1333	if (!align) {
1334		/* Can't use WARNs this early in boot on powerpc */
1335		dump_stack();
1336		align = SMP_CACHE_BYTES;
1337	}
1338
1339again:
1340	found = memblock_find_in_range_node(size, align, start, end, nid,
1341					    flags);
1342	if (found && !memblock_reserve(found, size))
1343		goto done;
1344
1345	if (nid != NUMA_NO_NODE && !exact_nid) {
1346		found = memblock_find_in_range_node(size, align, start,
1347						    end, NUMA_NO_NODE,
1348						    flags);
1349		if (found && !memblock_reserve(found, size))
1350			goto done;
1351	}
1352
1353	if (flags & MEMBLOCK_MIRROR) {
1354		flags &= ~MEMBLOCK_MIRROR;
1355		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
1356			&size);
1357		goto again;
1358	}
1359
1360	return 0;
1361
1362done:
1363	/* Skip kmemleak for kasan_init() due to high volume. */
1364	if (end != MEMBLOCK_ALLOC_KASAN)
1365		/*
1366		 * The min_count is set to 0 so that memblock allocated
1367		 * blocks are never reported as leaks. This is because many
1368		 * of these blocks are only referred via the physical
1369		 * address which is not looked up by kmemleak.
1370		 */
1371		kmemleak_alloc_phys(found, size, 0, 0);
1372
1373	return found;
1374}
1375
1376/**
1377 * memblock_phys_alloc_range - allocate a memory block inside specified range
1378 * @size: size of memory block to be allocated in bytes
1379 * @align: alignment of the region and block's size
1380 * @start: the lower bound of the memory region to allocate (physical address)
1381 * @end: the upper bound of the memory region to allocate (physical address)
1382 *
 1383 * Allocate @size bytes in the range between @start and @end.
1384 *
1385 * Return: physical address of the allocated memory block on success,
1386 * %0 on failure.
1387 */
1388phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
1389					     phys_addr_t align,
1390					     phys_addr_t start,
1391					     phys_addr_t end)
1392{
1393	memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
1394		     __func__, (u64)size, (u64)align, &start, &end,
1395		     (void *)_RET_IP_);
1396	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
1397					false);
1398}
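
/*
 * Sketch (size and bounds illustrative): allocate a naturally aligned
 * 64KiB block below 4GiB, e.g. for a DMA-limited early buffer.
 *
 *	phys_addr_t pa = memblock_phys_alloc_range(SZ_64K, SZ_64K, 0, SZ_4G);
 *
 *	if (!pa)
 *		panic("cannot allocate early DMA buffer");
 */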
1399
1400/**
1401 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
1402 * @size: size of memory block to be allocated in bytes
1403 * @align: alignment of the region and block's size
1404 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1405 *
 1406 * Allocates a memory block from the specified NUMA node. If the node
 1407 * has no available memory, the allocation falls back to any node in
 1408 * the system.
1409 *
1410 * Return: physical address of the allocated memory block on success,
1411 * %0 on failure.
1412 */
1413phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
1414{
1415	return memblock_alloc_range_nid(size, align, 0,
1416					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
1417}
1418
1419/**
1420 * memblock_alloc_internal - allocate boot memory block
1421 * @size: size of memory block to be allocated in bytes
1422 * @align: alignment of the region and block's size
1423 * @min_addr: the lower bound of the memory region to allocate (phys address)
1424 * @max_addr: the upper bound of the memory region to allocate (phys address)
1425 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1426 * @exact_nid: control the allocation fall back to other nodes
1427 *
1428 * Allocates memory block using memblock_alloc_range_nid() and
1429 * converts the returned physical address to virtual.
1430 *
1431 * The @min_addr limit is dropped if it can not be satisfied and the allocation
1432 * will fall back to memory below @min_addr. Other constraints, such
1433 * as node and mirrored memory will be handled again in
1434 * memblock_alloc_range_nid().
1435 *
1436 * Return:
1437 * Virtual address of allocated memory block on success, NULL on failure.
1438 */
1439static void * __init memblock_alloc_internal(
1440				phys_addr_t size, phys_addr_t align,
1441				phys_addr_t min_addr, phys_addr_t max_addr,
1442				int nid, bool exact_nid)
1443{
1444	phys_addr_t alloc;
1445
1446	/*
1447	 * Detect any accidental use of these APIs after slab is ready, as at
1448	 * this moment memblock may be deinitialized already and its
1449	 * internal data may be destroyed (after execution of memblock_free_all)
1450	 */
1451	if (WARN_ON_ONCE(slab_is_available()))
1452		return kzalloc_node(size, GFP_NOWAIT, nid);
1453
1454	if (max_addr > memblock.current_limit)
1455		max_addr = memblock.current_limit;
1456
1457	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
1458					exact_nid);
1459
1460	/* retry allocation without lower limit */
1461	if (!alloc && min_addr)
1462		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
1463						exact_nid);
1464
1465	if (!alloc)
1466		return NULL;
1467
1468	return phys_to_virt(alloc);
1469}
1470
1471/**
1472 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
1473 * without zeroing memory
1474 * @size: size of memory block to be allocated in bytes
1475 * @align: alignment of the region and block's size
1476 * @min_addr: the lower bound of the memory region from where the allocation
1477 *	  is preferred (phys address)
1478 * @max_addr: the upper bound of the memory region from where the allocation
1479 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1480 *	      allocate only from memory limited by memblock.current_limit value
1481 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1482 *
1483 * Public function, provides additional debug information (including caller
1484 * info), if enabled. Does not zero allocated memory.
1485 *
1486 * Return:
1487 * Virtual address of allocated memory block on success, NULL on failure.
1488 */
1489void * __init memblock_alloc_exact_nid_raw(
1490			phys_addr_t size, phys_addr_t align,
1491			phys_addr_t min_addr, phys_addr_t max_addr,
1492			int nid)
1493{
1494	void *ptr;
1495
1496	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1497		     __func__, (u64)size, (u64)align, nid, &min_addr,
1498		     &max_addr, (void *)_RET_IP_);
1499
1500	ptr = memblock_alloc_internal(size, align,
1501					   min_addr, max_addr, nid, true);
1502	if (ptr && size > 0)
1503		page_init_poison(ptr, size);
1504
1505	return ptr;
1506}
1507
1508/**
1509 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
1510 * memory and without panicking
1511 * @size: size of memory block to be allocated in bytes
1512 * @align: alignment of the region and block's size
1513 * @min_addr: the lower bound of the memory region from where the allocation
1514 *	  is preferred (phys address)
1515 * @max_addr: the upper bound of the memory region from where the allocation
1516 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1517 *	      allocate only from memory limited by memblock.current_limit value
1518 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1519 *
1520 * Public function, provides additional debug information (including caller
1521 * info), if enabled. Does not zero allocated memory, does not panic if request
1522 * cannot be satisfied.
1523 *
1524 * Return:
1525 * Virtual address of allocated memory block on success, NULL on failure.
1526 */
1527void * __init memblock_alloc_try_nid_raw(
1528			phys_addr_t size, phys_addr_t align,
1529			phys_addr_t min_addr, phys_addr_t max_addr,
1530			int nid)
1531{
1532	void *ptr;
1533
1534	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1535		     __func__, (u64)size, (u64)align, nid, &min_addr,
1536		     &max_addr, (void *)_RET_IP_);
1537
1538	ptr = memblock_alloc_internal(size, align,
1539					   min_addr, max_addr, nid, false);
1540	if (ptr && size > 0)
1541		page_init_poison(ptr, size);
1542
1543	return ptr;
1544}
1545
1546/**
1547 * memblock_alloc_try_nid - allocate boot memory block
1548 * @size: size of memory block to be allocated in bytes
1549 * @align: alignment of the region and block's size
1550 * @min_addr: the lower bound of the memory region from where the allocation
1551 *	  is preferred (phys address)
1552 * @max_addr: the upper bound of the memory region from where the allocation
1553 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1554 *	      allocate only from memory limited by memblock.current_limit value
1555 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1556 *
1557 * Public function, provides additional debug information (including caller
1558 * info), if enabled. This function zeroes the allocated memory.
1559 *
1560 * Return:
1561 * Virtual address of allocated memory block on success, NULL on failure.
1562 */
1563void * __init memblock_alloc_try_nid(
1564			phys_addr_t size, phys_addr_t align,
1565			phys_addr_t min_addr, phys_addr_t max_addr,
1566			int nid)
1567{
1568	void *ptr;
1569
1570	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1571		     __func__, (u64)size, (u64)align, nid, &min_addr,
1572		     &max_addr, (void *)_RET_IP_);
1573	ptr = memblock_alloc_internal(size, align,
1574					   min_addr, max_addr, nid, false);
1575	if (ptr)
1576		memset(ptr, 0, size);
1577
1578	return ptr;
1579}
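
/*
 * Illustrative sketch (editor's example): this zeroing variant is the
 * common choice for boot-time tables; the in-tree memblock_alloc()
 * wrapper in <linux/memblock.h> calls it with MEMBLOCK_LOW_LIMIT,
 * MEMBLOCK_ALLOC_ACCESSIBLE and NUMA_NO_NODE. The helper below is
 * hypothetical.
 */
static unsigned long * __init example_zeroed_bitmap(unsigned long bits)
{
	/* returned memory is zero-filled, so the bitmap starts all-clear */
	return memblock_alloc_try_nid(BITS_TO_LONGS(bits) * sizeof(long),
				      SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE,
				      NUMA_NO_NODE);
}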
1580
1581/**
1582 * __memblock_free_late - free pages directly to buddy allocator
1583 * @base: phys starting address of the boot memory block
1584 * @size: size of the boot memory block in bytes
1585 *
1586 * This is only useful when the memblock allocator has already been torn
1587 * down, but we are still initializing the system.  Pages are released directly
1588 * to the buddy allocator.
1589 */
1590void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
1591{
1592	phys_addr_t cursor, end;
1593
1594	end = base + size - 1;
1595	memblock_dbg("%s: [%pa-%pa] %pS\n",
1596		     __func__, &base, &end, (void *)_RET_IP_);
1597	kmemleak_free_part_phys(base, size);
1598	cursor = PFN_UP(base);
1599	end = PFN_DOWN(base + size);
1600
1601	for (; cursor < end; cursor++) {
1602		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
1603		totalram_pages_inc();
1604	}
1605}
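
/*
 * Illustrative sketch (editor's example): an init path that kept a boot
 * buffer alive past memblock teardown could hand its pages back like
 * this. All example_* symbols are hypothetical.
 */
static phys_addr_t example_boot_buf_base __initdata;
static phys_addr_t example_boot_buf_size __initdata;

static void __init example_release_boot_buf(void)
{
	if (example_boot_buf_size)
		__memblock_free_late(example_boot_buf_base,
				     example_boot_buf_size);
}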
1606
1607/*
1608 * Remaining API functions
1609 */
1610
1611phys_addr_t __init_memblock memblock_phys_mem_size(void)
1612{
1613	return memblock.memory.total_size;
1614}
1615
1616phys_addr_t __init_memblock memblock_reserved_size(void)
1617{
1618	return memblock.reserved.total_size;
1619}
1620
1621/* lowest address */
1622phys_addr_t __init_memblock memblock_start_of_DRAM(void)
1623{
1624	return memblock.memory.regions[0].base;
1625}
1626
1627phys_addr_t __init_memblock memblock_end_of_DRAM(void)
1628{
1629	int idx = memblock.memory.cnt - 1;
1630
1631	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1632}
1633
1634static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
1635{
1636	phys_addr_t max_addr = PHYS_ADDR_MAX;
1637	struct memblock_region *r;
1638
1639	/*
1640	 * Translate the memory @limit size into the max address within one of
1641	 * the memory memblock regions. If @limit exceeds the total size of
1642	 * those regions, max_addr keeps its original value, PHYS_ADDR_MAX.
1643	 */
1644	for_each_mem_region(r) {
1645		if (limit <= r->size) {
1646			max_addr = r->base + limit;
1647			break;
1648		}
1649		limit -= r->size;
1650	}
1651
1652	return max_addr;
1653}
1654
1655void __init memblock_enforce_memory_limit(phys_addr_t limit)
1656{
1657	phys_addr_t max_addr;
1658
1659	if (!limit)
1660		return;
1661
1662	max_addr = __find_max_addr(limit);
1663
1664	/* @limit exceeds the total size of the memory, do nothing */
1665	if (max_addr == PHYS_ADDR_MAX)
1666		return;
1667
1668	/* truncate both memory and reserved regions */
1669	memblock_remove_range(&memblock.memory, max_addr,
1670			      PHYS_ADDR_MAX);
1671	memblock_remove_range(&memblock.reserved, max_addr,
1672			      PHYS_ADDR_MAX);
1673}
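
/*
 * Illustrative sketch (editor's example): architectures typically wire a
 * "mem=" style command-line limit to the helper above, roughly as below.
 * The parameter name is hypothetical to avoid clashing with real arch
 * handlers; memparse() is from <linux/kernel.h>.
 */
static int __init example_parse_mem(char *p)
{
	if (!p)
		return -EINVAL;

	memblock_enforce_memory_limit(memparse(p, &p));
	return 0;
}
early_param("example_mem", example_parse_mem);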
1674
1675void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
1676{
1677	int start_rgn, end_rgn;
1678	int i, ret;
1679
1680	if (!size)
1681		return;
1682
1683	ret = memblock_isolate_range(&memblock.memory, base, size,
1684						&start_rgn, &end_rgn);
1685	if (ret)
1686		return;
1687
1688	/* remove all the MAP regions outside the [base, base + size) range */
1689	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
1690		if (!memblock_is_nomap(&memblock.memory.regions[i]))
1691			memblock_remove_region(&memblock.memory, i);
1692
1693	for (i = start_rgn - 1; i >= 0; i--)
1694		if (!memblock_is_nomap(&memblock.memory.regions[i]))
1695			memblock_remove_region(&memblock.memory, i);
1696
1697	/* truncate the reserved regions */
1698	memblock_remove_range(&memblock.reserved, 0, base);
1699	memblock_remove_range(&memblock.reserved,
1700			base + size, PHYS_ADDR_MAX);
1701}
1702
1703void __init memblock_mem_limit_remove_map(phys_addr_t limit)
1704{
1705	phys_addr_t max_addr;
1706
1707	if (!limit)
1708		return;
1709
1710	max_addr = __find_max_addr(limit);
1711
1712	/* @limit exceeds the total size of the memory, do nothing */
1713	if (max_addr == PHYS_ADDR_MAX)
1714		return;
1715
1716	memblock_cap_memory_range(0, max_addr);
1717}
1718
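/*
 * memblock keeps each region array sorted by base address with no
 * overlapping entries, which is what allows the plain binary search
 * below: it returns the index of the region containing @addr, or -1
 * if no region does.
 */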
1719static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
1720{
1721	unsigned int left = 0, right = type->cnt;
1722
1723	do {
1724		unsigned int mid = (right + left) / 2;
1725
1726		if (addr < type->regions[mid].base)
1727			right = mid;
1728		else if (addr >= (type->regions[mid].base +
1729				  type->regions[mid].size))
1730			left = mid + 1;
1731		else
1732			return mid;
1733	} while (left < right);
1734	return -1;
1735}
1736
1737bool __init_memblock memblock_is_reserved(phys_addr_t addr)
1738{
1739	return memblock_search(&memblock.reserved, addr) != -1;
1740}
1741
1742bool __init_memblock memblock_is_memory(phys_addr_t addr)
1743{
1744	return memblock_search(&memblock.memory, addr) != -1;
1745}
1746
1747bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
1748{
1749	int i = memblock_search(&memblock.memory, addr);
1750
1751	if (i == -1)
1752		return false;
1753	return !memblock_is_nomap(&memblock.memory.regions[i]);
1754}
1755
1756int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1757			 unsigned long *start_pfn, unsigned long *end_pfn)
1758{
1759	struct memblock_type *type = &memblock.memory;
1760	int mid = memblock_search(type, PFN_PHYS(pfn));
1761
1762	if (mid == -1)
1763		return -1;
1764
1765	*start_pfn = PFN_DOWN(type->regions[mid].base);
1766	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
1767
1768	return memblock_get_region_node(&type->regions[mid]);
1769}
1770
1771/**
1772 * memblock_is_region_memory - check if a region is a subset of memory
1773 * @base: base of region to check
1774 * @size: size of region to check
1775 *
1776 * Check if the region [@base, @base + @size) is a subset of a memory block.
1777 *
1778 * Return:
1779 * true if the region is a subset of a memory block, false otherwise.
1780 */
1781bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
1782{
1783	int idx = memblock_search(&memblock.memory, base);
1784	phys_addr_t end = base + memblock_cap_size(base, &size);
1785
1786	if (idx == -1)
1787		return false;
1788	return (memblock.memory.regions[idx].base +
1789		 memblock.memory.regions[idx].size) >= end;
1790}
1791
1792/**
1793 * memblock_is_region_reserved - check if a region intersects reserved memory
1794 * @base: base of region to check
1795 * @size: size of region to check
1796 *
1797 * Check if the region [@base, @base + @size) intersects a reserved
1798 * memory block.
1799 *
1800 * Return:
1801 * True if they intersect, false if not.
1802 */
1803bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
1804{
1805	return memblock_overlaps_region(&memblock.reserved, base, size);
1806}
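
/*
 * Illustrative sketch (editor's example): early setup code often combines
 * the two predicates above to vet a candidate range before claiming it.
 * example_range_usable() is a hypothetical helper.
 */
static bool __init example_range_usable(phys_addr_t base, phys_addr_t size)
{
	/* must be fully covered by known RAM and not already reserved */
	return memblock_is_region_memory(base, size) &&
	       !memblock_is_region_reserved(base, size);
}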
1807
1808void __init_memblock memblock_trim_memory(phys_addr_t align)
1809{
1810	phys_addr_t start, end, orig_start, orig_end;
1811	struct memblock_region *r;
1812
1813	for_each_mem_region(r) {
1814		orig_start = r->base;
1815		orig_end = r->base + r->size;
1816		start = round_up(orig_start, align);
1817		end = round_down(orig_end, align);
1818
1819		if (start == orig_start && end == orig_end)
1820			continue;
1821
1822		if (start < end) {
1823			r->base = start;
1824			r->size = end - start;
1825		} else {
1826			memblock_remove_region(&memblock.memory,
1827					       r - memblock.memory.regions);
1828			r--;
1829		}
1830	}
1831}
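
/*
 * Illustrative usage (editor's example): the usual call trims RAM banks
 * to page granularity so nothing memblock reports straddles a partial
 * page, e.g. from early arch setup code:
 *
 *	memblock_trim_memory(PAGE_SIZE);
 */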
1832
1833void __init_memblock memblock_set_current_limit(phys_addr_t limit)
1834{
1835	memblock.current_limit = limit;
1836}
1837
1838phys_addr_t __init_memblock memblock_get_current_limit(void)
1839{
1840	return memblock.current_limit;
1841}
1842
1843static void __init_memblock memblock_dump(struct memblock_type *type)
1844{
1845	phys_addr_t base, end, size;
1846	enum memblock_flags flags;
1847	int idx;
1848	struct memblock_region *rgn;
1849
1850	pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);
1851
1852	for_each_memblock_type(idx, type, rgn) {
1853		char nid_buf[32] = "";
1854
1855		base = rgn->base;
1856		size = rgn->size;
1857		end = base + size - 1;
1858		flags = rgn->flags;
1859#ifdef CONFIG_NUMA
1860		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
1861			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
1862				 memblock_get_region_node(rgn));
1863#endif
1864		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
1865			type->name, idx, &base, &end, &size, nid_buf, flags);
1866	}
1867}
1868
1869static void __init_memblock __memblock_dump_all(void)
1870{
1871	pr_info("MEMBLOCK configuration:\n");
1872	pr_info(" memory size = %pa reserved size = %pa\n",
1873		&memblock.memory.total_size,
1874		&memblock.reserved.total_size);
1875
1876	memblock_dump(&memblock.memory);
1877	memblock_dump(&memblock.reserved);
1878#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1879	memblock_dump(&physmem);
1880#endif
1881}
1882
1883void __init_memblock memblock_dump_all(void)
1884{
1885	if (memblock_debug)
1886		__memblock_dump_all();
1887}
1888
1889void __init memblock_allow_resize(void)
1890{
1891	memblock_can_resize = 1;
1892}
1893
1894static int __init early_memblock(char *p)
1895{
1896	if (p && strstr(p, "debug"))
1897		memblock_debug = 1;
1898	return 0;
1899}
1900early_param("memblock", early_memblock);
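
/*
 * Hence booting with "memblock=debug" on the kernel command line enables
 * the memblock_dbg() output used throughout this file.
 */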
1901
1902static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
1903{
1904	struct page *start_pg, *end_pg;
1905	phys_addr_t pg, pgend;
1906
1907	/*
1908	 * Convert start_pfn/end_pfn to struct page pointers.
1909	 */
1910	start_pg = pfn_to_page(start_pfn - 1) + 1;
1911	end_pg = pfn_to_page(end_pfn - 1) + 1;
1912
1913	/*
1914	 * Convert to physical addresses, and round start upwards and end
1915	 * downwards.
1916	 */
1917	pg = PAGE_ALIGN(__pa(start_pg));
1918	pgend = __pa(end_pg) & PAGE_MASK;
1919
1920	/*
1921	 * If there are free pages between these, free the section of the
1922	 * memmap array.
1923	 */
1924	if (pg < pgend)
1925		memblock_free(pg, pgend - pg);
1926}
1927
1928/*
1929 * The mem_map array can get very big.  Free the unused area of the memory map.
1930 */
1931static void __init free_unused_memmap(void)
1932{
1933	unsigned long start, end, prev_end = 0;
1934	int i;
1935
1936	if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
1937	    IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
1938		return;
1939
1940	/*
1941	 * This relies on each bank being in address order.
1942	 * The banks are sorted previously in bootmem_init().
1943	 */
1944	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
1945#ifdef CONFIG_SPARSEMEM
1946		/*
1947		 * Take care not to free memmap entries that don't exist
1948		 * due to SPARSEMEM sections which aren't present.
1949		 */
1950		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
1951#endif
1952		/*
1953		 * Align down here since many operations in the VM subsystem
1954		 * presume that there are no holes in the memory map inside
1955		 * a pageblock.
1956		 */
1957		start = round_down(start, pageblock_nr_pages);
1958
1959		/*
1960		 * If we had a previous bank, and there is a space
1961		 * between the current bank and the previous, free it.
1962		 */
1963		if (prev_end && prev_end < start)
1964			free_memmap(prev_end, start);
1965
1966		/*
1967		 * Align up here since many operations in the VM subsystem
1968		 * presume that there are no holes in the memory map inside
1969		 * a pageblock.
1970		 */
1971		prev_end = ALIGN(end, pageblock_nr_pages);
1972	}
1973
1974#ifdef CONFIG_SPARSEMEM
1975	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
1976		prev_end = ALIGN(end, pageblock_nr_pages);
1977		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
1978	}
1979#endif
1980}
1981
1982static void __init __free_pages_memory(unsigned long start, unsigned long end)
1983{
1984	int order;
1985
1986	while (start < end) {
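		/*
		 * Pick the largest buddy order that keeps the block
		 * aligned at @start: __ffs(start) is the lowest set bit
		 * of the pfn, capped at the allocator's MAX_ORDER - 1.
		 */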
1987		order = min(MAX_ORDER - 1UL, __ffs(start));
1988
1989		while (start + (1UL << order) > end)
1990			order--;
1991
1992		memblock_free_pages(pfn_to_page(start), start, order);
1993
1994		start += (1UL << order);
1995	}
1996}
1997
1998static unsigned long __init __free_memory_core(phys_addr_t start,
1999				 phys_addr_t end)
2000{
2001	unsigned long start_pfn = PFN_UP(start);
2002	unsigned long end_pfn = min_t(unsigned long,
2003				      PFN_DOWN(end), max_low_pfn);
2004
2005	if (start_pfn >= end_pfn)
2006		return 0;
2007
2008	__free_pages_memory(start_pfn, end_pfn);
2009
2010	return end_pfn - start_pfn;
2011}
2012
2013static void __init memmap_init_reserved_pages(void)
2014{
2015	struct memblock_region *region;
2016	phys_addr_t start, end;
2017	u64 i;
2018
2019	/* initialize struct pages for the reserved regions */
2020	for_each_reserved_mem_range(i, &start, &end)
2021		reserve_bootmem_region(start, end);
2022
2023	/* and also treat struct pages for the NOMAP regions as PageReserved */
2024	for_each_mem_region(region) {
2025		if (memblock_is_nomap(region)) {
2026			start = region->base;
2027			end = start + region->size;
2028			reserve_bootmem_region(start, end);
2029		}
2030	}
2031}
2032
2033static unsigned long __init free_low_memory_core_early(void)
2034{
2035	unsigned long count = 0;
2036	phys_addr_t start, end;
2037	u64 i;
2038
2039	memblock_clear_hotplug(0, -1);
2040
2041	memmap_init_reserved_pages();
2042
2043	/*
2044	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
2045	 * because in some cases, such as when Node 0 has no RAM installed,
2046	 * low RAM will be on Node 1.
2047	 */
2048	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
2049				NULL)
2050		count += __free_memory_core(start, end);
2051
2052	return count;
2053}
2054
2055static int reset_managed_pages_done __initdata;
2056
2057void reset_node_managed_pages(pg_data_t *pgdat)
2058{
2059	struct zone *z;
2060
2061	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
2062		atomic_long_set(&z->managed_pages, 0);
2063}
2064
2065void __init reset_all_zones_managed_pages(void)
2066{
2067	struct pglist_data *pgdat;
2068
2069	if (reset_managed_pages_done)
2070		return;
2071
2072	for_each_online_pgdat(pgdat)
2073		reset_node_managed_pages(pgdat);
2074
2075	reset_managed_pages_done = 1;
2076}
2077
2078/**
2079 * memblock_free_all - release free pages to the buddy allocator
2080 */
2081void __init memblock_free_all(void)
2082{
2083	unsigned long pages;
2084
2085	free_unused_memmap();
2086	reset_all_zones_managed_pages();
2087
2088	pages = free_low_memory_core_early();
2089	totalram_pages_add(pages);
2090}
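
/*
 * Architectures typically call memblock_free_all() once from their
 * mem_init(); from then on the buddy allocator owns free memory, and
 * memblock (if retained via CONFIG_ARCH_KEEP_MEMBLOCK) serves lookups
 * only.
 */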
2091
2092#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
2093
2094static int memblock_debug_show(struct seq_file *m, void *private)
2095{
2096	struct memblock_type *type = m->private;
2097	struct memblock_region *reg;
2098	int i;
2099	phys_addr_t end;
2100
2101	for (i = 0; i < type->cnt; i++) {
2102		reg = &type->regions[i];
2103		end = reg->base + reg->size - 1;
2104
2105		seq_printf(m, "%4d: ", i);
2106		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
2107	}
2108	return 0;
2109}
2110DEFINE_SHOW_ATTRIBUTE(memblock_debug);
2111
2112static int __init memblock_init_debugfs(void)
2113{
2114	struct dentry *root = debugfs_create_dir("memblock", NULL);
2115
2116	debugfs_create_file("memory", 0444, root,
2117			    &memblock.memory, &memblock_debug_fops);
2118	debugfs_create_file("reserved", 0444, root,
2119			    &memblock.reserved, &memblock_debug_fops);
2120#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
2121	debugfs_create_file("physmem", 0444, root, &physmem,
2122			    &memblock_debug_fops);
2123#endif
2124
2125	return 0;
2126}
2127__initcall(memblock_init_debugfs);
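
/*
 * With both config options above enabled, the region lists are readable
 * at runtime, e.g.:
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000080000000..0x00000000ffffffff
 *
 * (addresses illustrative; the format matches memblock_debug_show())
 */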
2128
2129#endif /* CONFIG_DEBUG_FS && CONFIG_ARCH_KEEP_MEMBLOCK */