v4.17
 
  1/*
  2 * mm_init.c - Memory initialisation verification and debugging
  3 *
  4 * Copyright 2008 IBM Corporation, 2008
  5 * Author Mel Gorman <mel@csn.ul.ie>
  6 *
  7 */
  8#include <linux/kernel.h>
  9#include <linux/init.h>
 10#include <linux/kobject.h>
 11#include <linux/export.h>
 12#include <linux/memory.h>
 13#include <linux/notifier.h>
 14#include <linux/sched.h>
 15#include "internal.h"
 16
 17#ifdef CONFIG_DEBUG_MEMORY_INIT
 18int __meminitdata mminit_loglevel;
 19
 20#ifndef SECTIONS_SHIFT
 21#define SECTIONS_SHIFT	0
 22#endif
 23
 24/* The zonelists are simply reported, validation is manual. */
 25void __init mminit_verify_zonelist(void)
 26{
 27	int nid;
 28
 29	if (mminit_loglevel < MMINIT_VERIFY)
 30		return;
 31
 32	for_each_online_node(nid) {
 33		pg_data_t *pgdat = NODE_DATA(nid);
 34		struct zone *zone;
 35		struct zoneref *z;
 36		struct zonelist *zonelist;
 37		int i, listid, zoneid;
 38
 39		BUG_ON(MAX_ZONELISTS > 2);
 40		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {
 41
 42			/* Identify the zone and nodelist */
 43			zoneid = i % MAX_NR_ZONES;
 44			listid = i / MAX_NR_ZONES;
 45			zonelist = &pgdat->node_zonelists[listid];
 46			zone = &pgdat->node_zones[zoneid];
 47			if (!populated_zone(zone))
 48				continue;
 49
 50			/* Print information about the zonelist */
 51			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
 52				listid > 0 ? "thisnode" : "general", nid,
 53				zone->name);
 54
 55			/* Iterate the zonelist */
 56			for_each_zone_zonelist(zone, z, zonelist, zoneid) {
 57#ifdef CONFIG_NUMA
 58				pr_cont("%d:%s ", zone->node, zone->name);
 59#else
 60				pr_cont("0:%s ", zone->name);
 61#endif /* CONFIG_NUMA */
 62			}
 63			pr_cont("\n");
 64		}
 65	}
 66}
 67
 68void __init mminit_verify_pageflags_layout(void)
 69{
 70	int shift, width;
 71	unsigned long or_mask, add_mask;
 72
 73	shift = 8 * sizeof(unsigned long);
 74	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_CPUPID_SHIFT;
 75	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
 76		"Section %d Node %d Zone %d Lastcpupid %d Flags %d\n",
 77		SECTIONS_WIDTH,
 78		NODES_WIDTH,
 79		ZONES_WIDTH,
 80		LAST_CPUPID_WIDTH,
 81		NR_PAGEFLAGS);
 82	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
 83		"Section %d Node %d Zone %d Lastcpupid %d\n",
 84		SECTIONS_SHIFT,
 85		NODES_SHIFT,
 86		ZONES_SHIFT,
 87		LAST_CPUPID_SHIFT);
 88	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
 89		"Section %lu Node %lu Zone %lu Lastcpupid %lu\n",
 90		(unsigned long)SECTIONS_PGSHIFT,
 91		(unsigned long)NODES_PGSHIFT,
 92		(unsigned long)ZONES_PGSHIFT,
 93		(unsigned long)LAST_CPUPID_PGSHIFT);
 94	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
 95		"Node/Zone ID: %lu -> %lu\n",
 96		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
 97		(unsigned long)ZONEID_PGOFF);
 98	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
 99		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
100		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
101#ifdef NODE_NOT_IN_PAGE_FLAGS
102	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
103		"Node not in page flags");
104#endif
105#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
106	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
107		"Last cpupid not in page flags");
108#endif
109
110	if (SECTIONS_WIDTH) {
111		shift -= SECTIONS_WIDTH;
112		BUG_ON(shift != SECTIONS_PGSHIFT);
113	}
114	if (NODES_WIDTH) {
115		shift -= NODES_WIDTH;
116		BUG_ON(shift != NODES_PGSHIFT);
117	}
118	if (ZONES_WIDTH) {
119		shift -= ZONES_WIDTH;
120		BUG_ON(shift != ZONES_PGSHIFT);
121	}
122
123	/* Check for bitmask overlaps */
124	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
125			(NODES_MASK << NODES_PGSHIFT) |
126			(SECTIONS_MASK << SECTIONS_PGSHIFT);
127	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
128			(NODES_MASK << NODES_PGSHIFT) +
129			(SECTIONS_MASK << SECTIONS_PGSHIFT);
130	BUG_ON(or_mask != add_mask);
131}
132
133static __init int set_mminit_loglevel(char *str)
134{
135	get_option(&str, &mminit_loglevel);
136	return 0;
137}
138early_param("mminit_loglevel", set_mminit_loglevel);
139#endif /* CONFIG_DEBUG_MEMORY_INIT */
140
141struct kobject *mm_kobj;
142EXPORT_SYMBOL_GPL(mm_kobj);
143
144#ifdef CONFIG_SMP
145s32 vm_committed_as_batch = 32;
146
147static void __meminit mm_compute_batch(void)
148{
149	u64 memsized_batch;
150	s32 nr = num_present_cpus();
151	s32 batch = max_t(s32, nr*2, 32);
152
153	/* batch size set to 0.4% of (total memory/#cpus), or max int32 */
154	memsized_batch = min_t(u64, (totalram_pages/nr)/256, 0x7fffffff);
155
156	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
157}
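/*
 * Illustrative example (hypothetical sizes): with 4 GiB of RAM and 4 KiB
 * pages, totalram_pages = 1048576; on a 4-CPU machine nr = 4, so
 * memsized_batch = (1048576 / 4) / 256 = 1024 and batch = max(4 * 2, 32) = 32.
 * vm_committed_as_batch then becomes max(1024, 32) = 1024.
 */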
158
159static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
160					unsigned long action, void *arg)
161{
162	switch (action) {
163	case MEM_ONLINE:
164	case MEM_OFFLINE:
165		mm_compute_batch();
166	default:
167		break;
168	}
169	return NOTIFY_OK;
170}
171
172static struct notifier_block compute_batch_nb __meminitdata = {
173	.notifier_call = mm_compute_batch_notifier,
174	.priority = IPC_CALLBACK_PRI, /* use lowest priority */
175};
176
177static int __init mm_compute_batch_init(void)
178{
179	mm_compute_batch();
180	register_hotmemory_notifier(&compute_batch_nb);
181
182	return 0;
183}
184
185__initcall(mm_compute_batch_init);
186
187#endif
188
189static int __init mm_sysfs_init(void)
190{
191	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
192	if (!mm_kobj)
193		return -ENOMEM;
194
195	return 0;
196}
197postcore_initcall(mm_sysfs_init);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * mm_init.c - Memory initialisation verification and debugging
   4 *
   5 * Copyright 2008 IBM Corporation, 2008
   6 * Author Mel Gorman <mel@csn.ul.ie>
   7 *
   8 */
   9#include <linux/kernel.h>
  10#include <linux/init.h>
  11#include <linux/kobject.h>
  12#include <linux/export.h>
  13#include <linux/memory.h>
  14#include <linux/notifier.h>
  15#include <linux/sched.h>
  16#include <linux/mman.h>
  17#include <linux/memblock.h>
  18#include <linux/page-isolation.h>
  19#include <linux/padata.h>
  20#include <linux/nmi.h>
  21#include <linux/buffer_head.h>
  22#include <linux/kmemleak.h>
  23#include <linux/kfence.h>
  24#include <linux/page_ext.h>
  25#include <linux/pti.h>
  26#include <linux/pgtable.h>
  27#include <linux/stackdepot.h>
  28#include <linux/swap.h>
  29#include <linux/cma.h>
  30#include <linux/crash_dump.h>
  31#include <linux/execmem.h>
  32#include <linux/vmstat.h>
  33#include "internal.h"
  34#include "slab.h"
  35#include "shuffle.h"
  36
  37#include <asm/setup.h>
  38
  39#ifdef CONFIG_DEBUG_MEMORY_INIT
  40int __meminitdata mminit_loglevel;
  41
  42/* The zonelists are simply reported, validation is manual. */
  43void __init mminit_verify_zonelist(void)
  44{
  45	int nid;
  46
  47	if (mminit_loglevel < MMINIT_VERIFY)
  48		return;
  49
  50	for_each_online_node(nid) {
  51		pg_data_t *pgdat = NODE_DATA(nid);
  52		struct zone *zone;
  53		struct zoneref *z;
  54		struct zonelist *zonelist;
  55		int i, listid, zoneid;
  56
  57		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {
  58
  59			/* Identify the zone and nodelist */
  60			zoneid = i % MAX_NR_ZONES;
  61			listid = i / MAX_NR_ZONES;
  62			zonelist = &pgdat->node_zonelists[listid];
  63			zone = &pgdat->node_zones[zoneid];
  64			if (!populated_zone(zone))
  65				continue;
  66
  67			/* Print information about the zonelist */
  68			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
  69				listid > 0 ? "thisnode" : "general", nid,
  70				zone->name);
  71
  72			/* Iterate the zonelist */
  73			for_each_zone_zonelist(zone, z, zonelist, zoneid)
  74				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
  75			pr_cont("\n");
  76		}
  77	}
  78}
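/*
 * Illustrative output (hypothetical single-node x86-64 machine, with
 * mminit_loglevel raised to at least the verify level), one line per
 * populated zone:
 *   mminit::zonelist general 0:Normal = 0:Normal 0:DMA32 0:DMA
 *   mminit::zonelist general 0:DMA32 = 0:DMA32 0:DMA
 */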
  79
  80void __init mminit_verify_pageflags_layout(void)
  81{
  82	int shift, width;
  83	unsigned long or_mask, add_mask;
  84
  85	shift = BITS_PER_LONG;
  86	width = shift - NR_NON_PAGEFLAG_BITS;
  87	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
  88		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
  89		SECTIONS_WIDTH,
  90		NODES_WIDTH,
  91		ZONES_WIDTH,
  92		LAST_CPUPID_WIDTH,
  93		KASAN_TAG_WIDTH,
  94		LRU_GEN_WIDTH,
  95		LRU_REFS_WIDTH,
  96		NR_PAGEFLAGS);
  97	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
  98		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
  99		SECTIONS_SHIFT,
 100		NODES_SHIFT,
 101		ZONES_SHIFT,
 102		LAST_CPUPID_SHIFT,
 103		KASAN_TAG_WIDTH);
 104	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
 105		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
 106		(unsigned long)SECTIONS_PGSHIFT,
 107		(unsigned long)NODES_PGSHIFT,
 108		(unsigned long)ZONES_PGSHIFT,
 109		(unsigned long)LAST_CPUPID_PGSHIFT,
 110		(unsigned long)KASAN_TAG_PGSHIFT);
 111	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
 112		"Node/Zone ID: %lu -> %lu\n",
 113		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
 114		(unsigned long)ZONEID_PGOFF);
 115	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
 116		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
 117		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
 118#ifdef NODE_NOT_IN_PAGE_FLAGS
 119	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
 120		"Node not in page flags");
 121#endif
 122#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
 123	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
 124		"Last cpupid not in page flags");
 125#endif
 126
 127	if (SECTIONS_WIDTH) {
 128		shift -= SECTIONS_WIDTH;
 129		BUG_ON(shift != SECTIONS_PGSHIFT);
 130	}
 131	if (NODES_WIDTH) {
 132		shift -= NODES_WIDTH;
 133		BUG_ON(shift != NODES_PGSHIFT);
 134	}
 135	if (ZONES_WIDTH) {
 136		shift -= ZONES_WIDTH;
 137		BUG_ON(shift != ZONES_PGSHIFT);
 138	}
 139
 140	/* Check for bitmask overlaps */
 141	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
 142			(NODES_MASK << NODES_PGSHIFT) |
 143			(SECTIONS_MASK << SECTIONS_PGSHIFT);
 144	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
 145			(NODES_MASK << NODES_PGSHIFT) +
 146			(SECTIONS_MASK << SECTIONS_PGSHIFT);
 147	BUG_ON(or_mask != add_mask);
 148}
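/*
 * Illustrative layout (hypothetical 64-bit configuration with
 * SECTIONS_WIDTH = 0, NODES_WIDTH = 6, ZONES_WIDTH = 3): the fields are
 * packed downwards from the top of page->flags, so the sections check is
 * skipped, NODES_PGSHIFT = 64 - 0 - 6 = 58 and ZONES_PGSHIFT = 58 - 3 = 55,
 * which is exactly what the checks in mminit_verify_pageflags_layout()
 * re-derive by subtracting each non-zero width from BITS_PER_LONG in turn.
 * The page flags themselves occupy the low NR_PAGEFLAGS bits.
 */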
 149
 150static __init int set_mminit_loglevel(char *str)
 151{
 152	get_option(&str, &mminit_loglevel);
 153	return 0;
 154}
 155early_param("mminit_loglevel", set_mminit_loglevel);
 156#endif /* CONFIG_DEBUG_MEMORY_INIT */
 157
 158struct kobject *mm_kobj;
 159
 160#ifdef CONFIG_SMP
 161s32 vm_committed_as_batch = 32;
 162
 163void mm_compute_batch(int overcommit_policy)
 164{
 165	u64 memsized_batch;
 166	s32 nr = num_present_cpus();
 167	s32 batch = max_t(s32, nr*2, 32);
 168	unsigned long ram_pages = totalram_pages();
 169
 170	/*
 171	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
 172	 * (total memory/#cpus), and lift it to 25% for other policies
 173	 * to ease the possible lock contention for percpu_counter
 174	 * vm_committed_as, while the max limit is INT_MAX
 175	 */
 176	if (overcommit_policy == OVERCOMMIT_NEVER)
 177		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
 178	else
 179		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);
 180
 181	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
 182}
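/*
 * Illustrative example (hypothetical machine with 16 GiB of RAM, 4 KiB
 * pages and 8 CPUs, so ram_pages = 4194304 and nr = 8):
 *   OVERCOMMIT_NEVER: memsized_batch = 4194304 / 8 / 256 = 2048   (~0.4%)
 *   other policies:   memsized_batch = 4194304 / 8 / 4   = 131072 (25%)
 * Either result is then clamped below by batch = max(8 * 2, 32) = 32.
 */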
 183
 184static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
 185					unsigned long action, void *arg)
 186{
 187	switch (action) {
 188	case MEM_ONLINE:
 189	case MEM_OFFLINE:
 190		mm_compute_batch(sysctl_overcommit_memory);
 191		break;
 192	default:
 193		break;
 194	}
 195	return NOTIFY_OK;
 196}
 197
 198static int __init mm_compute_batch_init(void)
 199{
 200	mm_compute_batch(sysctl_overcommit_memory);
 201	hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI);
 202	return 0;
 203}
 204
 205__initcall(mm_compute_batch_init);
 206
 207#endif
 208
 209static int __init mm_sysfs_init(void)
 210{
 211	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
 212	if (!mm_kobj)
 213		return -ENOMEM;
 214
 215	return 0;
 216}
 217postcore_initcall(mm_sysfs_init);
 218
 219static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
 220static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
 221static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
 222
 223static unsigned long required_kernelcore __initdata;
 224static unsigned long required_kernelcore_percent __initdata;
 225static unsigned long required_movablecore __initdata;
 226static unsigned long required_movablecore_percent __initdata;
 227
 228static unsigned long nr_kernel_pages __initdata;
 229static unsigned long nr_all_pages __initdata;
 230
 231static bool deferred_struct_pages __meminitdata;
 232
 233static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
 234
 235static int __init cmdline_parse_core(char *p, unsigned long *core,
 236				     unsigned long *percent)
 237{
 238	unsigned long long coremem;
 239	char *endptr;
 240
 241	if (!p)
 242		return -EINVAL;
 243
 244	/* Value may be a percentage of total memory, otherwise bytes */
 245	coremem = simple_strtoull(p, &endptr, 0);
 246	if (*endptr == '%') {
 247		/* Paranoid check for percent values greater than 100 */
 248		WARN_ON(coremem > 100);
 249
 250		*percent = coremem;
 251	} else {
 252		coremem = memparse(p, &p);
 253		/* Paranoid check that UL is enough for the coremem value */
 254		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
 255
 256		*core = coremem >> PAGE_SHIFT;
 257		*percent = 0UL;
 258	}
 259	return 0;
 260}
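/*
 * Illustrative example (assuming 4 KiB pages): "kernelcore=512M" takes the
 * memparse() branch, so *core = (512 << 20) >> PAGE_SHIFT = 131072 pages and
 * *percent = 0, whereas "kernelcore=30%" sets *percent = 30 and leaves
 * *core untouched.
 */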
 261
 262bool mirrored_kernelcore __initdata_memblock;
 263
 264/*
 265 * kernelcore=size sets the amount of memory for use for allocations that
 266 * cannot be reclaimed or migrated.
 267 */
 268static int __init cmdline_parse_kernelcore(char *p)
 269{
 270	/* parse kernelcore=mirror */
 271	if (parse_option_str(p, "mirror")) {
 272		mirrored_kernelcore = true;
 273		return 0;
 274	}
 275
 276	return cmdline_parse_core(p, &required_kernelcore,
 277				  &required_kernelcore_percent);
 278}
 279early_param("kernelcore", cmdline_parse_kernelcore);
 280
 281/*
 282 * movablecore=size sets the amount of memory for use for allocations that
 283 * can be reclaimed or migrated.
 284 */
 285static int __init cmdline_parse_movablecore(char *p)
 286{
 287	return cmdline_parse_core(p, &required_movablecore,
 288				  &required_movablecore_percent);
 289}
 290early_param("movablecore", cmdline_parse_movablecore);
 291
 292/*
 293 * early_calculate_totalpages()
 294 * Sum pages in active regions for movable zone.
 295 * Populate N_MEMORY for calculating usable_nodes.
 296 */
 297static unsigned long __init early_calculate_totalpages(void)
 298{
 299	unsigned long totalpages = 0;
 300	unsigned long start_pfn, end_pfn;
 301	int i, nid;
 302
 303	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
 304		unsigned long pages = end_pfn - start_pfn;
 305
 306		totalpages += pages;
 307		if (pages)
 308			node_set_state(nid, N_MEMORY);
 309	}
 310	return totalpages;
 311}
 312
 313/*
 314 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 315 * assumption is made that zones within a node are ordered in monotonically
 316 * increasing memory addresses so that the "highest" populated zone is used
 317 */
 318static void __init find_usable_zone_for_movable(void)
 319{
 320	int zone_index;
 321	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
 322		if (zone_index == ZONE_MOVABLE)
 323			continue;
 324
 325		if (arch_zone_highest_possible_pfn[zone_index] >
 326				arch_zone_lowest_possible_pfn[zone_index])
 327			break;
 328	}
 329
 330	VM_BUG_ON(zone_index == -1);
 331	movable_zone = zone_index;
 332}
 333
 334/*
 335 * Find the PFN the Movable zone begins in each node. Kernel memory
 336 * is spread evenly between nodes as long as the nodes have enough
 337 * memory. When they don't, some nodes will have more kernelcore than
 338 * others
 339 */
 340static void __init find_zone_movable_pfns_for_nodes(void)
 341{
 342	int i, nid;
 343	unsigned long usable_startpfn;
 344	unsigned long kernelcore_node, kernelcore_remaining;
 345	/* save the state before borrow the nodemask */
 346	nodemask_t saved_node_state = node_states[N_MEMORY];
 347	unsigned long totalpages = early_calculate_totalpages();
 348	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
 349	struct memblock_region *r;
 350
 351	/* Need to find movable_zone earlier when movable_node is specified. */
 352	find_usable_zone_for_movable();
 353
 354	/*
 355	 * If movable_node is specified, ignore kernelcore and movablecore
 356	 * options.
 357	 */
 358	if (movable_node_is_enabled()) {
 359		for_each_mem_region(r) {
 360			if (!memblock_is_hotpluggable(r))
 361				continue;
 362
 363			nid = memblock_get_region_node(r);
 364
 365			usable_startpfn = memblock_region_memory_base_pfn(r);
 366			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
 367				min(usable_startpfn, zone_movable_pfn[nid]) :
 368				usable_startpfn;
 369		}
 370
 371		goto out2;
 372	}
 373
 374	/*
 375	 * If kernelcore=mirror is specified, ignore movablecore option
 376	 */
 377	if (mirrored_kernelcore) {
 378		bool mem_below_4gb_not_mirrored = false;
 379
 380		if (!memblock_has_mirror()) {
 381			pr_warn("The system has no mirror memory, ignore kernelcore=mirror.\n");
 382			goto out;
 383		}
 384
 385		if (is_kdump_kernel()) {
 386			pr_warn("The system is under kdump, ignore kernelcore=mirror.\n");
 387			goto out;
 388		}
 389
 390		for_each_mem_region(r) {
 391			if (memblock_is_mirror(r))
 392				continue;
 393
 394			nid = memblock_get_region_node(r);
 395
 396			usable_startpfn = memblock_region_memory_base_pfn(r);
 397
 398			if (usable_startpfn < PHYS_PFN(SZ_4G)) {
 399				mem_below_4gb_not_mirrored = true;
 400				continue;
 401			}
 402
 403			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
 404				min(usable_startpfn, zone_movable_pfn[nid]) :
 405				usable_startpfn;
 406		}
 407
 408		if (mem_below_4gb_not_mirrored)
 409			pr_warn("This configuration results in unmirrored kernel memory.\n");
 410
 411		goto out2;
 412	}
 413
 414	/*
 415	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
 416	 * amount of necessary memory.
 417	 */
 418	if (required_kernelcore_percent)
 419		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
 420				       10000UL;
 421	if (required_movablecore_percent)
 422		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
 423					10000UL;
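	/*
	 * Illustrative example: with totalpages = 1,000,000 and kernelcore=25%,
	 * required_kernelcore becomes (1000000 * 100 * 25) / 10000 = 250,000
	 * pages.
	 */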
 424
 425	/*
 426	 * If movablecore= was specified, calculate what size of
 427	 * kernelcore that corresponds so that memory usable for
 428	 * any allocation type is evenly spread. If both kernelcore
 429	 * and movablecore are specified, then the value of kernelcore
 430	 * will be used for required_kernelcore if it's greater than
 431	 * what movablecore would have allowed.
 432	 */
 433	if (required_movablecore) {
 434		unsigned long corepages;
 435
 436		/*
 437		 * Round-up so that ZONE_MOVABLE is at least as large as what
 438		 * was requested by the user
 439		 */
 440		required_movablecore =
 441			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
 442		required_movablecore = min(totalpages, required_movablecore);
 443		corepages = totalpages - required_movablecore;
 444
 445		required_kernelcore = max(required_kernelcore, corepages);
 446	}
 447
 448	/*
 449	 * If kernelcore was not specified or kernelcore size is larger
 450	 * than totalpages, there is no ZONE_MOVABLE.
 451	 */
 452	if (!required_kernelcore || required_kernelcore >= totalpages)
 453		goto out;
 454
 455	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
 456	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
 457
 458restart:
 459	/* Spread kernelcore memory as evenly as possible throughout nodes */
 460	kernelcore_node = required_kernelcore / usable_nodes;
 461	for_each_node_state(nid, N_MEMORY) {
 462		unsigned long start_pfn, end_pfn;
 463
 464		/*
 465		 * Recalculate kernelcore_node if the division per node
 466		 * now exceeds what is necessary to satisfy the requested
 467		 * amount of memory for the kernel
 468		 */
 469		if (required_kernelcore < kernelcore_node)
 470			kernelcore_node = required_kernelcore / usable_nodes;
 471
 472		/*
 473		 * As the map is walked, we track how much memory is usable
 474		 * by the kernel using kernelcore_remaining. When it is
 475		 * 0, the rest of the node is usable by ZONE_MOVABLE
 476		 */
 477		kernelcore_remaining = kernelcore_node;
 478
 479		/* Go through each range of PFNs within this node */
 480		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
 481			unsigned long size_pages;
 482
 483			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
 484			if (start_pfn >= end_pfn)
 485				continue;
 486
 487			/* Account for what is only usable for kernelcore */
 488			if (start_pfn < usable_startpfn) {
 489				unsigned long kernel_pages;
 490				kernel_pages = min(end_pfn, usable_startpfn)
 491								- start_pfn;
 492
 493				kernelcore_remaining -= min(kernel_pages,
 494							kernelcore_remaining);
 495				required_kernelcore -= min(kernel_pages,
 496							required_kernelcore);
 497
 498				/* Continue if range is now fully accounted */
 499				if (end_pfn <= usable_startpfn) {
 500
 501					/*
 502					 * Push zone_movable_pfn to the end so
 503					 * that if we have to rebalance
 504					 * kernelcore across nodes, we will
 505					 * not double account here
 506					 */
 507					zone_movable_pfn[nid] = end_pfn;
 508					continue;
 509				}
 510				start_pfn = usable_startpfn;
 511			}
 512
 513			/*
 514			 * The usable PFN range for ZONE_MOVABLE is from
 515			 * start_pfn->end_pfn. Calculate size_pages as the
 516			 * number of pages used as kernelcore
 517			 */
 518			size_pages = end_pfn - start_pfn;
 519			if (size_pages > kernelcore_remaining)
 520				size_pages = kernelcore_remaining;
 521			zone_movable_pfn[nid] = start_pfn + size_pages;
 522
 523			/*
 524			 * Some kernelcore has been met, update counts and
 525			 * break if the kernelcore for this node has been
 526			 * satisfied
 527			 */
 528			required_kernelcore -= min(required_kernelcore,
 529								size_pages);
 530			kernelcore_remaining -= size_pages;
 531			if (!kernelcore_remaining)
 532				break;
 533		}
 534	}
 535
 536	/*
 537	 * If there is still required_kernelcore, we do another pass with one
 538	 * less node in the count. This will push zone_movable_pfn[nid] further
 539	 * along on the nodes that still have memory until kernelcore is
 540	 * satisfied
 541	 */
 542	usable_nodes--;
 543	if (usable_nodes && required_kernelcore > usable_nodes)
 544		goto restart;
 545
 546out2:
 547	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
 548	for (nid = 0; nid < MAX_NUMNODES; nid++) {
 549		unsigned long start_pfn, end_pfn;
 550
 551		zone_movable_pfn[nid] =
 552			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
 553
 554		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 555		if (zone_movable_pfn[nid] >= end_pfn)
 556			zone_movable_pfn[nid] = 0;
 557	}
 558
 559out:
 560	/* restore the node_state */
 561	node_states[N_MEMORY] = saved_node_state;
 562}
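/*
 * Illustrative example (hypothetical two-node machine with 4G of memory per
 * node, booted with kernelcore=4G): usable_nodes = 2, so kernelcore_node
 * starts at 2G worth of pages per node. As each node's PFN ranges are
 * walked, roughly the first 2G of the node is accounted to the kernel zones
 * and zone_movable_pfn[nid] ends up about 2G into the node, so the upper
 * half of each node becomes ZONE_MOVABLE (subject to the MAX_ORDER_NR_PAGES
 * alignment done at the out2 label).
 */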
 563
 564void __meminit __init_single_page(struct page *page, unsigned long pfn,
 565				unsigned long zone, int nid)
 566{
 567	mm_zero_struct_page(page);
 568	set_page_links(page, zone, nid, pfn);
 569	init_page_count(page);
 570	atomic_set(&page->_mapcount, -1);
 571	page_cpupid_reset_last(page);
 572	page_kasan_tag_reset(page);
 573
 574	INIT_LIST_HEAD(&page->lru);
 575#ifdef WANT_PAGE_VIRTUAL
 576	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
 577	if (!is_highmem_idx(zone))
 578		set_page_address(page, __va(pfn << PAGE_SHIFT));
 579#endif
 580}
 581
 582#ifdef CONFIG_NUMA
 583/*
 584 * During memory init memblocks map pfns to nids. The search is expensive and
 585 * this caches recent lookups. The implementation of __early_pfn_to_nid
 586 * treats start/end as pfns.
 587 */
 588struct mminit_pfnnid_cache {
 589	unsigned long last_start;
 590	unsigned long last_end;
 591	int last_nid;
 592};
 593
 594static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
 595
 596/*
 597 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 598 */
 599static int __meminit __early_pfn_to_nid(unsigned long pfn,
 600					struct mminit_pfnnid_cache *state)
 601{
 602	unsigned long start_pfn, end_pfn;
 603	int nid;
 604
 605	if (state->last_start <= pfn && pfn < state->last_end)
 606		return state->last_nid;
 607
 608	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
 609	if (nid != NUMA_NO_NODE) {
 610		state->last_start = start_pfn;
 611		state->last_end = end_pfn;
 612		state->last_nid = nid;
 613	}
 614
 615	return nid;
 616}
 617
 618int __meminit early_pfn_to_nid(unsigned long pfn)
 619{
 620	static DEFINE_SPINLOCK(early_pfn_lock);
 621	int nid;
 622
 623	spin_lock(&early_pfn_lock);
 624	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
 625	if (nid < 0)
 626		nid = first_online_node;
 627	spin_unlock(&early_pfn_lock);
 628
 629	return nid;
 630}
 631
 632int hashdist = HASHDIST_DEFAULT;
 633
 634static int __init set_hashdist(char *str)
 635{
 636	if (!str)
 637		return 0;
 638	hashdist = simple_strtoul(str, &str, 0);
 639	return 1;
 640}
 641__setup("hashdist=", set_hashdist);
 642
 643static inline void fixup_hashdist(void)
 644{
 645	if (num_node_state(N_MEMORY) == 1)
 646		hashdist = 0;
 647}
 648#else
 649static inline void fixup_hashdist(void) {}
 650#endif /* CONFIG_NUMA */
 651
 652#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 653static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
 654{
 655	pgdat->first_deferred_pfn = ULONG_MAX;
 656}
 657
 658/* Returns true if the struct page for the pfn is initialised */
 659static inline bool __meminit early_page_initialised(unsigned long pfn, int nid)
 660{
 661	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
 662		return false;
 663
 664	return true;
 665}
 666
 667/*
 668 * Returns true when the remaining initialisation should be deferred until
 669 * later in the boot cycle when it can be parallelised.
 670 */
 671static bool __meminit
 672defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 673{
 674	static unsigned long prev_end_pfn, nr_initialised;
 675
 676	if (early_page_ext_enabled())
 677		return false;
 678
 679	/* Always populate low zones for address-constrained allocations */
 680	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
 681		return false;
 682
 683	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
 684		return true;
 685
 686	/*
 687	 * prev_end_pfn is a static that contains the end of the previous zone.
 688	 * No need to protect because called very early in boot before smp_init.
 689	 */
 690	if (prev_end_pfn != end_pfn) {
 691		prev_end_pfn = end_pfn;
 692		nr_initialised = 0;
 693	}
 694
 695	/*
 696	 * We start only with one section of pages, more pages are added as
 697	 * needed until the rest of deferred pages are initialized.
 698	 */
 699	nr_initialised++;
 700	if ((nr_initialised > PAGES_PER_SECTION) &&
 701	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
 702		NODE_DATA(nid)->first_deferred_pfn = pfn;
 703		return true;
 704	}
 705	return false;
 706}
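/*
 * Illustrative behaviour: for a node with a large highest zone, roughly the
 * first PAGES_PER_SECTION pages handed to memmap_init_range() are
 * initialised at boot; once nr_initialised exceeds one section and a
 * section-aligned pfn is reached, first_deferred_pfn is recorded and the
 * remaining struct pages are initialised later in boot, in parallel, as
 * described in the comment above defer_init().
 */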
 707
 708static void __meminit init_reserved_page(unsigned long pfn, int nid)
 709{
 710	pg_data_t *pgdat;
 711	int zid;
 712
 713	if (early_page_initialised(pfn, nid))
 714		return;
 715
 716	pgdat = NODE_DATA(nid);
 717
 718	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 719		struct zone *zone = &pgdat->node_zones[zid];
 720
 721		if (zone_spans_pfn(zone, pfn))
 722			break;
 723	}
 724	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
 725
 726	if (pageblock_aligned(pfn))
 727		set_pageblock_migratetype(pfn_to_page(pfn), MIGRATE_MOVABLE);
 728}
 729#else
 730static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
 731
 732static inline bool early_page_initialised(unsigned long pfn, int nid)
 733{
 734	return true;
 735}
 736
 737static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 738{
 739	return false;
 740}
 741
 742static inline void init_reserved_page(unsigned long pfn, int nid)
 743{
 744}
 745#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 746
 747/*
 748 * Initialised pages do not have PageReserved set. This function is
 749 * called for each range allocated by the bootmem allocator and
 750 * marks the pages PageReserved. The remaining valid pages are later
 751 * sent to the buddy page allocator.
 752 */
 753void __meminit reserve_bootmem_region(phys_addr_t start,
 754				      phys_addr_t end, int nid)
 755{
 756	unsigned long start_pfn = PFN_DOWN(start);
 757	unsigned long end_pfn = PFN_UP(end);
 758
 759	for (; start_pfn < end_pfn; start_pfn++) {
 760		if (pfn_valid(start_pfn)) {
 761			struct page *page = pfn_to_page(start_pfn);
 762
 763			init_reserved_page(start_pfn, nid);
 764
 765			/*
 766			 * no need for atomic set_bit because the struct
 767			 * page is not visible yet so nobody should
 768			 * access it yet.
 769			 */
 770			__SetPageReserved(page);
 771		}
 772	}
 773}
 774
 775/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
 776static bool __meminit
 777overlap_memmap_init(unsigned long zone, unsigned long *pfn)
 778{
 779	static struct memblock_region *r;
 780
 781	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
 782		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
 783			for_each_mem_region(r) {
 784				if (*pfn < memblock_region_memory_end_pfn(r))
 785					break;
 786			}
 787		}
 788		if (*pfn >= memblock_region_memory_base_pfn(r) &&
 789		    memblock_is_mirror(r)) {
 790			*pfn = memblock_region_memory_end_pfn(r);
 791			return true;
 792		}
 793	}
 794	return false;
 795}
 796
 797/*
 798 * Only struct pages that correspond to ranges defined by memblock.memory
 799 * are zeroed and initialized by going through __init_single_page() during
 800 * memmap_init_zone_range().
 801 *
 802 * But, there could be struct pages that correspond to holes in
 803 * memblock.memory. This can happen because of the following reasons:
 804 * - physical memory bank size is not necessarily the exact multiple of the
 805 *   arbitrary section size
 806 * - early reserved memory may not be listed in memblock.memory
 807 * - non-memory regions covered by the contiguous flatmem mapping
 808 * - memory layouts defined with memmap= kernel parameter may not align
 809 *   nicely with memmap sections
 810 *
 811 * Explicitly initialize those struct pages so that:
 812 * - PG_Reserved is set
 813 * - zone and node links point to zone and node that span the page if the
 814 *   hole is in the middle of a zone
 815 * - zone and node links point to adjacent zone/node if the hole falls on
 816 *   the zone boundary; the pages in such holes will be prepended to the
 817 *   zone/node above the hole except for the trailing pages in the last
 818 *   section that will be appended to the zone/node below.
 819 */
 820static void __init init_unavailable_range(unsigned long spfn,
 821					  unsigned long epfn,
 822					  int zone, int node)
 823{
 824	unsigned long pfn;
 825	u64 pgcnt = 0;
 826
 827	for (pfn = spfn; pfn < epfn; pfn++) {
 828		if (!pfn_valid(pageblock_start_pfn(pfn))) {
 829			pfn = pageblock_end_pfn(pfn) - 1;
 830			continue;
 831		}
 832		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
 833		__SetPageReserved(pfn_to_page(pfn));
 834		pgcnt++;
 835	}
 836
 837	if (pgcnt)
 838		pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n",
 839			node, zone_names[zone], pgcnt);
 840}
 841
 842/*
 843 * Initially all pages are reserved - free ones are freed
 844 * up by memblock_free_all() once the early boot process is
 845 * done. Non-atomic initialization, single-pass.
 846 *
 847 * All aligned pageblocks are initialized to the specified migratetype
 848 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
 849 * zone stats (e.g., nr_isolate_pageblock) are touched.
 850 */
 851void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
 852		unsigned long start_pfn, unsigned long zone_end_pfn,
 853		enum meminit_context context,
 854		struct vmem_altmap *altmap, int migratetype)
 855{
 856	unsigned long pfn, end_pfn = start_pfn + size;
 857	struct page *page;
 858
 859	if (highest_memmap_pfn < end_pfn - 1)
 860		highest_memmap_pfn = end_pfn - 1;
 861
 862#ifdef CONFIG_ZONE_DEVICE
 863	/*
 864	 * Honor reservation requested by the driver for this ZONE_DEVICE
 865	 * memory. We limit the total number of pages to initialize to just
 866	 * those that might contain the memory mapping. We will defer the
 867	 * ZONE_DEVICE page initialization until after we have released
 868	 * the hotplug lock.
 869	 */
 870	if (zone == ZONE_DEVICE) {
 871		if (!altmap)
 872			return;
 873
 874		if (start_pfn == altmap->base_pfn)
 875			start_pfn += altmap->reserve;
 876		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
 877	}
 878#endif
 879
 880	for (pfn = start_pfn; pfn < end_pfn; ) {
 881		/*
 882		 * There can be holes in boot-time mem_map[]s handed to this
 883		 * function.  They do not exist on hotplugged memory.
 884		 */
 885		if (context == MEMINIT_EARLY) {
 886			if (overlap_memmap_init(zone, &pfn))
 887				continue;
 888			if (defer_init(nid, pfn, zone_end_pfn)) {
 889				deferred_struct_pages = true;
 890				break;
 891			}
 892		}
 893
 894		page = pfn_to_page(pfn);
 895		__init_single_page(page, pfn, zone, nid);
 896		if (context == MEMINIT_HOTPLUG) {
 897#ifdef CONFIG_ZONE_DEVICE
 898			if (zone == ZONE_DEVICE)
 899				__SetPageReserved(page);
 900			else
 901#endif
 902				__SetPageOffline(page);
 903		}
 904
 905		/*
 906		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
 907		 * such that unmovable allocations won't be scattered all
 908		 * over the place during system boot.
 909		 */
 910		if (pageblock_aligned(pfn)) {
 911			set_pageblock_migratetype(page, migratetype);
 912			cond_resched();
 913		}
 914		pfn++;
 915	}
 916}
 917
 918static void __init memmap_init_zone_range(struct zone *zone,
 919					  unsigned long start_pfn,
 920					  unsigned long end_pfn,
 921					  unsigned long *hole_pfn)
 922{
 923	unsigned long zone_start_pfn = zone->zone_start_pfn;
 924	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
 925	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
 926
 927	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
 928	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
 929
 930	if (start_pfn >= end_pfn)
 931		return;
 932
 933	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
 934			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
 935
 936	if (*hole_pfn < start_pfn)
 937		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
 938
 939	*hole_pfn = end_pfn;
 940}
 941
 942static void __init memmap_init(void)
 943{
 944	unsigned long start_pfn, end_pfn;
 945	unsigned long hole_pfn = 0;
 946	int i, j, zone_id = 0, nid;
 947
 948	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
 949		struct pglist_data *node = NODE_DATA(nid);
 950
 951		for (j = 0; j < MAX_NR_ZONES; j++) {
 952			struct zone *zone = node->node_zones + j;
 953
 954			if (!populated_zone(zone))
 955				continue;
 956
 957			memmap_init_zone_range(zone, start_pfn, end_pfn,
 958					       &hole_pfn);
 959			zone_id = j;
 960		}
 961	}
 962
 963#ifdef CONFIG_SPARSEMEM
 964	/*
 965	 * Initialize the memory map for hole in the range [memory_end,
 966	 * section_end].
 967	 * Append the pages in this hole to the highest zone in the last
 968	 * node.
 969	 * The call to init_unavailable_range() is outside the ifdef to
 970	 * silence the compiler warning about zone_id set but not used;
 971	 * for FLATMEM it is a nop anyway
 972	 */
 973	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
 974	if (hole_pfn < end_pfn)
 975#endif
 976		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
 977}
 978
 979#ifdef CONFIG_ZONE_DEVICE
 980static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
 981					  unsigned long zone_idx, int nid,
 982					  struct dev_pagemap *pgmap)
 983{
 984
 985	__init_single_page(page, pfn, zone_idx, nid);
 986
 987	/*
 988	 * Mark page reserved as it will need to wait for onlining
 989	 * phase for it to be fully associated with a zone.
 990	 *
 991	 * We can use the non-atomic __set_bit operation for setting
 992	 * the flag as we are still initializing the pages.
 993	 */
 994	__SetPageReserved(page);
 995
 996	/*
 997	 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
 998	 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
 999	 * ever freed or placed on a driver-private list.
1000	 */
1001	page->pgmap = pgmap;
1002	page->zone_device_data = NULL;
1003
1004	/*
1005	 * Mark the block movable so that blocks are reserved for
1006	 * movable at startup. This will force kernel allocations
1007	 * to reserve their blocks rather than leaking throughout
1008	 * the address space during boot when many long-lived
1009	 * kernel allocations are made.
1010	 *
1011	 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
1012	 * because this is done early in section_activate()
1013	 */
1014	if (pageblock_aligned(pfn)) {
1015		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1016		cond_resched();
1017	}
1018
1019	/*
1020	 * ZONE_DEVICE pages are released directly to the driver page allocator
1021	 * which will set the page count to 1 when allocating the page.
1022	 */
1023	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
1024	    pgmap->type == MEMORY_DEVICE_COHERENT)
1025		set_page_count(page, 0);
1026}
1027
1028/*
1029 * With compound page geometry and when struct pages are stored in ram most
1030 * tail pages are reused. Consequently, the amount of unique struct pages to
1031 * initialize is a lot smaller than the total amount of struct pages being
1032 * mapped. This is a paired / mild layering violation with explicit knowledge
1033 * of how the sparse_vmemmap internals handle compound pages in the lack
1034 * of an altmap. See vmemmap_populate_compound_pages().
1035 */
1036static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
1037					      struct dev_pagemap *pgmap)
1038{
1039	if (!vmemmap_can_optimize(altmap, pgmap))
1040		return pgmap_vmemmap_nr(pgmap);
1041
1042	return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page));
1043}
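/*
 * Illustrative example (assuming 4 KiB pages, a 64-byte struct page and
 * VMEMMAP_RESERVE_NR = 2): when the vmemmap optimisation applies, only
 * 2 * (4096 / 64) = 128 struct pages per compound device page are unique
 * and need initialising here; the remaining tail struct pages are backed by
 * reused vmemmap pages.
 */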
1044
1045static void __ref memmap_init_compound(struct page *head,
1046				       unsigned long head_pfn,
1047				       unsigned long zone_idx, int nid,
1048				       struct dev_pagemap *pgmap,
1049				       unsigned long nr_pages)
1050{
1051	unsigned long pfn, end_pfn = head_pfn + nr_pages;
1052	unsigned int order = pgmap->vmemmap_shift;
1053
1054	__SetPageHead(head);
1055	for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
1056		struct page *page = pfn_to_page(pfn);
1057
1058		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
1059		prep_compound_tail(head, pfn - head_pfn);
1060		set_page_count(page, 0);
1061
1062		/*
1063		 * The first tail page stores important compound page info.
1064		 * Call prep_compound_head() after the first tail page has
1065		 * been initialized, to not have the data overwritten.
1066		 */
1067		if (pfn == head_pfn + 1)
1068			prep_compound_head(head, order);
1069	}
1070}
1071
1072void __ref memmap_init_zone_device(struct zone *zone,
1073				   unsigned long start_pfn,
1074				   unsigned long nr_pages,
1075				   struct dev_pagemap *pgmap)
1076{
1077	unsigned long pfn, end_pfn = start_pfn + nr_pages;
1078	struct pglist_data *pgdat = zone->zone_pgdat;
1079	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
1080	unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
1081	unsigned long zone_idx = zone_idx(zone);
1082	unsigned long start = jiffies;
1083	int nid = pgdat->node_id;
1084
1085	if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE))
1086		return;
1087
1088	/*
1089	 * The call to memmap_init should have already taken care
1090	 * of the pages reserved for the memmap, so we can just jump to
1091	 * the end of that region and start processing the device pages.
1092	 */
1093	if (altmap) {
1094		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
1095		nr_pages = end_pfn - start_pfn;
1096	}
1097
1098	for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
1099		struct page *page = pfn_to_page(pfn);
1100
1101		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
1102
1103		if (pfns_per_compound == 1)
1104			continue;
1105
1106		memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
1107				     compound_nr_pages(altmap, pgmap));
1108	}
1109
1110	pr_debug("%s initialised %lu pages in %ums\n", __func__,
1111		nr_pages, jiffies_to_msecs(jiffies - start));
1112}
1113#endif
1114
1115/*
1116 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
1117 * because it is sized independent of architecture. Unlike the other zones,
1118 * the starting point for ZONE_MOVABLE is not fixed. It may be different
1119 * in each node depending on the size of each node and how evenly kernelcore
1120 * is distributed. This helper function adjusts the zone ranges
1121 * provided by the architecture for a given node by using the end of the
1122 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
1123 * zones within a node are in order of monotonically increasing memory addresses
1124 */
1125static void __init adjust_zone_range_for_zone_movable(int nid,
1126					unsigned long zone_type,
1127					unsigned long node_end_pfn,
1128					unsigned long *zone_start_pfn,
1129					unsigned long *zone_end_pfn)
1130{
1131	/* Only adjust if ZONE_MOVABLE is on this node */
1132	if (zone_movable_pfn[nid]) {
1133		/* Size ZONE_MOVABLE */
1134		if (zone_type == ZONE_MOVABLE) {
1135			*zone_start_pfn = zone_movable_pfn[nid];
1136			*zone_end_pfn = min(node_end_pfn,
1137				arch_zone_highest_possible_pfn[movable_zone]);
1138
1139		/* Adjust for ZONE_MOVABLE starting within this range */
1140		} else if (!mirrored_kernelcore &&
1141			*zone_start_pfn < zone_movable_pfn[nid] &&
1142			*zone_end_pfn > zone_movable_pfn[nid]) {
1143			*zone_end_pfn = zone_movable_pfn[nid];
1144
1145		/* Check if this whole range is within ZONE_MOVABLE */
1146		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
1147			*zone_start_pfn = *zone_end_pfn;
1148	}
1149}
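/*
 * Illustrative example (kernelcore=mirror not in use): suppose a node spans
 * pfns 0..1,000,000 and zone_movable_pfn[nid] = 600,000. ZONE_MOVABLE then
 * spans 600,000..1,000,000 (capped at the highest usable zone); a kernel
 * zone that straddles 600,000 is truncated to end there; and a kernel zone
 * lying entirely above 600,000 collapses to an empty range.
 */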
1150
1151/*
1152 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
1153 * then all holes in the requested range will be accounted for.
1154 */
1155static unsigned long __init __absent_pages_in_range(int nid,
1156				unsigned long range_start_pfn,
1157				unsigned long range_end_pfn)
1158{
1159	unsigned long nr_absent = range_end_pfn - range_start_pfn;
1160	unsigned long start_pfn, end_pfn;
1161	int i;
1162
1163	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
1164		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
1165		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
1166		nr_absent -= end_pfn - start_pfn;
1167	}
1168	return nr_absent;
1169}
1170
1171/**
1172 * absent_pages_in_range - Return number of page frames in holes within a range
1173 * @start_pfn: The start PFN to start searching for holes
1174 * @end_pfn: The end PFN to stop searching for holes
1175 *
1176 * Return: the number of page frames in memory holes within a range.
1177 */
1178unsigned long __init absent_pages_in_range(unsigned long start_pfn,
1179							unsigned long end_pfn)
1180{
1181	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
1182}
1183
1184/* Return the number of page frames in holes in a zone on a node */
1185static unsigned long __init zone_absent_pages_in_node(int nid,
1186					unsigned long zone_type,
1187					unsigned long zone_start_pfn,
1188					unsigned long zone_end_pfn)
1189{
1190	unsigned long nr_absent;
1191
1192	/* zone is empty, we don't have any absent pages */
1193	if (zone_start_pfn == zone_end_pfn)
1194		return 0;
1195
1196	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
1197
1198	/*
1199	 * ZONE_MOVABLE handling.
1200	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
1201	 * and vice versa.
1202	 */
1203	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
1204		unsigned long start_pfn, end_pfn;
1205		struct memblock_region *r;
1206
1207		for_each_mem_region(r) {
1208			start_pfn = clamp(memblock_region_memory_base_pfn(r),
1209					  zone_start_pfn, zone_end_pfn);
1210			end_pfn = clamp(memblock_region_memory_end_pfn(r),
1211					zone_start_pfn, zone_end_pfn);
1212
1213			if (zone_type == ZONE_MOVABLE &&
1214			    memblock_is_mirror(r))
1215				nr_absent += end_pfn - start_pfn;
1216
1217			if (zone_type == ZONE_NORMAL &&
1218			    !memblock_is_mirror(r))
1219				nr_absent += end_pfn - start_pfn;
1220		}
1221	}
1222
1223	return nr_absent;
1224}
1225
1226/*
1227 * Return the number of pages a zone spans in a node, including holes
1228 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
1229 */
1230static unsigned long __init zone_spanned_pages_in_node(int nid,
1231					unsigned long zone_type,
1232					unsigned long node_start_pfn,
1233					unsigned long node_end_pfn,
1234					unsigned long *zone_start_pfn,
1235					unsigned long *zone_end_pfn)
1236{
1237	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
1238	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
1239
1240	/* Get the start and end of the zone */
1241	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
1242	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
1243	adjust_zone_range_for_zone_movable(nid, zone_type, node_end_pfn,
1244					   zone_start_pfn, zone_end_pfn);
1245
1246	/* Check that this node has pages within the zone's required range */
1247	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
1248		return 0;
1249
1250	/* Move the zone boundaries inside the node if necessary */
1251	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
1252	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
1253
1254	/* Return the spanned pages */
1255	return *zone_end_pfn - *zone_start_pfn;
1256}
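/*
 * Illustrative example: a node spanning pfns 1,000..5,000 and a zone whose
 * architectural limits are 0..3,000 yields *zone_start_pfn = 1,000 and
 * *zone_end_pfn = 3,000, i.e. 2,000 spanned pages; hole pages inside that
 * range are subtracted separately by zone_absent_pages_in_node() to give
 * present_pages.
 */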
1257
1258static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat)
1259{
1260	struct zone *z;
1261
1262	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {
1263		z->zone_start_pfn = 0;
1264		z->spanned_pages = 0;
1265		z->present_pages = 0;
1266#if defined(CONFIG_MEMORY_HOTPLUG)
1267		z->present_early_pages = 0;
1268#endif
1269	}
1270
1271	pgdat->node_spanned_pages = 0;
1272	pgdat->node_present_pages = 0;
1273	pr_debug("On node %d totalpages: 0\n", pgdat->node_id);
1274}
1275
1276static void __init calc_nr_kernel_pages(void)
1277{
1278	unsigned long start_pfn, end_pfn;
1279	phys_addr_t start_addr, end_addr;
1280	u64 u;
1281#ifdef CONFIG_HIGHMEM
1282	unsigned long high_zone_low = arch_zone_lowest_possible_pfn[ZONE_HIGHMEM];
1283#endif
1284
1285	for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) {
1286		start_pfn = PFN_UP(start_addr);
1287		end_pfn   = PFN_DOWN(end_addr);
1288
1289		if (start_pfn < end_pfn) {
1290			nr_all_pages += end_pfn - start_pfn;
1291#ifdef CONFIG_HIGHMEM
1292			start_pfn = clamp(start_pfn, 0, high_zone_low);
1293			end_pfn = clamp(end_pfn, 0, high_zone_low);
1294#endif
1295			nr_kernel_pages += end_pfn - start_pfn;
1296		}
1297	}
1298}
1299
1300static void __init calculate_node_totalpages(struct pglist_data *pgdat,
1301						unsigned long node_start_pfn,
1302						unsigned long node_end_pfn)
1303{
1304	unsigned long realtotalpages = 0, totalpages = 0;
1305	enum zone_type i;
1306
1307	for (i = 0; i < MAX_NR_ZONES; i++) {
1308		struct zone *zone = pgdat->node_zones + i;
1309		unsigned long zone_start_pfn, zone_end_pfn;
1310		unsigned long spanned, absent;
1311		unsigned long real_size;
1312
1313		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
1314						     node_start_pfn,
1315						     node_end_pfn,
1316						     &zone_start_pfn,
1317						     &zone_end_pfn);
1318		absent = zone_absent_pages_in_node(pgdat->node_id, i,
1319						   zone_start_pfn,
1320						   zone_end_pfn);
1321
1322		real_size = spanned - absent;
1323
1324		if (spanned)
1325			zone->zone_start_pfn = zone_start_pfn;
1326		else
1327			zone->zone_start_pfn = 0;
1328		zone->spanned_pages = spanned;
1329		zone->present_pages = real_size;
1330#if defined(CONFIG_MEMORY_HOTPLUG)
1331		zone->present_early_pages = real_size;
1332#endif
1333
1334		totalpages += spanned;
1335		realtotalpages += real_size;
1336	}
1337
1338	pgdat->node_spanned_pages = totalpages;
1339	pgdat->node_present_pages = realtotalpages;
1340	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
1341}
1342
1343#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1344static void pgdat_init_split_queue(struct pglist_data *pgdat)
1345{
1346	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
1347
1348	spin_lock_init(&ds_queue->split_queue_lock);
1349	INIT_LIST_HEAD(&ds_queue->split_queue);
1350	ds_queue->split_queue_len = 0;
1351}
1352#else
1353static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
1354#endif
1355
1356#ifdef CONFIG_COMPACTION
1357static void pgdat_init_kcompactd(struct pglist_data *pgdat)
1358{
1359	init_waitqueue_head(&pgdat->kcompactd_wait);
1360}
1361#else
1362static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
1363#endif
1364
1365static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
1366{
1367	int i;
1368
1369	pgdat_resize_init(pgdat);
1370	pgdat_kswapd_lock_init(pgdat);
1371
1372	pgdat_init_split_queue(pgdat);
1373	pgdat_init_kcompactd(pgdat);
1374
1375	init_waitqueue_head(&pgdat->kswapd_wait);
1376	init_waitqueue_head(&pgdat->pfmemalloc_wait);
1377
1378	for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
1379		init_waitqueue_head(&pgdat->reclaim_wait[i]);
1380
1381	pgdat_page_ext_init(pgdat);
1382	lruvec_init(&pgdat->__lruvec);
1383}
1384
1385static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
1386							unsigned long remaining_pages)
1387{
1388	atomic_long_set(&zone->managed_pages, remaining_pages);
1389	zone_set_nid(zone, nid);
1390	zone->name = zone_names[idx];
1391	zone->zone_pgdat = NODE_DATA(nid);
1392	spin_lock_init(&zone->lock);
1393	zone_seqlock_init(zone);
1394	zone_pcp_init(zone);
1395}
1396
1397static void __meminit zone_init_free_lists(struct zone *zone)
1398{
1399	unsigned int order, t;
1400	for_each_migratetype_order(order, t) {
1401		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1402		zone->free_area[order].nr_free = 0;
1403	}
1404
1405#ifdef CONFIG_UNACCEPTED_MEMORY
1406	INIT_LIST_HEAD(&zone->unaccepted_pages);
1407#endif
1408}
1409
1410void __meminit init_currently_empty_zone(struct zone *zone,
1411					unsigned long zone_start_pfn,
1412					unsigned long size)
1413{
1414	struct pglist_data *pgdat = zone->zone_pgdat;
1415	int zone_idx = zone_idx(zone) + 1;
1416
1417	if (zone_idx > pgdat->nr_zones)
1418		pgdat->nr_zones = zone_idx;
1419
1420	zone->zone_start_pfn = zone_start_pfn;
1421
1422	mminit_dprintk(MMINIT_TRACE, "memmap_init",
1423			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
1424			pgdat->node_id,
1425			(unsigned long)zone_idx(zone),
1426			zone_start_pfn, (zone_start_pfn + size));
1427
1428	zone_init_free_lists(zone);
1429	zone->initialized = 1;
1430}
1431
1432#ifndef CONFIG_SPARSEMEM
1433/*
1434 * Calculate the size of the zone->blockflags rounded to an unsigned long
1435 * Start by making sure zonesize is a multiple of pageblock_order by rounding
1436 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
1437 * round what is now in bits to nearest long in bits, then return it in
1438 * bytes.
1439 */
1440static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
1441{
1442	unsigned long usemapsize;
1443
1444	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
1445	usemapsize = roundup(zonesize, pageblock_nr_pages);
1446	usemapsize = usemapsize >> pageblock_order;
1447	usemapsize *= NR_PAGEBLOCK_BITS;
1448	usemapsize = roundup(usemapsize, BITS_PER_LONG);
1449
1450	return usemapsize / BITS_PER_BYTE;
1451}
1452
1453static void __ref setup_usemap(struct zone *zone)
1454{
1455	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
1456					       zone->spanned_pages);
1457	zone->pageblock_flags = NULL;
1458	if (usemapsize) {
1459		zone->pageblock_flags =
1460			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
1461					    zone_to_nid(zone));
1462		if (!zone->pageblock_flags)
1463			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
1464			      usemapsize, zone->name, zone_to_nid(zone));
1465	}
1466}
1467#else
1468static inline void setup_usemap(struct zone *zone) {}
1469#endif /* CONFIG_SPARSEMEM */
1470
1471#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
1472
1473/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
1474void __init set_pageblock_order(void)
1475{
1476	unsigned int order = MAX_PAGE_ORDER;
1477
1478	/* Check that pageblock_nr_pages has not already been setup */
1479	if (pageblock_order)
1480		return;
1481
1482	/* Don't let pageblocks exceed the maximum allocation granularity. */
1483	if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
1484		order = HUGETLB_PAGE_ORDER;
1485
1486	/*
1487	 * Assume the largest contiguous order of interest is a huge page.
1488	 * This value may be variable depending on boot parameters on powerpc.
1489	 */
1490	pageblock_order = order;
1491}
1492#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
1493
1494/*
1495 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
1496 * is unused as pageblock_order is set at compile-time. See
1497 * include/linux/pageblock-flags.h for the values of pageblock_order based on
1498 * the kernel config
1499 */
1500void __init set_pageblock_order(void)
1501{
1502}
1503
1504#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
1505
1506/*
1507 * Set up the zone data structures
1508 * - init pgdat internals
1509 * - init all zones belonging to this node
1510 *
1511 * NOTE: this function is only called during memory hotplug
1512 */
1513#ifdef CONFIG_MEMORY_HOTPLUG
1514void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
1515{
1516	int nid = pgdat->node_id;
1517	enum zone_type z;
1518	int cpu;
1519
1520	pgdat_init_internals(pgdat);
1521
1522	if (pgdat->per_cpu_nodestats == &boot_nodestats)
1523		pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
1524
1525	/*
1526	 * Reset the nr_zones, order and highest_zoneidx before reuse.
1527	 * Note that kswapd will init kswapd_highest_zoneidx properly
1528	 * when it starts in the near future.
1529	 */
1530	pgdat->nr_zones = 0;
1531	pgdat->kswapd_order = 0;
1532	pgdat->kswapd_highest_zoneidx = 0;
1533	pgdat->node_start_pfn = 0;
1534	pgdat->node_present_pages = 0;
1535
1536	for_each_online_cpu(cpu) {
1537		struct per_cpu_nodestat *p;
1538
1539		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
1540		memset(p, 0, sizeof(*p));
1541	}
1542
1543	/*
1544	 * When memory is hot-added, all the memory is in offline state. So
1545	 * clear all zones' present_pages and managed_pages because they will
1546	 * be updated in online_pages() and offline_pages().
1547	 */
1548	for (z = 0; z < MAX_NR_ZONES; z++) {
1549		struct zone *zone = pgdat->node_zones + z;
1550
1551		zone->present_pages = 0;
1552		zone_init_internals(zone, z, nid, 0);
1553	}
1554}
1555#endif
1556
1557static void __init free_area_init_core(struct pglist_data *pgdat)
1558{
1559	enum zone_type j;
1560	int nid = pgdat->node_id;
1561
1562	pgdat_init_internals(pgdat);
1563	pgdat->per_cpu_nodestats = &boot_nodestats;
1564
1565	for (j = 0; j < MAX_NR_ZONES; j++) {
1566		struct zone *zone = pgdat->node_zones + j;
1567		unsigned long size = zone->spanned_pages;
1568
1569		/*
1570		 * Initialize zone->managed_pages to 0; it will be reset
1571		 * when the memblock allocator frees pages into the buddy system.
1572		 */
1573		zone_init_internals(zone, j, nid, zone->present_pages);
1574
1575		if (!size)
1576			continue;
1577
1578		setup_usemap(zone);
1579		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
1580	}
1581}
1582
1583void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
1584			  phys_addr_t min_addr, int nid, bool exact_nid)
1585{
1586	void *ptr;
1587
1588	if (exact_nid)
1589		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
1590						   MEMBLOCK_ALLOC_ACCESSIBLE,
1591						   nid);
1592	else
1593		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
1594						 MEMBLOCK_ALLOC_ACCESSIBLE,
1595						 nid);
1596
1597	if (ptr && size > 0)
1598		page_init_poison(ptr, size);
1599
1600	return ptr;
1601}
1602
1603#ifdef CONFIG_FLATMEM
1604static void __init alloc_node_mem_map(struct pglist_data *pgdat)
1605{
1606	unsigned long start, offset, size, end;
1607	struct page *map;
1608
1609	/* Skip empty nodes */
1610	if (!pgdat->node_spanned_pages)
1611		return;
1612
1613	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
1614	offset = pgdat->node_start_pfn - start;
1615	/*
1616	 * The zone's endpoints aren't required to be MAX_PAGE_ORDER
1617	 * aligned, but the node_mem_map endpoints must be, in order
1618	 * for the buddy allocator to function correctly.
1619	 */
1620	end = ALIGN(pgdat_end_pfn(pgdat), MAX_ORDER_NR_PAGES);
1621	size = (end - start) * sizeof(struct page);
1622	map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
1623			   pgdat->node_id, false);
1624	if (!map)
1625		panic("Failed to allocate %ld bytes for node %d memory map\n",
1626		      size, pgdat->node_id);
1627	pgdat->node_mem_map = map + offset;
1628	memmap_boot_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));
1629	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
1630		 __func__, pgdat->node_id, (unsigned long)pgdat,
1631		 (unsigned long)pgdat->node_mem_map);
1632#ifndef CONFIG_NUMA
1633	/* the global mem_map is just set as node 0's */
1634	if (pgdat == NODE_DATA(0)) {
1635		mem_map = NODE_DATA(0)->node_mem_map;
1636		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
1637			mem_map -= offset;
1638	}
1639#endif
1640}
1641#else
1642static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
1643#endif /* CONFIG_FLATMEM */
1644
1645/**
1646 * get_pfn_range_for_nid - Return the start and end page frames for a node
1647 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
1648 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
1649 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
1650 *
1651 * It returns the start and end page frame of a node based on information
1652 * provided by memblock_set_node(). If called for a node
1653 * with no available memory, the start and end PFNs will be 0.
1654 */
1655void __init get_pfn_range_for_nid(unsigned int nid,
1656			unsigned long *start_pfn, unsigned long *end_pfn)
1657{
1658	unsigned long this_start_pfn, this_end_pfn;
1659	int i;
1660
1661	*start_pfn = -1UL;
1662	*end_pfn = 0;
1663
1664	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
1665		*start_pfn = min(*start_pfn, this_start_pfn);
1666		*end_pfn = max(*end_pfn, this_end_pfn);
1667	}
1668
1669	if (*start_pfn == -1UL)
1670		*start_pfn = 0;
1671}
1672
1673static void __init free_area_init_node(int nid)
1674{
1675	pg_data_t *pgdat = NODE_DATA(nid);
1676	unsigned long start_pfn = 0;
1677	unsigned long end_pfn = 0;
1678
1679	/* pg_data_t should be reset to zero when it's allocated */
1680	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
1681
1682	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1683
1684	pgdat->node_id = nid;
1685	pgdat->node_start_pfn = start_pfn;
1686	pgdat->per_cpu_nodestats = NULL;
1687
1688	if (start_pfn != end_pfn) {
1689		pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
1690			(u64)start_pfn << PAGE_SHIFT,
1691			end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
1692
1693		calculate_node_totalpages(pgdat, start_pfn, end_pfn);
1694	} else {
1695		pr_info("Initmem setup node %d as memoryless\n", nid);
1696
1697		reset_memoryless_node_totalpages(pgdat);
1698	}
1699
1700	alloc_node_mem_map(pgdat);
1701	pgdat_set_deferred_range(pgdat);
1702
1703	free_area_init_core(pgdat);
1704	lru_gen_init_pgdat(pgdat);
1705}
1706
1707/* Any regular or high memory on that node? */
1708static void __init check_for_memory(pg_data_t *pgdat)
1709{
1710	enum zone_type zone_type;
1711
1712	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
1713		struct zone *zone = &pgdat->node_zones[zone_type];
1714		if (populated_zone(zone)) {
1715			if (IS_ENABLED(CONFIG_HIGHMEM))
1716				node_set_state(pgdat->node_id, N_HIGH_MEMORY);
1717			if (zone_type <= ZONE_NORMAL)
1718				node_set_state(pgdat->node_id, N_NORMAL_MEMORY);
1719			break;
1720		}
1721	}
1722}
1723
1724#if MAX_NUMNODES > 1
1725/*
1726 * Figure out the number of possible node ids.
1727 */
1728void __init setup_nr_node_ids(void)
1729{
1730	unsigned int highest;
1731
1732	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
1733	nr_node_ids = highest + 1;
1734}
1735#endif
1736
1737/*
1738 * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
1739 * such cases we allow max_zone_pfn to be sorted in descending order.
1740 */
1741static bool arch_has_descending_max_zone_pfns(void)
1742{
1743	return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40);
1744}
1745
1746/**
1747 * free_area_init - Initialise all pg_data_t and zone data
1748 * @max_zone_pfn: an array of max PFNs for each zone
1749 *
1750 * This will call free_area_init_node() for each active node in the system.
1751 * Using the page ranges provided by memblock_set_node(), the size of each
1752 * zone in each node and their holes are calculated. If the maximum PFNs
1753 * of two adjacent zones match, it is assumed that the zone is empty.
1754 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
1755 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
1756 * starts where the previous one ended. For example, ZONE_DMA32 starts
1757 * at arch_max_dma_pfn.
1758 */
1759void __init free_area_init(unsigned long *max_zone_pfn)
1760{
1761	unsigned long start_pfn, end_pfn;
1762	int i, nid, zone;
1763	bool descending;
1764
1765	/* Record where the zone boundaries are */
1766	memset(arch_zone_lowest_possible_pfn, 0,
1767				sizeof(arch_zone_lowest_possible_pfn));
1768	memset(arch_zone_highest_possible_pfn, 0,
1769				sizeof(arch_zone_highest_possible_pfn));
1770
1771	start_pfn = PHYS_PFN(memblock_start_of_DRAM());
1772	descending = arch_has_descending_max_zone_pfns();
1773
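	/* Derive each zone's PFN range: a zone starts where the previous one ended */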
1774	for (i = 0; i < MAX_NR_ZONES; i++) {
1775		if (descending)
1776			zone = MAX_NR_ZONES - i - 1;
1777		else
1778			zone = i;
1779
1780		if (zone == ZONE_MOVABLE)
1781			continue;
1782
1783		end_pfn = max(max_zone_pfn[zone], start_pfn);
1784		arch_zone_lowest_possible_pfn[zone] = start_pfn;
1785		arch_zone_highest_possible_pfn[zone] = end_pfn;
1786
1787		start_pfn = end_pfn;
1788	}
1789
1790	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
1791	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
1792	find_zone_movable_pfns_for_nodes();
1793
1794	/* Print out the zone ranges */
1795	pr_info("Zone ranges:\n");
1796	for (i = 0; i < MAX_NR_ZONES; i++) {
1797		if (i == ZONE_MOVABLE)
1798			continue;
1799		pr_info("  %-8s ", zone_names[i]);
1800		if (arch_zone_lowest_possible_pfn[i] ==
1801				arch_zone_highest_possible_pfn[i])
1802			pr_cont("empty\n");
1803		else
1804			pr_cont("[mem %#018Lx-%#018Lx]\n",
1805				(u64)arch_zone_lowest_possible_pfn[i]
1806					<< PAGE_SHIFT,
1807				((u64)arch_zone_highest_possible_pfn[i]
1808					<< PAGE_SHIFT) - 1);
1809	}
1810
1811	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
1812	pr_info("Movable zone start for each node\n");
1813	for (i = 0; i < MAX_NUMNODES; i++) {
1814		if (zone_movable_pfn[i])
1815			pr_info("  Node %d: %#018Lx\n", i,
1816			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
1817	}
1818
1819	/*
1820	 * Print out the early node map, and initialize the
1821	 * subsection-map relative to active online memory ranges to
1822	 * enable future "sub-section" extensions of the memory map.
1823	 */
1824	pr_info("Early memory node ranges\n");
1825	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
1826		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
1827			(u64)start_pfn << PAGE_SHIFT,
1828			((u64)end_pfn << PAGE_SHIFT) - 1);
1829		subsection_map_init(start_pfn, end_pfn - start_pfn);
1830	}
1831
1832	/* Initialise every node */
1833	mminit_verify_pageflags_layout();
1834	setup_nr_node_ids();
1835	set_pageblock_order();
1836
1837	for_each_node(nid) {
1838		pg_data_t *pgdat;
1839
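		/* Make sure NODE_DATA() exists even for nodes that are not online yet */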
1840		if (!node_online(nid))
1841			alloc_offline_node_data(nid);
1842
1843		pgdat = NODE_DATA(nid);
1844		free_area_init_node(nid);
1845
1846		/*
1847		 * No sysfs hierarchy will be created via register_one_node()
1848		 * for a memory-less node, because it is not marked as N_MEMORY
1849		 * here and won't be set online later. The benefit is that
1850		 * userspace won't be confused by sysfs files/directories of a
1851		 * memory-less node. The pgdat will get fully initialized by
1852		 * hotadd_init_pgdat() when memory is hotplugged into this node.
1853		 */
1854		if (pgdat->node_present_pages) {
1855			node_set_state(nid, N_MEMORY);
1856			check_for_memory(pgdat);
1857		}
1858	}
1859
1860	calc_nr_kernel_pages();
1861	memmap_init();
1862
1863	/* disable hash distribution for systems with a single node */
1864	fixup_hashdist();
1865}
1866
1867/**
1868 * node_map_pfn_alignment - determine the maximum internode alignment
1869 *
1870 * This function should be called after node map is populated and sorted.
1871 * It calculates the maximum power of two alignment which can distinguish
1872 * all the nodes.
1873 *
1874 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
1875 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
1876 * nodes are shifted by 256MiB, the result is 256MiB.  Note that if only the last node is
1877 * shifted, 1GiB is enough and this function will indicate so.
1878 *
1879 * This is used to test whether pfn -> nid mapping of the chosen memory
1880 * model has fine enough granularity to avoid incorrect mapping for the
1881 * populated node map.
1882 *
1883 * Return: the determined alignment in pfn's.  0 if there is no alignment
1884 * requirement (single node).
1885 */
1886unsigned long __init node_map_pfn_alignment(void)
1887{
1888	unsigned long accl_mask = 0, last_end = 0;
1889	unsigned long start, end, mask;
1890	int last_nid = NUMA_NO_NODE;
1891	int i, nid;
1892
1893	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
1894		if (!start || last_nid < 0 || last_nid == nid) {
1895			last_nid = nid;
1896			last_end = end;
1897			continue;
1898		}
1899
1900		/*
1901		 * Start with a mask granular enough to pin-point to the
1902		 * start pfn and tick off bits one-by-one until it becomes
1903		 * too coarse to separate the current node from the last.
1904		 */
1905		mask = ~((1 << __ffs(start)) - 1);
1906		while (mask && last_end <= (start & (mask << 1)))
1907			mask <<= 1;
1908
1909		/* accumulate all internode masks */
1910		accl_mask |= mask;
1911	}
1912
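	/*
	 * accl_mask has its low bits clear up to the largest alignment that
	 * still separates every pair of adjacent nodes; ~accl_mask + 1 is
	 * that alignment expressed in pages.
	 */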
1913	/* convert mask to number of pages */
1914	return ~accl_mask + 1;
1915}
1916
1917#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1918static void __init deferred_free_pages(unsigned long pfn,
1919		unsigned long nr_pages)
1920{
1921	struct page *page;
1922	unsigned long i;
1923
1924	if (!nr_pages)
1925		return;
1926
1927	page = pfn_to_page(pfn);
1928
1929	/* Free a large naturally-aligned chunk if possible */
1930	if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
1931		for (i = 0; i < nr_pages; i += pageblock_nr_pages)
1932			set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
1933		__free_pages_core(page, MAX_PAGE_ORDER, MEMINIT_EARLY);
1934		return;
1935	}
1936
1937	/* Accept chunks smaller than MAX_PAGE_ORDER upfront */
1938	accept_memory(PFN_PHYS(pfn), nr_pages * PAGE_SIZE);
1939
1940	for (i = 0; i < nr_pages; i++, page++, pfn++) {
1941		if (pageblock_aligned(pfn))
1942			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1943		__free_pages_core(page, 0, MEMINIT_EARLY);
1944	}
1945}
1946
1947/* Completion tracking for deferred_init_memmap() threads */
1948static atomic_t pgdat_init_n_undone __initdata;
1949static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1950
1951static inline void __init pgdat_init_report_one_done(void)
1952{
1953	if (atomic_dec_and_test(&pgdat_init_n_undone))
1954		complete(&pgdat_init_all_done_comp);
1955}
1956
1957/*
1958 * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
1959 * by performing them only once every MAX_ORDER_NR_PAGES.
1960 * Return number of pages initialized.
1961 */
1962static unsigned long __init deferred_init_pages(struct zone *zone,
1963		unsigned long pfn, unsigned long end_pfn)
1964{
1965	int nid = zone_to_nid(zone);
1966	unsigned long nr_pages = end_pfn - pfn;
1967	int zid = zone_idx(zone);
1968	struct page *page = pfn_to_page(pfn);
1969
1970	for (; pfn < end_pfn; pfn++, page++)
1971		__init_single_page(page, pfn, zid, nid);
1972	return nr_pages;
1973}
1974
1975/*
1976 * This function is meant to pre-load the iterator for the zone init from
1977 * a given point.
1978 * Specifically, it walks through the ranges starting from the initial index
1979 * passed in until it has caught up to the first_init_pfn value, and exits
1980 * there. If first_init_pfn is never reached, false is returned, indicating
1981 * there are no valid ranges left.
1982 */
1983static bool __init
1984deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
1985				    unsigned long *spfn, unsigned long *epfn,
1986				    unsigned long first_init_pfn)
1987{
1988	u64 j = *i;
1989
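	/* A zero index means this is the first call; prime the iterator */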
1990	if (j == 0)
1991		__next_mem_pfn_range_in_zone(&j, zone, spfn, epfn);
1992
1993	/*
1994	 * Start out by walking through the ranges in this zone that have
1995	 * already been initialized. We don't need to do anything with them
1996	 * so we just need to flush them out of the system.
1997	 */
1998	for_each_free_mem_pfn_range_in_zone_from(j, zone, spfn, epfn) {
1999		if (*epfn <= first_init_pfn)
2000			continue;
2001		if (*spfn < first_init_pfn)
2002			*spfn = first_init_pfn;
2003		*i = j;
2004		return true;
2005	}
2006
2007	return false;
2008}
2009
2010/*
2011 * Initialize and free pages. We do it in two loops: first we initialize
2012 * struct page, then free to buddy allocator, because while we are
2013 * freeing pages we can access pages that are ahead (computing buddy
2014 * page in __free_one_page()).
2015 *
2016 * In order to try and keep some memory in the cache we have the loop
2017 * broken along max page order boundaries. This way we will not cause
2018 * any issues with the buddy page computation.
2019 */
2020static unsigned long __init
2021deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
2022		       unsigned long *end_pfn)
2023{
2024	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
2025	unsigned long spfn = *start_pfn, epfn = *end_pfn;
2026	unsigned long nr_pages = 0;
2027	u64 j = *i;
2028
2029	/* First we loop through and initialize the page values */
2030	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
2031		unsigned long t;
2032
2033		if (mo_pfn <= *start_pfn)
2034			break;
2035
2036		t = min(mo_pfn, *end_pfn);
2037		nr_pages += deferred_init_pages(zone, *start_pfn, t);
2038
2039		if (mo_pfn < *end_pfn) {
2040			*start_pfn = mo_pfn;
2041			break;
2042		}
2043	}
2044
2045	/* Reset values and now loop through freeing pages as needed */
2046	swap(j, *i);
2047
2048	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
2049		unsigned long t;
2050
2051		if (mo_pfn <= spfn)
2052			break;
2053
2054		t = min(mo_pfn, epfn);
2055		deferred_free_pages(spfn, t - spfn);
2056
2057		if (mo_pfn <= epfn)
2058			break;
2059	}
2060
2061	return nr_pages;
2062}
2063
2064static void __init
2065deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
2066			   void *arg)
2067{
2068	unsigned long spfn, epfn;
2069	struct zone *zone = arg;
2070	u64 i = 0;
2071
2072	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
2073
2074	/*
2075	 * Initialize and free pages in MAX_PAGE_ORDER sized increments so that
2076	 * we can avoid introducing any issues with the buddy allocator.
2077	 */
2078	while (spfn < end_pfn) {
2079		deferred_init_maxorder(&i, zone, &spfn, &epfn);
2080		cond_resched();
2081	}
2082}
2083
2084static unsigned int __init
2085deferred_page_init_max_threads(const struct cpumask *node_cpumask)
2086{
2087	return max(cpumask_weight(node_cpumask), 1U);
2088}
2089
2090/* Initialise remaining memory on a node */
2091static int __init deferred_init_memmap(void *data)
2092{
2093	pg_data_t *pgdat = data;
2094	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2095	unsigned long spfn = 0, epfn = 0;
2096	unsigned long first_init_pfn, flags;
2097	unsigned long start = jiffies;
2098	struct zone *zone;
2099	int max_threads;
2100	u64 i = 0;
2101
2102	/* Bind memory initialisation thread to a local node if possible */
2103	if (!cpumask_empty(cpumask))
2104		set_cpus_allowed_ptr(current, cpumask);
2105
2106	pgdat_resize_lock(pgdat, &flags);
2107	first_init_pfn = pgdat->first_deferred_pfn;
2108	if (first_init_pfn == ULONG_MAX) {
2109		pgdat_resize_unlock(pgdat, &flags);
2110		pgdat_init_report_one_done();
2111		return 0;
2112	}
2113
2114	/* Sanity check boundaries */
2115	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2116	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2117	pgdat->first_deferred_pfn = ULONG_MAX;
2118
2119	/*
2120	 * Once we unlock here, the zone cannot be grown anymore, thus if an
2121	 * interrupt thread must allocate this early in boot, zone must be
2122	 * pre-grown prior to start of deferred page initialization.
2123	 */
2124	pgdat_resize_unlock(pgdat, &flags);
2125
2126	/* Only the highest zone is deferred */
2127	zone = pgdat->node_zones + pgdat->nr_zones - 1;
2128
2129	max_threads = deferred_page_init_max_threads(cpumask);
2130
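	/*
	 * Walk the remaining ranges of the zone and hand each section-aligned
	 * chunk to padata so that initialisation runs on up to max_threads
	 * CPUs in parallel.
	 */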
2131	while (deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, first_init_pfn)) {
2132		first_init_pfn = ALIGN(epfn, PAGES_PER_SECTION);
2133		struct padata_mt_job job = {
2134			.thread_fn   = deferred_init_memmap_chunk,
2135			.fn_arg      = zone,
2136			.start       = spfn,
2137			.size        = first_init_pfn - spfn,
2138			.align       = PAGES_PER_SECTION,
2139			.min_chunk   = PAGES_PER_SECTION,
2140			.max_threads = max_threads,
2141			.numa_aware  = false,
2142		};
2143
2144		padata_do_multithreaded(&job);
2145	}
2146
2147	/* Sanity check that the next zone really is unpopulated */
2148	WARN_ON(pgdat->nr_zones < MAX_NR_ZONES && populated_zone(++zone));
2149
2150	pr_info("node %d deferred pages initialised in %ums\n",
2151		pgdat->node_id, jiffies_to_msecs(jiffies - start));
2152
2153	pgdat_init_report_one_done();
2154	return 0;
2155}
2156
2157/*
2158 * If this zone has deferred pages, try to grow it by initializing enough
2159 * deferred pages to satisfy the allocation specified by order, rounded up to
2160 * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
2161 * of SECTION_SIZE bytes by initializing struct pages in increments of
2162 * PAGES_PER_SECTION * sizeof(struct page) bytes.
2163 *
2164 * Return true when zone was grown, otherwise return false. We return true even
2165 * when we grow less than requested, to let the caller decide if there are
2166 * enough pages to satisfy the allocation.
2167 */
2168bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
2169{
2170	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2171	pg_data_t *pgdat = zone->zone_pgdat;
2172	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2173	unsigned long spfn, epfn, flags;
2174	unsigned long nr_pages = 0;
2175	u64 i = 0;
2176
2177	/* Only the last zone may have deferred pages */
2178	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2179		return false;
2180
2181	pgdat_resize_lock(pgdat, &flags);
2182
2183	/*
2184	 * If someone grew this zone while we were waiting for spinlock, return
2185	 * true, as there might be enough pages already.
2186	 */
2187	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2188		pgdat_resize_unlock(pgdat, &flags);
2189		return true;
2190	}
2191
2192	/* If the zone is empty, somebody else may have already cleared it out */
2193	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2194						 first_deferred_pfn)) {
2195		pgdat->first_deferred_pfn = ULONG_MAX;
2196		pgdat_resize_unlock(pgdat, &flags);
2197		/* Retry only once. */
2198		return first_deferred_pfn != ULONG_MAX;
2199	}
2200
2201	/*
2202	 * Initialize and free pages in MAX_PAGE_ORDER sized increments so
2203	 * that we can avoid introducing any issues with the buddy
2204	 * allocator.
2205	 */
2206	while (spfn < epfn) {
2207		/* update our first deferred PFN for this section */
2208		first_deferred_pfn = spfn;
2209
2210		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2211		touch_nmi_watchdog();
2212
2213		/* We should only stop along section boundaries */
2214		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2215			continue;
2216
2217		/* If our quota has been met we can stop here */
2218		if (nr_pages >= nr_pages_needed)
2219			break;
2220	}
2221
2222	pgdat->first_deferred_pfn = spfn;
2223	pgdat_resize_unlock(pgdat, &flags);
2224
2225	return nr_pages > 0;
2226}
2227
2228#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2229
2230#ifdef CONFIG_CMA
2231void __init init_cma_reserved_pageblock(struct page *page)
2232{
2233	unsigned i = pageblock_nr_pages;
2234	struct page *p = page;
2235
2236	do {
2237		__ClearPageReserved(p);
2238		set_page_count(p, 0);
2239	} while (++p, --i);
2240
2241	set_pageblock_migratetype(page, MIGRATE_CMA);
2242	set_page_refcounted(page);
2243	/* pages were reserved and not allocated */
2244	clear_page_tag_ref(page);
2245	__free_pages(page, pageblock_order);
2246
2247	adjust_managed_page_count(page, pageblock_nr_pages);
2248	page_zone(page)->cma_pages += pageblock_nr_pages;
2249}
2250#endif
2251
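/*
 * Walk every pageblock in the zone; if each one maps to a valid range of
 * struct pages, mark the zone as contiguous so later pfn walkers can skip
 * the per-pageblock validity checks.
 */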
2252void set_zone_contiguous(struct zone *zone)
2253{
2254	unsigned long block_start_pfn = zone->zone_start_pfn;
2255	unsigned long block_end_pfn;
2256
2257	block_end_pfn = pageblock_end_pfn(block_start_pfn);
2258	for (; block_start_pfn < zone_end_pfn(zone);
2259			block_start_pfn = block_end_pfn,
2260			 block_end_pfn += pageblock_nr_pages) {
2261
2262		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
2263
2264		if (!__pageblock_pfn_to_page(block_start_pfn,
2265					     block_end_pfn, zone))
2266			return;
2267		cond_resched();
2268	}
2269
2270	/* We confirm that there is no hole */
2271	zone->contiguous = true;
2272}
2273
2274static void __init mem_init_print_info(void);
2275void __init page_alloc_init_late(void)
2276{
2277	struct zone *zone;
2278	int nid;
2279
2280#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2281
2282	/* There will be num_node_state(N_MEMORY) threads */
2283	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2284	for_each_node_state(nid, N_MEMORY) {
2285		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2286	}
2287
2288	/* Block until all are initialised */
2289	wait_for_completion(&pgdat_init_all_done_comp);
2290
2291	/*
2292	 * We initialized the rest of the deferred pages.  Permanently disable
2293	 * on-demand struct page initialization.
2294	 */
2295	static_branch_disable(&deferred_pages);
2296
2297	/* Reinit limits that are based on free pages after the kernel is up */
2298	files_maxfiles_init();
2299#endif
2300
2301	/* Accounting of total+free memory is stable at this point. */
2302	mem_init_print_info();
2303	buffer_init();
2304
2305	/* Discard memblock private memory */
2306	memblock_discard();
2307
2308	for_each_node_state(nid, N_MEMORY)
2309		shuffle_free_memory(NODE_DATA(nid));
2310
2311	for_each_populated_zone(zone)
2312		set_zone_contiguous(zone);
2313
2314	/* Initialize page ext after all struct pages are initialized. */
2315	if (deferred_struct_pages)
2316		page_ext_init();
2317
2318	page_alloc_sysctl_init();
2319}
2320
2321/*
2322 * Adaptive scale is meant to reduce sizes of hash tables on large memory
2323 * machines. As memory size is increased the scale is also increased but at
2324 * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
2325 * quadruples the scale is increased by one, which means the size of hash table
2326 * only doubles, instead of quadrupling as well.
2327 * Because 32-bit systems cannot have large physical memory, where this scaling
2328 * makes sense, it is disabled on such platforms.
2329 */
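/* For example: growing from 64G to 256G of RAM bumps the scale once, so the table doubles rather than quadruples. */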
2330#if __BITS_PER_LONG > 32
2331#define ADAPT_SCALE_BASE	(64ul << 30)
2332#define ADAPT_SCALE_SHIFT	2
2333#define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
2334#endif
2335
2336/*
2337 * allocate a large system hash table from bootmem
2338 * - it is assumed that the hash table must contain an exact power-of-2
2339 *   quantity of entries
2340 * - limit is the number of hash buckets, not the total allocation size
2341 */
2342void *__init alloc_large_system_hash(const char *tablename,
2343				     unsigned long bucketsize,
2344				     unsigned long numentries,
2345				     int scale,
2346				     int flags,
2347				     unsigned int *_hash_shift,
2348				     unsigned int *_hash_mask,
2349				     unsigned long low_limit,
2350				     unsigned long high_limit)
2351{
2352	unsigned long long max = high_limit;
2353	unsigned long log2qty, size;
2354	void *table;
2355	gfp_t gfp_flags;
2356	bool virt;
2357	bool huge;
2358
2359	/* allow the kernel cmdline to have a say */
2360	if (!numentries) {
2361		/* round applicable memory size up to nearest megabyte */
2362		numentries = nr_kernel_pages;
2363
2364		/* Rounding up isn't necessary when PAGE_SIZE >= 1MB */
2365		if (PAGE_SIZE < SZ_1M)
2366			numentries = round_up(numentries, SZ_1M / PAGE_SIZE);
2367
2368#if __BITS_PER_LONG > 32
2369		if (!high_limit) {
2370			unsigned long adapt;
2371
2372			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
2373			     adapt <<= ADAPT_SCALE_SHIFT)
2374				scale++;
2375		}
2376#endif
2377
2378		/* limit to 1 bucket per 2^scale bytes of low memory */
2379		if (scale > PAGE_SHIFT)
2380			numentries >>= (scale - PAGE_SHIFT);
2381		else
2382			numentries <<= (PAGE_SHIFT - scale);
2383
2384		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
2385			numentries = PAGE_SIZE / bucketsize;
2386	}
2387	numentries = roundup_pow_of_two(numentries);
2388
2389	/* limit allocation size to 1/16 total memory by default */
2390	if (max == 0) {
2391		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
2392		do_div(max, bucketsize);
2393	}
2394	max = min(max, 0x80000000ULL);
2395
2396	if (numentries < low_limit)
2397		numentries = low_limit;
2398	if (numentries > max)
2399		numentries = max;
2400
2401	log2qty = ilog2(numentries);
2402
2403	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
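	/* Retry with progressively smaller tables until an allocation succeeds */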
2404	do {
2405		virt = false;
2406		size = bucketsize << log2qty;
2407		if (flags & HASH_EARLY) {
2408			if (flags & HASH_ZERO)
2409				table = memblock_alloc(size, SMP_CACHE_BYTES);
2410			else
2411				table = memblock_alloc_raw(size,
2412							   SMP_CACHE_BYTES);
2413		} else if (get_order(size) > MAX_PAGE_ORDER || hashdist) {
2414			table = vmalloc_huge(size, gfp_flags);
2415			virt = true;
2416			if (table)
2417				huge = is_vm_area_hugepages(table);
2418		} else {
2419			/*
2420			 * If bucketsize is not a power of two, we may free
2421			 * some pages at the end of the hash table, which
2422			 * alloc_pages_exact() does automatically.
2423			 */
2424			table = alloc_pages_exact(size, gfp_flags);
2425			kmemleak_alloc(table, size, 1, gfp_flags);
2426		}
2427	} while (!table && size > PAGE_SIZE && --log2qty);
2428
2429	if (!table)
2430		panic("Failed to allocate %s hash table\n", tablename);
2431
2432	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
2433		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
2434		virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
2435
2436	if (_hash_shift)
2437		*_hash_shift = log2qty;
2438	if (_hash_mask)
2439		*_hash_mask = (1 << log2qty) - 1;
2440
2441	return table;
2442}
2443
2444void __init memblock_free_pages(struct page *page, unsigned long pfn,
2445							unsigned int order)
2446{
2447	if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) {
2448		int nid = early_pfn_to_nid(pfn);
2449
2450		if (!early_page_initialised(pfn, nid))
2451			return;
2452	}
2453
2454	if (!kmsan_memblock_free_pages(page, order)) {
2455		/* KMSAN will take care of these pages. */
2456		return;
2457	}
2458
2459	/* pages were reserved and not allocated */
2460	clear_page_tag_ref(page);
2461	__free_pages_core(page, order, MEMINIT_EARLY);
2462}
2463
2464DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
2465EXPORT_SYMBOL(init_on_alloc);
2466
2467DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
2468EXPORT_SYMBOL(init_on_free);
2469
2470static bool _init_on_alloc_enabled_early __read_mostly
2471				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
2472static int __init early_init_on_alloc(char *buf)
2473{
2474
2475	return kstrtobool(buf, &_init_on_alloc_enabled_early);
2476}
2477early_param("init_on_alloc", early_init_on_alloc);
2478
2479static bool _init_on_free_enabled_early __read_mostly
2480				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
2481static int __init early_init_on_free(char *buf)
2482{
2483	return kstrtobool(buf, &_init_on_free_enabled_early);
2484}
2485early_param("init_on_free", early_init_on_free);
2486
2487DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
2488
2489/*
2490 * Enable static keys related to various memory debugging and hardening options.
2491 * Some override others, and depend on early params that are evaluated in the
2492 * order of appearance. So we need to first gather the full picture of what was
2493 * enabled, and then make decisions.
2494 */
2495static void __init mem_debugging_and_hardening_init(void)
2496{
2497	bool page_poisoning_requested = false;
2498	bool want_check_pages = false;
2499
2500#ifdef CONFIG_PAGE_POISONING
2501	/*
2502	 * Page poisoning is debug page alloc for arches that don't support
2503	 * it natively. If either of those options is enabled, enable poisoning.
2504	 */
2505	if (page_poisoning_enabled() ||
2506	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
2507	      debug_pagealloc_enabled())) {
2508		static_branch_enable(&_page_poisoning_enabled);
2509		page_poisoning_requested = true;
2510		want_check_pages = true;
2511	}
2512#endif
2513
2514	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
2515	    page_poisoning_requested) {
2516		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
2517			"will take precedence over init_on_alloc and init_on_free\n");
2518		_init_on_alloc_enabled_early = false;
2519		_init_on_free_enabled_early = false;
2520	}
2521
2522	if (_init_on_alloc_enabled_early) {
2523		want_check_pages = true;
2524		static_branch_enable(&init_on_alloc);
2525	} else {
2526		static_branch_disable(&init_on_alloc);
2527	}
2528
2529	if (_init_on_free_enabled_early) {
2530		want_check_pages = true;
2531		static_branch_enable(&init_on_free);
2532	} else {
2533		static_branch_disable(&init_on_free);
2534	}
2535
2536	if (IS_ENABLED(CONFIG_KMSAN) &&
2537	    (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
2538		pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
2539
2540#ifdef CONFIG_DEBUG_PAGEALLOC
2541	if (debug_pagealloc_enabled()) {
2542		want_check_pages = true;
2543		static_branch_enable(&_debug_pagealloc_enabled);
2544
2545		if (debug_guardpage_minorder())
2546			static_branch_enable(&_debug_guardpage_enabled);
2547	}
2548#endif
2549
2550	/*
2551	 * Any page debugging or hardening option also enables sanity checking
2552	 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
2553	 * enabled already.
2554	 */
2555	if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
2556		static_branch_enable(&check_pages_enabled);
2557}
2558
2559/* Report memory auto-initialization states for this boot. */
2560static void __init report_meminit(void)
2561{
2562	const char *stack;
2563
2564	if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN))
2565		stack = "all(pattern)";
2566	else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO))
2567		stack = "all(zero)";
2568	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL))
2569		stack = "byref_all(zero)";
2570	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF))
2571		stack = "byref(zero)";
2572	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER))
2573		stack = "__user(zero)";
2574	else
2575		stack = "off";
2576
2577	pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
2578		stack, str_on_off(want_init_on_alloc(GFP_KERNEL)),
2579		str_on_off(want_init_on_free()));
2580	if (want_init_on_free())
2581		pr_info("mem auto-init: clearing system memory may take some time...\n");
2582}
2583
2584static void __init mem_init_print_info(void)
2585{
2586	unsigned long physpages, codesize, datasize, rosize, bss_size;
2587	unsigned long init_code_size, init_data_size;
2588
2589	physpages = get_num_physpages();
2590	codesize = _etext - _stext;
2591	datasize = _edata - _sdata;
2592	rosize = __end_rodata - __start_rodata;
2593	bss_size = __bss_stop - __bss_start;
2594	init_data_size = __init_end - __init_begin;
2595	init_code_size = _einittext - _sinittext;
2596
2597	/*
2598	 * Detect special cases and adjust section sizes accordingly:
2599	 * 1) .init.* may be embedded into .data sections
2600	 * 2) .init.text.* may be out of [__init_begin, __init_end],
2601	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
2602	 * 3) .rodata.* may be embedded into .text or .data sections.
2603	 */
2604#define adj_init_size(start, end, size, pos, adj) \
2605	do { \
2606		if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
2607			size -= adj; \
2608	} while (0)
2609
2610	adj_init_size(__init_begin, __init_end, init_data_size,
2611		     _sinittext, init_code_size);
2612	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
2613	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
2614	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
2615	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
2616
2617#undef	adj_init_size
2618
2619	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
2620#ifdef	CONFIG_HIGHMEM
2621		", %luK highmem"
2622#endif
2623		")\n",
2624		K(nr_free_pages()), K(physpages),
2625		codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
2626		(init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
2627		K(physpages - totalram_pages() - totalcma_pages),
2628		K(totalcma_pages)
2629#ifdef	CONFIG_HIGHMEM
2630		, K(totalhigh_pages())
2631#endif
2632		);
2633}
2634
2635/*
2636 * Set up kernel memory allocators
2637 */
2638void __init mm_core_init(void)
2639{
2640	/* Initializations relying on SMP setup */
2641	BUILD_BUG_ON(MAX_ZONELISTS > 2);
2642	build_all_zonelists(NULL);
2643	page_alloc_init_cpuhp();
2644	alloc_tag_sec_init();
2645	/*
2646	 * page_ext requires contiguous pages
2647	 * bigger than MAX_PAGE_ORDER, unless SPARSEMEM is used.
2648	 */
2649	page_ext_init_flatmem();
2650	mem_debugging_and_hardening_init();
2651	kfence_alloc_pool_and_metadata();
2652	report_meminit();
2653	kmsan_init_shadow();
2654	stack_depot_early_init();
2655	mem_init();
2656	kmem_cache_init();
2657	/*
2658	 * page_owner must be initialized after buddy is ready, and also after
2659	 * slab is ready so that stack_depot_init() works properly
2660	 */
2661	page_ext_init_flatmem_late();
2662	kmemleak_init();
2663	ptlock_cache_init();
2664	pgtable_cache_init();
2665	debug_objects_mem_init();
2666	vmalloc_init();
2667	/* If struct page init was not deferred, init page_ext now that vmap is fully initialized */
2668	if (!deferred_struct_pages)
2669		page_ext_init();
2670	/* Should be run before the first non-init thread is created */
2671	init_espfix_bsp();
2672	/* Should be run after espfix64 is set up. */
2673	pti_init();
2674	kmsan_init_runtime();
2675	mm_cache_init();
2676	execmem_init();
2677}