Note: File does not exist in v6.8.
/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc.  All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * Copyright (c) 2004 Silicon Graphics, Inc
 *	Russ Anderson <rja@sgi.com>
 *	Jesse Barnes <jbarnes@sgi.com>
 *	Jack Steiner <steiner@sgi.com>
 */

/*
 * Platform initialization for Discontig Memory
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>

/*
 * Track per-node information needed to setup the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
	struct ia64_node_data *node_data;
	unsigned long pernode_addr;
	unsigned long pernode_size;
#ifdef CONFIG_ZONE_DMA
	unsigned long num_dma_physpages;
#endif
	unsigned long min_pfn;
	unsigned long max_pfn;
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;

pg_data_t *pgdat_list[MAX_NUMNODES];

/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define MAX_NODE_ALIGN_OFFSET	(32 * 1024 * 1024)
#define NODEDATA_ALIGN(addr, node)						\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) +				\
	     (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
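/*
 * Illustration (hypothetical numbers, assuming the usual 64KB
 * PERCPU_PAGE_SIZE): NODEDATA_ALIGN(addr, 3) first rounds addr up to the
 * next 1MB boundary and then adds 3 * 64KB = 192KB.  Consecutive nodes thus
 * start at addresses offset by a node-dependent stride (wrapping at
 * MAX_NODE_ALIGN_OFFSET) instead of aliasing to the same cache sets.
 */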

/**
 * build_node_maps - callback to setup bootmem structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * We allocate a struct bootmem_data for each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary.  Any non-existent pages will simply be part of the virtual
 * memmap.  We also update min_low_pfn and max_low_pfn here as we receive
 * memory ranges from the caller.
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long spfn, epfn, end = start + len;
	struct bootmem_data *bdp = &bootmem_node_data[node];

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;

	if (!bdp->node_low_pfn) {
		bdp->node_min_pfn = spfn;
		bdp->node_low_pfn = epfn;
	} else {
		bdp->node_min_pfn = min(spfn, bdp->node_min_pfn);
		bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
	}

	return 0;
}
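/*
 * Worked example (hypothetical numbers, assuming the common 16MB
 * IA64_GRANULE_SIZE): a range covering [0x1100000, 0x2100000) is widened to
 * granule boundaries, so spfn corresponds to 16MB and epfn to 48MB.  The
 * pages added by the rounding need not exist; they are only covered by the
 * virtual memmap.
 */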

/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node.  We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet.  Note that node 0 will also count all non-existent cpus.
 */
static int __meminit early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for_each_possible_early_cpu(cpu)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}

/**
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 */
static unsigned long __meminit compute_pernodesize(int node)
{
	unsigned long pernodesize = 0, cpus;

	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += node * L1_CACHE_BYTES;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize = PAGE_ALIGN(pernodesize);
	return pernodesize;
}
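/*
 * The terms above mirror, in the same order, the layout that fill_pernode()
 * walks below: one PERCPU_PAGE_SIZE area per cpu on the node, a
 * node-dependent cache-line stagger, the node's pg_data_t, its
 * ia64_node_data, and a further pg_data_t-sized slot that fill_pernode()
 * also skips.  The two functions must stay in sync.
 */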

/**
 * per_cpu_node_setup - setup per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 * @node: node to setup
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * setup __per_cpu_offset for each CPU on this node.  Return a pointer to
 * the end of the area.
 */
static void *per_cpu_node_setup(void *cpu_data, int node)
{
#ifdef CONFIG_SMP
	int cpu;

	for_each_possible_early_cpu(cpu) {
		void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;

		if (node != node_cpuid[cpu].nid)
			continue;

		memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
			__per_cpu_start;

		/*
		 * The percpu area for cpu0 is moved from the __init area
		 * which is setup by head.S and used till this point.
		 * Update ar.k3.  This move ensures that the percpu
		 * area for cpu0 is on the correct node and its
		 * virtual address isn't insanely far from other
		 * percpu areas, which is important for the congruent
		 * percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA,
				    (unsigned long)cpu_data -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
#endif
	return cpu_data;
}
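/*
 * After this returns, __per_cpu_offset[cpu] for every cpu on @node is the
 * delta to add to a per-cpu symbol's link-time address to reach that cpu's
 * private copy.  The copies for one node sit back to back, one
 * PERCPU_PAGE_SIZE slot per cpu, carved out of the node's pernode area.
 */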

#ifdef CONFIG_SMP
/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas.  All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *uninitialized_var(gi);
	unsigned int *cpu_map;
	void *base;
	unsigned long base_offset;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;
	int node, prev_node, unit, nr_units, rc;

	ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	cpu_map = ai->groups[0].cpu_map;

	/* determine base */
	base = (void *)ULONG_MAX;
	for_each_possible_cpu(cpu)
		base = min(base,
			   (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
	base_offset = (void *)__per_cpu_start - base;

	/* build cpu_map, units are grouped by node */
	unit = 0;
	for_each_node(node)
		for_each_possible_cpu(cpu)
			if (node == node_cpuid[cpu].nid)
				cpu_map[unit++] = cpu;
	nr_units = unit;

	/* set basic parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size		= static_size;
	ai->reserved_size	= reserved_size;
	ai->dyn_size		= dyn_size;
	ai->unit_size		= PERCPU_PAGE_SIZE;
	ai->atom_size		= PAGE_SIZE;
	ai->alloc_size		= PERCPU_PAGE_SIZE;

	/*
	 * CPUs are put into groups according to node.  Walk cpu_map
	 * and create new groups at node boundaries.
	 */
	prev_node = -1;
	ai->nr_groups = 0;
	for (unit = 0; unit < nr_units; unit++) {
		cpu = cpu_map[unit];
		node = node_cpuid[cpu].nid;

		if (node == prev_node) {
			gi->nr_units++;
			continue;
		}
		prev_node = node;

		gi = &ai->groups[ai->nr_groups++];
		gi->nr_units		= 1;
		gi->base_offset		= __per_cpu_offset[cpu] + base_offset;
		gi->cpu_map		= &cpu_map[unit];
	}

	rc = pcpu_setup_first_chunk(ai, base);
	if (rc)
		panic("failed to setup percpu area (err=%d)", rc);

	pcpu_free_alloc_info(ai);
}
#endif
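/*
 * Sketch of the result on a hypothetical two-node box (cpus 0-1 on node 0,
 * cpus 2-3 on node 1): the grouping loop above yields cpu_map = { 0, 1, 2, 3 }
 * and two groups, each with nr_units = 2 and a base_offset pointing at that
 * node's first percpu slot.  Units within a group are contiguous and
 * PERCPU_PAGE_SIZE apart, because per_cpu_node_setup() laid them out that
 * way; the groups themselves live on different nodes and may be far apart.
 */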

/**
 * fill_pernode - initialize pernode data.
 * @node: the node id.
 * @pernode: physical address of pernode data
 * @pernodesize: size of the pernode data
 */
static void __init fill_pernode(int node, unsigned long pernode,
	unsigned long pernodesize)
{
	void *cpu_data;
	int cpus = early_nr_cpus_node(node);
	struct bootmem_data *bdp = &bootmem_node_data[node];

	mem_data[node].pernode_addr = pernode;
	mem_data[node].pernode_size = pernodesize;
	memset(__va(pernode), 0, pernodesize);

	cpu_data = (void *)pernode;
	pernode += PERCPU_PAGE_SIZE * cpus;
	pernode += node * L1_CACHE_BYTES;

	pgdat_list[node] = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	mem_data[node].node_data = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));

	pgdat_list[node]->bdata = bdp;
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	cpu_data = per_cpu_node_setup(cpu_data, node);

	return;
}

/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct.  Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   | Node 0 will also have entries for all non-existent cpus.
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |          ???           |
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized.  We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long spfn, epfn;
	unsigned long pernodesize = 0, pernode, pages, mapsize;
	struct bootmem_data *bdp = &bootmem_node_data[node];

	spfn = start >> PAGE_SHIFT;
	epfn = (start + len) >> PAGE_SHIFT;

	pages = bdp->node_low_pfn - bdp->node_min_pfn;
	mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;

	/*
	 * Make sure this memory falls within this node's usable memory
	 * since we may have thrown some away in build_maps().
	 */
	if (spfn < bdp->node_min_pfn || epfn > bdp->node_low_pfn)
		return 0;

	/* Don't setup this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	pernodesize = compute_pernodesize(node);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize + mapsize))
		fill_pernode(node, pernode, pernodesize);

	return 0;
}

/**
 * free_node_bootmem - free bootmem allocator memory for use
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Simply calls the bootmem allocator to free the specified range from
 * the given pg_data_t's bdata struct.  After this function has been called
 * for all the entries in the EFI memory map, the bootmem allocator will
 * be ready to service allocation requests.
 */
static int __init free_node_bootmem(unsigned long start, unsigned long len,
				    int node)
{
	free_bootmem_node(pgdat_list[node], start, len);

	return 0;
}

/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.
 */
static void __init reserve_pernode_space(void)
{
	unsigned long base, size, pages;
	struct bootmem_data *bdp;
	int node;

	for_each_online_node(node) {
		pg_data_t *pdp = pgdat_list[node];

		if (node_isset(node, memory_less_mask))
			continue;

		bdp = pdp->bdata;

		/* First the bootmem_map itself */
		pages = bdp->node_low_pfn - bdp->node_min_pfn;
		size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
		base = __pa(bdp->node_bootmem_map);
		reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
	}
}

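/*
 * Each node's ia64_node_data carries its own copy of pgdat_list
 * (pg_data_ptrs); NODE_DATA() resolves through the local node's copy, so
 * looking up another node's pg_data_t never requires a remote access just
 * to fetch the pointer.  scatter_node_data() is what keeps those copies
 * current.
 */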
static void __meminit scatter_node_data(void)
{
	pg_data_t **dst;
	int node;

	/*
	 * for_each_online_node() can't be used here, because
	 * node_online_map is not set for hot-added nodes at this time;
	 * we are halfway through initialization of the new node's
	 * structures.  If for_each_online_node() were used, a new node's
	 * pg_data_ptrs would not be initialized.  Check pgdat_list[]
	 * instead.
	 */
	for_each_node(node) {
		if (pgdat_list[node]) {
			dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
			memcpy(dst, pgdat_list, sizeof(pgdat_list));
		}
	}
}

/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.  The active_cpus field of the per-node
 * structure gets setup by the platform_cpu_init() function later.
 */
static void __init initialize_pernode_data(void)
{
	int cpu, node;

	scatter_node_data();

#ifdef CONFIG_SMP
	/* Set the node_data pointer for each per-cpu struct */
	for_each_possible_early_cpu(cpu) {
		node = node_cpuid[cpu].nid;
		per_cpu(ia64_cpu_info, cpu).node_data =
			mem_data[node].node_data;
	}
#else
	{
		struct cpuinfo_ia64 *cpu0_cpu_info;
		cpu = 0;
		node = node_cpuid[cpu].nid;
		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
			((char *)&ia64_cpu_info - __per_cpu_start));
		cpu0_cpu_info->node_data = mem_data[node].node_data;
	}
#endif /* CONFIG_SMP */
}

/**
 * memory_less_node_alloc - attempt to allocate memory on the best NUMA SLIT
 *	node, but fall back to any other node when __alloc_bootmem_node()
 *	fails for the best node.
 * @nid: node id
 * @pernodesize: size of this node's pernode data
 */
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
	void *ptr = NULL;
	u8 best = 0xff;
	int bestnode = -1, node, anynode = 0;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;
		else if (node_distance(nid, node) < best) {
			best = node_distance(nid, node);
			bestnode = node;
		}
		anynode = node;
	}

	if (bestnode == -1)
		bestnode = anynode;

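	/*
	 * __alloc_bootmem_node(pgdat, size, align, goal): align the pernode
	 * area to PERCPU_PAGE_SIZE, matching the alignment the memory-ful
	 * path gets via NODEDATA_ALIGN(), and prefer (but do not require)
	 * physical memory above MAX_DMA_ADDRESS so the donor node's DMA
	 * zone is left alone when possible.
	 */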
	ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
		PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));

	return ptr;
}

/**
 * memory_less_nodes - allocate and initialize pernode information for
 *	CPU-only (memory-less) nodes.
 */
static void __init memory_less_nodes(void)
{
	unsigned long pernodesize;
	void *pernode;
	int node;

	for_each_node_mask(node, memory_less_mask) {
		pernodesize = compute_pernodesize(node);
		pernode = memory_less_node_alloc(node, pernodesize);
		fill_pernode(node, __pa(pernode), pernodesize);
	}

	return;
}

/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
	int node;

	reserve_memory();

	if (num_online_nodes() == 0) {
		printk(KERN_ERR "node info missing!\n");
		node_set_online(0);
	}

	nodes_or(memory_less_mask, memory_less_mask, node_online_map);
	min_low_pfn = -1;
	max_low_pfn = 0;

	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
	efi_memmap_walk(find_max_min_low_pfn, NULL);

	for_each_online_node(node)
		if (bootmem_node_data[node].node_low_pfn) {
			node_clear(node, memory_less_mask);
			mem_data[node].min_pfn = ~0UL;
		}

	efi_memmap_walk(filter_memory, register_active_ranges);

	/*
	 * Initialize the boot memory maps in reverse order since that's
	 * what the bootmem allocator expects
	 */
	for (node = MAX_NUMNODES - 1; node >= 0; node--) {
		unsigned long pernode, pernodesize, map;
		struct bootmem_data *bdp;

		if (!node_online(node))
			continue;
		else if (node_isset(node, memory_less_mask))
			continue;

		bdp = &bootmem_node_data[node];
		pernode = mem_data[node].pernode_addr;
		pernodesize = mem_data[node].pernode_size;
		map = pernode + pernodesize;

		init_bootmem_node(pgdat_list[node],
				  map>>PAGE_SHIFT,
				  bdp->node_min_pfn,
				  bdp->node_low_pfn);
	}

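	/*
	 * The ordering below matters: usable ranges are first released into
	 * the bootmem allocator, then the bootmem bitmaps and pernode areas
	 * are re-reserved so the real mem_map can't reuse them, and only
	 * then can memory-less nodes borrow pernode space from a nearby
	 * node, since that borrowing allocates from the donor node's
	 * now-live bootmem.
	 */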
	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);

	reserve_pernode_space();
	memory_less_nodes();
	initialize_pernode_data();

	max_pfn = max_low_pfn;

	find_initrd();
}

#ifdef CONFIG_SMP
/**
 * per_cpu_init - setup per-cpu variables
 *
 * find_pernode_space() does most of this already; we just need to set
 * local_per_cpu_offset.
 */
void *per_cpu_init(void)
{
	int cpu;
	static int first_time = 1;

	if (first_time) {
		first_time = 0;
		for_each_possible_early_cpu(cpu)
			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
	}

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */

/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes. Find
 * out to which node a block of memory belongs.  Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0) */
		if (start < end)
			(*func)(start, end - start, 0);
		return;
	}

	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);

		if (re == end)
			break;
	}
}

/**
 * count_node_pages - callback to build per-node memory info structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Each node has its own number of physical pages, DMAable pages, start, and
 * end page frame number.  This routine will be called by call_pernode_memory()
 * for each piece of usable memory and will setup these values for each node.
 * Very similar to build_maps().
 */
static __init int count_node_pages(unsigned long start, unsigned long len, int node)
{
	unsigned long end = start + len;

#ifdef CONFIG_ZONE_DMA
	if (start <= __pa(MAX_DMA_ADDRESS))
		mem_data[node].num_dma_physpages +=
			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >> PAGE_SHIFT;
#endif
	start = GRANULEROUNDDOWN(start);
	end = GRANULEROUNDUP(end);
	mem_data[node].max_pfn = max(mem_data[node].max_pfn,
				     end >> PAGE_SHIFT);
	mem_data[node].min_pfn = min(mem_data[node].min_pfn,
				     start >> PAGE_SHIFT);

	return 0;
}

/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void __init paging_init(void)
{
	unsigned long max_dma;
	unsigned long pfn_offset = 0;
	unsigned long max_pfn = 0;
	int node;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

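	/*
	 * On ia64, MAX_DMA_ADDRESS sits at the 4GB boundary of the identity
	 * mapping, so max_dma below is the page frame that bounds ZONE_DMA
	 * (effectively the first 4GB of physical memory).
	 */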
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	efi_memmap_walk(filter_rsvd_memory, count_node_pages);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

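	/*
	 * Carve virtual address space off the top of the vmalloc area for a
	 * single, virtually contiguous mem_map covering pfn 0..max_low_pfn;
	 * create_mem_map_page_table() then builds page tables only for the
	 * parts of it that describe memory which actually exists.
	 */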
#ifdef CONFIG_VIRTUAL_MEM_MAP
	VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
		sizeof(struct page));
	vmem_map = (struct page *) VMALLOC_END;
	efi_memmap_walk(create_mem_map_page_table, NULL);
	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif

	for_each_online_node(node) {
		pfn_offset = mem_data[node].min_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
#endif
		if (mem_data[node].max_pfn > max_pfn)
			max_pfn = mem_data[node].max_pfn;
	}

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_pfn;
	free_area_init_nodes(max_zone_pfns);

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}

#ifdef CONFIG_MEMORY_HOTPLUG
pg_data_t *arch_alloc_nodedata(int nid)
{
	unsigned long size = compute_pernodesize(nid);

	return kzalloc(size, GFP_KERNEL);
}

void arch_free_nodedata(pg_data_t *pgdat)
{
	kfree(pgdat);
}

void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
{
	pgdat_list[update_node] = update_pgdat;
	scatter_node_data();
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif