v3.1
  1/* Common code for 32 and 64-bit NUMA */
  2#include <linux/kernel.h>
  3#include <linux/mm.h>
  4#include <linux/string.h>
  5#include <linux/init.h>
  6#include <linux/bootmem.h>
  7#include <linux/memblock.h>
  8#include <linux/mmzone.h>
  9#include <linux/ctype.h>
 10#include <linux/module.h>
 11#include <linux/nodemask.h>
 12#include <linux/sched.h>
 13#include <linux/topology.h>
 14
 15#include <asm/e820.h>
 16#include <asm/proto.h>
 17#include <asm/dma.h>
 18#include <asm/acpi.h>
 19#include <asm/amd_nb.h>
 20
 21#include "numa_internal.h"
 22
 23int __initdata numa_off;
 24nodemask_t numa_nodes_parsed __initdata;
 25
 26struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 27EXPORT_SYMBOL(node_data);
 28
 29static struct numa_meminfo numa_meminfo
 30#ifndef CONFIG_MEMORY_HOTPLUG
 31__initdata
 32#endif
 33;
 34
 35static int numa_distance_cnt;
 36static u8 *numa_distance;
 37
 38static __init int numa_setup(char *opt)
 39{
 40	if (!opt)
 41		return -EINVAL;
 42	if (!strncmp(opt, "off", 3))
 43		numa_off = 1;
 44#ifdef CONFIG_NUMA_EMU
 45	if (!strncmp(opt, "fake=", 5))
 46		numa_emu_cmdline(opt + 5);
 47#endif
 48#ifdef CONFIG_ACPI_NUMA
 49	if (!strncmp(opt, "noacpi", 6))
 50		acpi_numa = -1;
 51#endif
 52	return 0;
 53}
 54early_param("numa", numa_setup);
 55
 56/*
 57 * apicid, cpu, node mappings
 58 */
 59s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
 60	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 61};
 62
 63int __cpuinit numa_cpu_node(int cpu)
 64{
 65	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 66
 67	if (apicid != BAD_APICID)
 68		return __apicid_to_node[apicid];
 69	return NUMA_NO_NODE;
 70}
 71
 72cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
 73EXPORT_SYMBOL(node_to_cpumask_map);
 74
 75/*
 76 * Map cpu index to node index
 77 */
 78DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
 79EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
 80
 81void __cpuinit numa_set_node(int cpu, int node)
 82{
 83	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
 84
 85	/* early setting, no percpu area yet */
 86	if (cpu_to_node_map) {
 87		cpu_to_node_map[cpu] = node;
 88		return;
 89	}
 90
 91#ifdef CONFIG_DEBUG_PER_CPU_MAPS
 92	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
 93		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
 94		dump_stack();
 95		return;
 96	}
 97#endif
 98	per_cpu(x86_cpu_to_node_map, cpu) = node;
 99
100	if (node != NUMA_NO_NODE)
101		set_cpu_numa_node(cpu, node);
102}
103
104void __cpuinit numa_clear_node(int cpu)
105{
106	numa_set_node(cpu, NUMA_NO_NODE);
107}
108
109/*
110 * Allocate node_to_cpumask_map based on number of available nodes
111 * Requires node_possible_map to be valid.
112 *
113 * Note: node_to_cpumask() is not valid until after this is done.
114 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
115 */
116void __init setup_node_to_cpumask_map(void)
117{
118	unsigned int node, num = 0;
119
120	/* setup nr_node_ids if not done yet */
121	if (nr_node_ids == MAX_NUMNODES) {
122		for_each_node_mask(node, node_possible_map)
123			num = node;
124		nr_node_ids = num + 1;
125	}
126
127	/* allocate the map */
128	for (node = 0; node < nr_node_ids; node++)
129		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
130
131	/* cpumask_of_node() will now work */
132	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
133}
134
135static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
136				     struct numa_meminfo *mi)
137{
138	/* ignore zero length blks */
139	if (start == end)
140		return 0;
141
142	/* whine about and ignore invalid blks */
143	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
144		pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
145			   nid, start, end);
146		return 0;
147	}
148
149	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
150		pr_err("NUMA: too many memblk ranges\n");
151		return -EINVAL;
152	}
153
154	mi->blk[mi->nr_blks].start = start;
155	mi->blk[mi->nr_blks].end = end;
156	mi->blk[mi->nr_blks].nid = nid;
157	mi->nr_blks++;
158	return 0;
159}
160
161/**
162 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
163 * @idx: Index of memblk to remove
164 * @mi: numa_meminfo to remove memblk from
165 *
166 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
167 * decrementing @mi->nr_blks.
168 */
169void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
170{
171	mi->nr_blks--;
172	memmove(&mi->blk[idx], &mi->blk[idx + 1],
173		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
174}
175
176/**
177 * numa_add_memblk - Add one numa_memblk to numa_meminfo
178 * @nid: NUMA node ID of the new memblk
179 * @start: Start address of the new memblk
180 * @end: End address of the new memblk
181 *
182 * Add a new memblk to the default numa_meminfo.
183 *
184 * RETURNS:
185 * 0 on success, -errno on failure.
186 */
187int __init numa_add_memblk(int nid, u64 start, u64 end)
188{
189	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
190}
191
192/* Initialize NODE_DATA for a node on the local memory */
193static void __init setup_node_data(int nid, u64 start, u64 end)
194{
195	const u64 nd_low = PFN_PHYS(MAX_DMA_PFN);
196	const u64 nd_high = PFN_PHYS(max_pfn_mapped);
197	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
198	bool remapped = false;
199	u64 nd_pa;
200	void *nd;
201	int tnid;
202
203	/*
204	 * Don't confuse VM with a node that doesn't have the
205	 * minimum amount of memory:
206	 */
207	if (end && (end - start) < NODE_MIN_SIZE)
208		return;
209
210	/* initialize remap allocator before aligning to ZONE_ALIGN */
211	init_alloc_remap(nid, start, end);
212
213	start = roundup(start, ZONE_ALIGN);
214
215	printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n",
216	       nid, start, end);
217
218	/*
219	 * Allocate node data.  Try remap allocator first, node-local
220	 * memory and then any node.  Never allocate in DMA zone.
221	 */
222	nd = alloc_remap(nid, nd_size);
223	if (nd) {
224		nd_pa = __pa(nd);
225		remapped = true;
226	} else {
227		nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high,
228						nd_size, SMP_CACHE_BYTES);
229		if (nd_pa == MEMBLOCK_ERROR)
230			nd_pa = memblock_find_in_range(nd_low, nd_high,
231						nd_size, SMP_CACHE_BYTES);
232		if (nd_pa == MEMBLOCK_ERROR) {
233			pr_err("Cannot find %zu bytes in node %d\n",
234			       nd_size, nid);
235			return;
236		}
237		memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA");
238		nd = __va(nd_pa);
239	}
240
241	/* report and initialize */
242	printk(KERN_INFO "  NODE_DATA [%016Lx - %016Lx]%s\n",
243	       nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : "");
244	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
245	if (!remapped && tnid != nid)
246		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);
247
248	node_data[nid] = nd;
249	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
250	NODE_DATA(nid)->node_id = nid;
251	NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT;
252	NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT;
253
254	node_set_online(nid);
255}
256
257/**
258 * numa_cleanup_meminfo - Cleanup a numa_meminfo
259 * @mi: numa_meminfo to clean up
260 *
261 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
262 * conflicts and clear unused memblks.
263 *
264 * RETURNS:
265 * 0 on success, -errno on failure.
266 */
267int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
268{
269	const u64 low = 0;
270	const u64 high = PFN_PHYS(max_pfn);
271	int i, j, k;
272
273	/* first, trim all entries */
274	for (i = 0; i < mi->nr_blks; i++) {
275		struct numa_memblk *bi = &mi->blk[i];
276
277		/* make sure all blocks are inside the limits */
278		bi->start = max(bi->start, low);
279		bi->end = min(bi->end, high);
280
281		/* and there's no empty block */
282		if (bi->start >= bi->end)
283			numa_remove_memblk_from(i--, mi);
284	}
285
286	/* merge neighboring / overlapping entries */
287	for (i = 0; i < mi->nr_blks; i++) {
288		struct numa_memblk *bi = &mi->blk[i];
289
290		for (j = i + 1; j < mi->nr_blks; j++) {
291			struct numa_memblk *bj = &mi->blk[j];
292			u64 start, end;
293
294			/*
295			 * See whether there are overlapping blocks.  Whine
296			 * about but allow overlaps of the same nid.  They
297			 * will be merged below.
298			 */
299			if (bi->end > bj->start && bi->start < bj->end) {
300				if (bi->nid != bj->nid) {
301					pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
302					       bi->nid, bi->start, bi->end,
303					       bj->nid, bj->start, bj->end);
304					return -EINVAL;
305				}
306				pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
307					   bi->nid, bi->start, bi->end,
308					   bj->start, bj->end);
309			}
310
311			/*
312			 * Join together blocks on the same node, holes
313			 * between which don't overlap with memory on other
314			 * nodes.
315			 */
316			if (bi->nid != bj->nid)
317				continue;
318			start = min(bi->start, bj->start);
319			end = max(bi->end, bj->end);
320			for (k = 0; k < mi->nr_blks; k++) {
321				struct numa_memblk *bk = &mi->blk[k];
322
323				if (bi->nid == bk->nid)
324					continue;
325				if (start < bk->end && end > bk->start)
326					break;
327			}
328			if (k < mi->nr_blks)
329				continue;
330			printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%Lx,%Lx)\n",
331			       bi->nid, bi->start, bi->end, bj->start, bj->end,
332			       start, end);
333			bi->start = start;
334			bi->end = end;
335			numa_remove_memblk_from(j--, mi);
336		}
337	}
338
339	/* clear unused ones */
340	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
341		mi->blk[i].start = mi->blk[i].end = 0;
342		mi->blk[i].nid = NUMA_NO_NODE;
343	}
344
345	return 0;
346}
347
348/*
349 * Set nodes, which have memory in @mi, in *@nodemask.
350 */
351static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
352					      const struct numa_meminfo *mi)
353{
354	int i;
355
356	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
357		if (mi->blk[i].start != mi->blk[i].end &&
358		    mi->blk[i].nid != NUMA_NO_NODE)
359			node_set(mi->blk[i].nid, *nodemask);
360}
361
362/**
363 * numa_reset_distance - Reset NUMA distance table
364 *
365 * The current table is freed.  The next numa_set_distance() call will
366 * create a new one.
367 */
368void __init numa_reset_distance(void)
369{
370	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);
371
372	/* numa_distance could be 1LU marking allocation failure, test cnt */
373	if (numa_distance_cnt)
374		memblock_x86_free_range(__pa(numa_distance),
375					__pa(numa_distance) + size);
376	numa_distance_cnt = 0;
377	numa_distance = NULL;	/* enable table creation */
378}
379
380static int __init numa_alloc_distance(void)
381{
382	nodemask_t nodes_parsed;
383	size_t size;
384	int i, j, cnt = 0;
385	u64 phys;
386
387	/* size the new table and allocate it */
388	nodes_parsed = numa_nodes_parsed;
389	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);
390
391	for_each_node_mask(i, nodes_parsed)
392		cnt = i;
393	cnt++;
394	size = cnt * cnt * sizeof(numa_distance[0]);
395
396	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
397				      size, PAGE_SIZE);
398	if (phys == MEMBLOCK_ERROR) {
399		pr_warning("NUMA: Warning: can't allocate distance table!\n");
400		/* don't retry until explicitly reset */
401		numa_distance = (void *)1LU;
402		return -ENOMEM;
403	}
404	memblock_x86_reserve_range(phys, phys + size, "NUMA DIST");
405
406	numa_distance = __va(phys);
407	numa_distance_cnt = cnt;
408
409	/* fill with the default distances */
410	for (i = 0; i < cnt; i++)
411		for (j = 0; j < cnt; j++)
412			numa_distance[i * cnt + j] = i == j ?
413				LOCAL_DISTANCE : REMOTE_DISTANCE;
414	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
415
416	return 0;
417}
418
419/**
420 * numa_set_distance - Set NUMA distance from one NUMA to another
421 * @from: the 'from' node to set distance
422 * @to: the 'to'  node to set distance
423 * @distance: NUMA distance
424 *
425 * Set the distance from node @from to @to to @distance.  If distance table
426 * doesn't exist, one which is large enough to accommodate all the currently
427 * known nodes will be created.
428 *
429 * If such table cannot be allocated, a warning is printed and further
430 * calls are ignored until the distance table is reset with
431 * numa_reset_distance().
432 *
433 * If @from or @to is higher than the highest known node at the time of
434 * table creation or @distance doesn't make sense, the call is ignored.
435 * This is to allow simplification of specific NUMA config implementations.
436 */
437void __init numa_set_distance(int from, int to, int distance)
438{
439	if (!numa_distance && numa_alloc_distance() < 0)
440		return;
441
442	if (from >= numa_distance_cnt || to >= numa_distance_cnt) {
443		printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n",
444			    from, to, distance);
445		return;
446	}
447
448	if ((u8)distance != distance ||
449	    (from == to && distance != LOCAL_DISTANCE)) {
450		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
451			     from, to, distance);
452		return;
453	}
454
455	numa_distance[from * numa_distance_cnt + to] = distance;
456}
457
458int __node_distance(int from, int to)
459{
460	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
461		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
462	return numa_distance[from * numa_distance_cnt + to];
463}
464EXPORT_SYMBOL(__node_distance);
465
466/*
467 * Sanity check to catch more bad NUMA configurations (they are amazingly
468 * common).  Make sure the nodes cover all memory.
469 */
470static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
471{
472	u64 numaram, e820ram;
473	int i;
474
475	numaram = 0;
476	for (i = 0; i < mi->nr_blks; i++) {
477		u64 s = mi->blk[i].start >> PAGE_SHIFT;
478		u64 e = mi->blk[i].end >> PAGE_SHIFT;
479		numaram += e - s;
480		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
481		if ((s64)numaram < 0)
482			numaram = 0;
483	}
484
485	e820ram = max_pfn - (memblock_x86_hole_size(0,
486					PFN_PHYS(max_pfn)) >> PAGE_SHIFT);
487	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
488	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
489		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
490		       (numaram << PAGE_SHIFT) >> 20,
491		       (e820ram << PAGE_SHIFT) >> 20);
492		return false;
493	}
494	return true;
495}
496
497static int __init numa_register_memblks(struct numa_meminfo *mi)
498{
499	unsigned long uninitialized_var(pfn_align);
500	int i, nid;
501
502	/* Account for nodes with cpus and no memory */
503	node_possible_map = numa_nodes_parsed;
504	numa_nodemask_from_meminfo(&node_possible_map, mi);
505	if (WARN_ON(nodes_empty(node_possible_map)))
506		return -EINVAL;
507
508	for (i = 0; i < mi->nr_blks; i++)
509		memblock_x86_register_active_regions(mi->blk[i].nid,
510					mi->blk[i].start >> PAGE_SHIFT,
511					mi->blk[i].end >> PAGE_SHIFT);
512
513	/* for out of order entries */
514	sort_node_map();
515
516	/*
517	 * If sections array is gonna be used for pfn -> nid mapping, check
518	 * whether its granularity is fine enough.
519	 */
520#ifdef NODE_NOT_IN_PAGE_FLAGS
521	pfn_align = node_map_pfn_alignment();
522	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
523		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
524		       PFN_PHYS(pfn_align) >> 20,
525		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
526		return -EINVAL;
527	}
528#endif
529	if (!numa_meminfo_cover_memory(mi))
530		return -EINVAL;
531
532	/* Finally register nodes. */
533	for_each_node_mask(nid, node_possible_map) {
534		u64 start = PFN_PHYS(max_pfn);
535		u64 end = 0;
536
537		for (i = 0; i < mi->nr_blks; i++) {
538			if (nid != mi->blk[i].nid)
539				continue;
540			start = min(mi->blk[i].start, start);
541			end = max(mi->blk[i].end, end);
542		}
543
544		if (start < end)
545			setup_node_data(nid, start, end);
546	}
547
548	return 0;
549}
550
551/*
552 * There are unfortunately some poorly designed mainboards around that
553 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
554 * mapping. To avoid this fill in the mapping for all possible CPUs,
555 * as the number of CPUs is not known yet. We round robin the existing
556 * nodes.
557 */
558static void __init numa_init_array(void)
559{
560	int rr, i;
561
562	rr = first_node(node_online_map);
563	for (i = 0; i < nr_cpu_ids; i++) {
564		if (early_cpu_to_node(i) != NUMA_NO_NODE)
565			continue;
566		numa_set_node(i, rr);
567		rr = next_node(rr, node_online_map);
568		if (rr == MAX_NUMNODES)
569			rr = first_node(node_online_map);
570	}
571}
572
573static int __init numa_init(int (*init_func)(void))
574{
575	int i;
576	int ret;
577
578	for (i = 0; i < MAX_LOCAL_APIC; i++)
579		set_apicid_to_node(i, NUMA_NO_NODE);
580
581	nodes_clear(numa_nodes_parsed);
582	nodes_clear(node_possible_map);
583	nodes_clear(node_online_map);
584	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
585	remove_all_active_ranges();
586	numa_reset_distance();
587
588	ret = init_func();
589	if (ret < 0)
590		return ret;
591	ret = numa_cleanup_meminfo(&numa_meminfo);
592	if (ret < 0)
593		return ret;
594
595	numa_emulation(&numa_meminfo, numa_distance_cnt);
596
597	ret = numa_register_memblks(&numa_meminfo);
598	if (ret < 0)
599		return ret;
600
601	for (i = 0; i < nr_cpu_ids; i++) {
602		int nid = early_cpu_to_node(i);
603
604		if (nid == NUMA_NO_NODE)
605			continue;
606		if (!node_online(nid))
607			numa_clear_node(i);
608	}
609	numa_init_array();
610	return 0;
611}
612
613/**
614 * dummy_numa_init - Fallback dummy NUMA init
615 *
616 * Used if there's no underlying NUMA architecture, NUMA initialization
617 * fails, or NUMA is disabled on the command line.
618 *
619 * Must online at least one node and add memory blocks that cover all
620 * allowed memory.  This function must not fail.
621 */
622static int __init dummy_numa_init(void)
623{
624	printk(KERN_INFO "%s\n",
625	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
626	printk(KERN_INFO "Faking a node at %016Lx-%016Lx\n",
627	       0LLU, PFN_PHYS(max_pfn));
628
629	node_set(0, numa_nodes_parsed);
630	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));
631
632	return 0;
633}
634
635/**
636 * x86_numa_init - Initialize NUMA
637 *
638 * Try each configured NUMA initialization method until one succeeds.  The
639 * last fallback is dummy single node config encompassing whole memory and
640 * never fails.
641 */
642void __init x86_numa_init(void)
643{
644	if (!numa_off) {
645#ifdef CONFIG_X86_NUMAQ
646		if (!numa_init(numaq_numa_init))
647			return;
648#endif
649#ifdef CONFIG_ACPI_NUMA
650		if (!numa_init(x86_acpi_numa_init))
651			return;
652#endif
653#ifdef CONFIG_AMD_NUMA
654		if (!numa_init(amd_numa_init))
655			return;
656#endif
657	}
658
659	numa_init(dummy_numa_init);
660}
661
662static __init int find_near_online_node(int node)
663{
664	int n, val;
665	int min_val = INT_MAX;
666	int best_node = -1;
667
668	for_each_online_node(n) {
669		val = node_distance(node, n);
670
671		if (val < min_val) {
672			min_val = val;
673			best_node = n;
674		}
675	}
676
677	return best_node;
678}
679
680/*
681 * Setup early cpu_to_node.
682 *
683 * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
684 * and apicid_to_node[] tables have valid entries for a CPU.
685 * This means we skip cpu_to_node[] initialisation for NUMA
686 * emulation and faking node case (when running a kernel compiled
687 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
688 * is already initialized in a round robin manner at numa_init_array,
689 * prior to this call, and this initialization is good enough
690 * for the fake NUMA cases.
691 *
692 * Called before the per_cpu areas are setup.
693 */
694void __init init_cpu_to_node(void)
695{
696	int cpu;
697	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
698
699	BUG_ON(cpu_to_apicid == NULL);
700
701	for_each_possible_cpu(cpu) {
702		int node = numa_cpu_node(cpu);
703
704		if (node == NUMA_NO_NODE)
705			continue;
706		if (!node_online(node))
707			node = find_near_online_node(node);
708		numa_set_node(cpu, node);
709	}
710}
711
712#ifndef CONFIG_DEBUG_PER_CPU_MAPS
713
714# ifndef CONFIG_NUMA_EMU
715void __cpuinit numa_add_cpu(int cpu)
716{
717	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
718}
719
720void __cpuinit numa_remove_cpu(int cpu)
721{
722	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
723}
724# endif	/* !CONFIG_NUMA_EMU */
725
726#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
727
728int __cpu_to_node(int cpu)
729{
730	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
731		printk(KERN_WARNING
732			"cpu_to_node(%d): usage too early!\n", cpu);
733		dump_stack();
734		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
735	}
736	return per_cpu(x86_cpu_to_node_map, cpu);
737}
738EXPORT_SYMBOL(__cpu_to_node);
739
740/*
741 * Same function as cpu_to_node() but used if called before the
742 * per_cpu areas are setup.
743 */
744int early_cpu_to_node(int cpu)
745{
746	if (early_per_cpu_ptr(x86_cpu_to_node_map))
747		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
748
749	if (!cpu_possible(cpu)) {
750		printk(KERN_WARNING
751			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
752		dump_stack();
753		return NUMA_NO_NODE;
754	}
755	return per_cpu(x86_cpu_to_node_map, cpu);
756}
757
758void debug_cpumask_set_cpu(int cpu, int node, bool enable)
759{
760	struct cpumask *mask;
761	char buf[64];
762
763	if (node == NUMA_NO_NODE) {
764		/* early_cpu_to_node() already emits a warning and trace */
765		return;
766	}
767	mask = node_to_cpumask_map[node];
768	if (!mask) {
769		pr_err("node_to_cpumask_map[%i] NULL\n", node);
770		dump_stack();
771		return;
772	}
773
774	if (enable)
775		cpumask_set_cpu(cpu, mask);
776	else
777		cpumask_clear_cpu(cpu, mask);
778
779	cpulist_scnprintf(buf, sizeof(buf), mask);
780	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
781		enable ? "numa_add_cpu" : "numa_remove_cpu",
782		cpu, node, buf);
783	return;
784}
785
786# ifndef CONFIG_NUMA_EMU
787static void __cpuinit numa_set_cpumask(int cpu, bool enable)
788{
789	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
790}
791
792void __cpuinit numa_add_cpu(int cpu)
793{
794	numa_set_cpumask(cpu, true);
795}
796
797void __cpuinit numa_remove_cpu(int cpu)
798{
799	numa_set_cpumask(cpu, false);
800}
801# endif	/* !CONFIG_NUMA_EMU */
802
803/*
804 * Returns a pointer to the bitmask of CPUs on Node 'node'.
805 */
806const struct cpumask *cpumask_of_node(int node)
807{
808	if (node >= nr_node_ids) {
809		printk(KERN_WARNING
810			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
811			node, nr_node_ids);
812		dump_stack();
813		return cpu_none_mask;
814	}
815	if (node_to_cpumask_map[node] == NULL) {
816		printk(KERN_WARNING
817			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
818			node);
819		dump_stack();
820		return cpu_online_mask;
821	}
822	return node_to_cpumask_map[node];
823}
824EXPORT_SYMBOL(cpumask_of_node);
825
826#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */
827
828#ifdef CONFIG_MEMORY_HOTPLUG
829int memory_add_physaddr_to_nid(u64 start)
830{
831	struct numa_meminfo *mi = &numa_meminfo;
832	int nid = mi->blk[0].nid;
833	int i;
834
835	for (i = 0; i < mi->nr_blks; i++)
836		if (mi->blk[i].start <= start && mi->blk[i].end > start)
837			nid = mi->blk[i].nid;
838	return nid;
839}
840EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
841#endif
v3.15
  1/* Common code for 32 and 64-bit NUMA */
  2#include <linux/kernel.h>
  3#include <linux/mm.h>
  4#include <linux/string.h>
  5#include <linux/init.h>
  6#include <linux/bootmem.h>
  7#include <linux/memblock.h>
  8#include <linux/mmzone.h>
  9#include <linux/ctype.h>
 10#include <linux/module.h>
 11#include <linux/nodemask.h>
 12#include <linux/sched.h>
 13#include <linux/topology.h>
 14
 15#include <asm/e820.h>
 16#include <asm/proto.h>
 17#include <asm/dma.h>
 18#include <asm/acpi.h>
 19#include <asm/amd_nb.h>
 20
 21#include "numa_internal.h"
 22
 23int __initdata numa_off;
 24nodemask_t numa_nodes_parsed __initdata;
 25
 26struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 27EXPORT_SYMBOL(node_data);
 28
 29static struct numa_meminfo numa_meminfo
 30#ifndef CONFIG_MEMORY_HOTPLUG
 31__initdata
 32#endif
 33;
 34
 35static int numa_distance_cnt;
 36static u8 *numa_distance;
 37
 38static __init int numa_setup(char *opt)
 39{
 40	if (!opt)
 41		return -EINVAL;
 42	if (!strncmp(opt, "off", 3))
 43		numa_off = 1;
 44#ifdef CONFIG_NUMA_EMU
 45	if (!strncmp(opt, "fake=", 5))
 46		numa_emu_cmdline(opt + 5);
 47#endif
 48#ifdef CONFIG_ACPI_NUMA
 49	if (!strncmp(opt, "noacpi", 6))
 50		acpi_numa = -1;
 51#endif
 52	return 0;
 53}
 54early_param("numa", numa_setup);
 55
 56/*
 57 * apicid, cpu, node mappings
 58 */
 59s16 __apicid_to_node[MAX_LOCAL_APIC] = {
 60	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 61};
 62
 63int numa_cpu_node(int cpu)
 64{
 65	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 66
 67	if (apicid != BAD_APICID)
 68		return __apicid_to_node[apicid];
 69	return NUMA_NO_NODE;
 70}
 71
 72cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
 73EXPORT_SYMBOL(node_to_cpumask_map);
 74
 75/*
 76 * Map cpu index to node index
 77 */
 78DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
 79EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
 80
 81void numa_set_node(int cpu, int node)
 82{
 83	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
 84
 85	/* early setting, no percpu area yet */
 86	if (cpu_to_node_map) {
 87		cpu_to_node_map[cpu] = node;
 88		return;
 89	}
 90
 91#ifdef CONFIG_DEBUG_PER_CPU_MAPS
 92	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
 93		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
 94		dump_stack();
 95		return;
 96	}
 97#endif
 98	per_cpu(x86_cpu_to_node_map, cpu) = node;
 99
100	set_cpu_numa_node(cpu, node);
101}
102
103void numa_clear_node(int cpu)
104{
105	numa_set_node(cpu, NUMA_NO_NODE);
106}
107
108/*
109 * Allocate node_to_cpumask_map based on number of available nodes
110 * Requires node_possible_map to be valid.
111 *
112 * Note: cpumask_of_node() is not valid until after this is done.
113 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
114 */
115void __init setup_node_to_cpumask_map(void)
116{
117	unsigned int node;
118
119	/* setup nr_node_ids if not done yet */
120	if (nr_node_ids == MAX_NUMNODES)
121		setup_nr_node_ids();
122
123	/* allocate the map */
124	for (node = 0; node < nr_node_ids; node++)
125		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
126
127	/* cpumask_of_node() will now work */
128	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
129}
130
131static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
132				     struct numa_meminfo *mi)
133{
134	/* ignore zero length blks */
135	if (start == end)
136		return 0;
137
138	/* whine about and ignore invalid blks */
139	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
140		pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
141			   nid, start, end - 1);
142		return 0;
143	}
144
145	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
146		pr_err("NUMA: too many memblk ranges\n");
147		return -EINVAL;
148	}
149
150	mi->blk[mi->nr_blks].start = start;
151	mi->blk[mi->nr_blks].end = end;
152	mi->blk[mi->nr_blks].nid = nid;
153	mi->nr_blks++;
154	return 0;
155}
156
157/**
158 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
159 * @idx: Index of memblk to remove
160 * @mi: numa_meminfo to remove memblk from
161 *
162 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
163 * decrementing @mi->nr_blks.
164 */
165void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
166{
167	mi->nr_blks--;
168	memmove(&mi->blk[idx], &mi->blk[idx + 1],
169		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
170}
171
172/**
173 * numa_add_memblk - Add one numa_memblk to numa_meminfo
174 * @nid: NUMA node ID of the new memblk
175 * @start: Start address of the new memblk
176 * @end: End address of the new memblk
177 *
178 * Add a new memblk to the default numa_meminfo.
179 *
180 * RETURNS:
181 * 0 on success, -errno on failure.
182 */
183int __init numa_add_memblk(int nid, u64 start, u64 end)
184{
185	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
186}
187
188/* Initialize NODE_DATA for a node on the local memory */
189static void __init setup_node_data(int nid, u64 start, u64 end)
190{
191	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
192	u64 nd_pa;
193	void *nd;
194	int tnid;
195
196	/*
197	 * Don't confuse VM with a node that doesn't have the
198	 * minimum amount of memory:
199	 */
200	if (end && (end - start) < NODE_MIN_SIZE)
201		return;
202
203	start = roundup(start, ZONE_ALIGN);
204
205	printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
206	       nid, start, end - 1);
207
208	/*
209	 * Allocate node data.  Try node-local memory and then any node.
210	 * Never allocate in DMA zone.
211	 */
212	nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
213	if (!nd_pa) {
214		nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
215					      MEMBLOCK_ALLOC_ACCESSIBLE);
216		if (!nd_pa) {
217			pr_err("Cannot find %zu bytes in node %d\n",
218			       nd_size, nid);
219			return;
220		}
221	}
222	nd = __va(nd_pa);
223
224	/* report and initialize */
225	printk(KERN_INFO "  NODE_DATA [mem %#010Lx-%#010Lx]\n",
226	       nd_pa, nd_pa + nd_size - 1);
227	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
228	if (tnid != nid)
229		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);
230
231	node_data[nid] = nd;
232	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
233	NODE_DATA(nid)->node_id = nid;
234	NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT;
235	NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT;
236
237	node_set_online(nid);
238}
239
240/**
241 * numa_cleanup_meminfo - Cleanup a numa_meminfo
242 * @mi: numa_meminfo to clean up
243 *
244 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
245 * conflicts and clear unused memblks.
246 *
247 * RETURNS:
248 * 0 on success, -errno on failure.
249 */
250int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
251{
252	const u64 low = 0;
253	const u64 high = PFN_PHYS(max_pfn);
254	int i, j, k;
255
256	/* first, trim all entries */
257	for (i = 0; i < mi->nr_blks; i++) {
258		struct numa_memblk *bi = &mi->blk[i];
259
260		/* make sure all blocks are inside the limits */
261		bi->start = max(bi->start, low);
262		bi->end = min(bi->end, high);
263
264		/* and there's no empty block */
265		if (bi->start >= bi->end)
266			numa_remove_memblk_from(i--, mi);
267	}
268
269	/* merge neighboring / overlapping entries */
270	for (i = 0; i < mi->nr_blks; i++) {
271		struct numa_memblk *bi = &mi->blk[i];
272
273		for (j = i + 1; j < mi->nr_blks; j++) {
274			struct numa_memblk *bj = &mi->blk[j];
275			u64 start, end;
276
277			/*
278			 * See whether there are overlapping blocks.  Whine
279			 * about but allow overlaps of the same nid.  They
280			 * will be merged below.
281			 */
282			if (bi->end > bj->start && bi->start < bj->end) {
283				if (bi->nid != bj->nid) {
284					pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
285					       bi->nid, bi->start, bi->end - 1,
286					       bj->nid, bj->start, bj->end - 1);
287					return -EINVAL;
288				}
289				pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
290					   bi->nid, bi->start, bi->end - 1,
291					   bj->start, bj->end - 1);
292			}
293
294			/*
295			 * Join together blocks on the same node, holes
296			 * between which don't overlap with memory on other
297			 * nodes.
298			 */
299			if (bi->nid != bj->nid)
300				continue;
301			start = min(bi->start, bj->start);
302			end = max(bi->end, bj->end);
303			for (k = 0; k < mi->nr_blks; k++) {
304				struct numa_memblk *bk = &mi->blk[k];
305
306				if (bi->nid == bk->nid)
307					continue;
308				if (start < bk->end && end > bk->start)
309					break;
310			}
311			if (k < mi->nr_blks)
312				continue;
313			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
314			       bi->nid, bi->start, bi->end - 1, bj->start,
315			       bj->end - 1, start, end - 1);
316			bi->start = start;
317			bi->end = end;
318			numa_remove_memblk_from(j--, mi);
319		}
320	}
321
322	/* clear unused ones */
323	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
324		mi->blk[i].start = mi->blk[i].end = 0;
325		mi->blk[i].nid = NUMA_NO_NODE;
326	}
327
328	return 0;
329}
330
331/*
332 * Set nodes, which have memory in @mi, in *@nodemask.
333 */
334static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
335					      const struct numa_meminfo *mi)
336{
337	int i;
338
339	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
340		if (mi->blk[i].start != mi->blk[i].end &&
341		    mi->blk[i].nid != NUMA_NO_NODE)
342			node_set(mi->blk[i].nid, *nodemask);
343}
344
345/**
346 * numa_reset_distance - Reset NUMA distance table
347 *
348 * The current table is freed.  The next numa_set_distance() call will
349 * create a new one.
350 */
351void __init numa_reset_distance(void)
352{
353	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);
354
355	/* numa_distance could be 1LU marking allocation failure, test cnt */
356	if (numa_distance_cnt)
357		memblock_free(__pa(numa_distance), size);
358	numa_distance_cnt = 0;
359	numa_distance = NULL;	/* enable table creation */
360}
361
362static int __init numa_alloc_distance(void)
363{
364	nodemask_t nodes_parsed;
365	size_t size;
366	int i, j, cnt = 0;
367	u64 phys;
368
369	/* size the new table and allocate it */
370	nodes_parsed = numa_nodes_parsed;
371	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);
372
373	for_each_node_mask(i, nodes_parsed)
374		cnt = i;
375	cnt++;
376	size = cnt * cnt * sizeof(numa_distance[0]);
377
378	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
379				      size, PAGE_SIZE);
380	if (!phys) {
381		pr_warning("NUMA: Warning: can't allocate distance table!\n");
382		/* don't retry until explicitly reset */
383		numa_distance = (void *)1LU;
384		return -ENOMEM;
385	}
386	memblock_reserve(phys, size);
387
388	numa_distance = __va(phys);
389	numa_distance_cnt = cnt;
390
391	/* fill with the default distances */
392	for (i = 0; i < cnt; i++)
393		for (j = 0; j < cnt; j++)
394			numa_distance[i * cnt + j] = i == j ?
395				LOCAL_DISTANCE : REMOTE_DISTANCE;
396	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
397
398	return 0;
399}
400
401/**
402 * numa_set_distance - Set NUMA distance from one NUMA to another
403 * @from: the 'from' node to set distance
404 * @to: the 'to'  node to set distance
405 * @distance: NUMA distance
406 *
407 * Set the distance from node @from to @to to @distance.  If distance table
408 * doesn't exist, one which is large enough to accommodate all the currently
409 * known nodes will be created.
410 *
411 * If such table cannot be allocated, a warning is printed and further
412 * calls are ignored until the distance table is reset with
413 * numa_reset_distance().
414 *
415 * If @from or @to is higher than the highest known node or lower than zero
416 * at the time of table creation or @distance doesn't make sense, the call
417 * is ignored.
418 * This is to allow simplification of specific NUMA config implementations.
419 */
420void __init numa_set_distance(int from, int to, int distance)
421{
422	if (!numa_distance && numa_alloc_distance() < 0)
423		return;
424
425	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
426			from < 0 || to < 0) {
427		pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
428			    from, to, distance);
429		return;
430	}
431
432	if ((u8)distance != distance ||
433	    (from == to && distance != LOCAL_DISTANCE)) {
434		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
435			     from, to, distance);
436		return;
437	}
438
439	numa_distance[from * numa_distance_cnt + to] = distance;
440}
441
442int __node_distance(int from, int to)
443{
444	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
445		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
446	return numa_distance[from * numa_distance_cnt + to];
447}
448EXPORT_SYMBOL(__node_distance);
449
450/*
451 * Sanity check to catch more bad NUMA configurations (they are amazingly
452 * common).  Make sure the nodes cover all memory.
453 */
454static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
455{
456	u64 numaram, e820ram;
457	int i;
458
459	numaram = 0;
460	for (i = 0; i < mi->nr_blks; i++) {
461		u64 s = mi->blk[i].start >> PAGE_SHIFT;
462		u64 e = mi->blk[i].end >> PAGE_SHIFT;
463		numaram += e - s;
464		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
465		if ((s64)numaram < 0)
466			numaram = 0;
467	}
468
469	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
470
471	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
472	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
473		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
474		       (numaram << PAGE_SHIFT) >> 20,
475		       (e820ram << PAGE_SHIFT) >> 20);
476		return false;
477	}
478	return true;
479}
480
481static int __init numa_register_memblks(struct numa_meminfo *mi)
482{
483	unsigned long uninitialized_var(pfn_align);
484	int i, nid;
485
486	/* Account for nodes with cpus and no memory */
487	node_possible_map = numa_nodes_parsed;
488	numa_nodemask_from_meminfo(&node_possible_map, mi);
489	if (WARN_ON(nodes_empty(node_possible_map)))
490		return -EINVAL;
491
492	for (i = 0; i < mi->nr_blks; i++) {
493		struct numa_memblk *mb = &mi->blk[i];
494		memblock_set_node(mb->start, mb->end - mb->start,
495				  &memblock.memory, mb->nid);
496	}
497
498	/*
499	 * If sections array is gonna be used for pfn -> nid mapping, check
500	 * whether its granularity is fine enough.
501	 */
502#ifdef NODE_NOT_IN_PAGE_FLAGS
503	pfn_align = node_map_pfn_alignment();
504	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
505		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
506		       PFN_PHYS(pfn_align) >> 20,
507		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
508		return -EINVAL;
509	}
510#endif
511	if (!numa_meminfo_cover_memory(mi))
512		return -EINVAL;
513
514	/* Finally register nodes. */
515	for_each_node_mask(nid, node_possible_map) {
516		u64 start = PFN_PHYS(max_pfn);
517		u64 end = 0;
518
519		for (i = 0; i < mi->nr_blks; i++) {
520			if (nid != mi->blk[i].nid)
521				continue;
522			start = min(mi->blk[i].start, start);
523			end = max(mi->blk[i].end, end);
524		}
525
526		if (start < end)
527			setup_node_data(nid, start, end);
528	}
529
530	/* Dump memblock with node info and return. */
531	memblock_dump_all();
532	return 0;
533}
534
535/*
536 * There are unfortunately some poorly designed mainboards around that
537 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
538 * mapping. To avoid this fill in the mapping for all possible CPUs,
539 * as the number of CPUs is not known yet. We round robin the existing
540 * nodes.
541 */
542static void __init numa_init_array(void)
543{
544	int rr, i;
545
546	rr = first_node(node_online_map);
547	for (i = 0; i < nr_cpu_ids; i++) {
548		if (early_cpu_to_node(i) != NUMA_NO_NODE)
549			continue;
550		numa_set_node(i, rr);
551		rr = next_node(rr, node_online_map);
552		if (rr == MAX_NUMNODES)
553			rr = first_node(node_online_map);
554	}
555}
556
557static void __init numa_clear_kernel_node_hotplug(void)
558{
559	int i, nid;
560	nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
561	unsigned long start, end;
562	struct memblock_type *type = &memblock.reserved;
563
564	/*
565	 * At this time, all memory regions reserved by memblock are
566	 * used by the kernel. Setting the nid in memblock.reserved will
567	 * mark out all the nodes the kernel resides in.
568	 */
569	for (i = 0; i < numa_meminfo.nr_blks; i++) {
570		struct numa_memblk *mb = &numa_meminfo.blk[i];
571		memblock_set_node(mb->start, mb->end - mb->start,
572				  &memblock.reserved, mb->nid);
573	}
574
575	/* Mark all kernel nodes. */
576	for (i = 0; i < type->cnt; i++)
577		node_set(type->regions[i].nid, numa_kernel_nodes);
578
579	/* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
580	for (i = 0; i < numa_meminfo.nr_blks; i++) {
581		nid = numa_meminfo.blk[i].nid;
582		if (!node_isset(nid, numa_kernel_nodes))
583			continue;
584
585		start = numa_meminfo.blk[i].start;
586		end = numa_meminfo.blk[i].end;
587
588		memblock_clear_hotplug(start, end - start);
589	}
590}
591
592static int __init numa_init(int (*init_func)(void))
593{
594	int i;
595	int ret;
596
597	for (i = 0; i < MAX_LOCAL_APIC; i++)
598		set_apicid_to_node(i, NUMA_NO_NODE);
599
600	nodes_clear(numa_nodes_parsed);
601	nodes_clear(node_possible_map);
602	nodes_clear(node_online_map);
603	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
604	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
605				  MAX_NUMNODES));
606	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
607				  MAX_NUMNODES));
608	/* In case that parsing SRAT failed. */
609	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
610	numa_reset_distance();
611
612	ret = init_func();
613	if (ret < 0)
614		return ret;
615
616	/*
617	 * We reset memblock back to the top-down direction
618	 * here because if we configured ACPI_NUMA, we have
619	 * parsed SRAT in init_func(). It is ok to have the
620	 * reset here even if we didn't configure ACPI_NUMA
621	 * or ACPI NUMA init fails and falls back to dummy
622	 * numa init.
623	 */
624	memblock_set_bottom_up(false);
625
626	ret = numa_cleanup_meminfo(&numa_meminfo);
627	if (ret < 0)
628		return ret;
629
630	numa_emulation(&numa_meminfo, numa_distance_cnt);
631
632	ret = numa_register_memblks(&numa_meminfo);
633	if (ret < 0)
634		return ret;
635
636	for (i = 0; i < nr_cpu_ids; i++) {
637		int nid = early_cpu_to_node(i);
638
639		if (nid == NUMA_NO_NODE)
640			continue;
641		if (!node_online(nid))
642			numa_clear_node(i);
643	}
644	numa_init_array();
645
646	/*
647	 * At very early time, the kernel has to use some memory such as
648	 * loading the kernel image. We cannot prevent this anyway. So any
649	 * node the kernel resides in should be un-hotpluggable.
650	 *
651	 * And when we come here, numa_init() won't fail.
652	 */
653	numa_clear_kernel_node_hotplug();
654
655	return 0;
656}
657
658/**
659 * dummy_numa_init - Fallback dummy NUMA init
660 *
661 * Used if there's no underlying NUMA architecture, NUMA initialization
662 * fails, or NUMA is disabled on the command line.
663 *
664 * Must online at least one node and add memory blocks that cover all
665 * allowed memory.  This function must not fail.
666 */
667static int __init dummy_numa_init(void)
668{
669	printk(KERN_INFO "%s\n",
670	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
671	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
672	       0LLU, PFN_PHYS(max_pfn) - 1);
673
674	node_set(0, numa_nodes_parsed);
675	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));
676
677	return 0;
678}
679
680/**
681 * x86_numa_init - Initialize NUMA
682 *
683 * Try each configured NUMA initialization method until one succeeds.  The
684 * last fallback is dummy single node config encompassing whole memory and
685 * never fails.
686 */
687void __init x86_numa_init(void)
688{
689	if (!numa_off) {
690#ifdef CONFIG_ACPI_NUMA
691		if (!numa_init(x86_acpi_numa_init))
692			return;
693#endif
694#ifdef CONFIG_AMD_NUMA
695		if (!numa_init(amd_numa_init))
696			return;
697#endif
698	}
699
700	numa_init(dummy_numa_init);
701}
702
703static __init int find_near_online_node(int node)
704{
705	int n, val;
706	int min_val = INT_MAX;
707	int best_node = -1;
708
709	for_each_online_node(n) {
710		val = node_distance(node, n);
711
712		if (val < min_val) {
713			min_val = val;
714			best_node = n;
715		}
716	}
717
718	return best_node;
719}
720
721/*
722 * Setup early cpu_to_node.
723 *
724 * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
725 * and apicid_to_node[] tables have valid entries for a CPU.
726 * This means we skip cpu_to_node[] initialisation for NUMA
727 * emulation and faking node case (when running a kernel compiled
728 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
729 * is already initialized in a round robin manner at numa_init_array,
730 * prior to this call, and this initialization is good enough
731 * for the fake NUMA cases.
732 *
733 * Called before the per_cpu areas are setup.
734 */
735void __init init_cpu_to_node(void)
736{
737	int cpu;
738	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
739
740	BUG_ON(cpu_to_apicid == NULL);
741
742	for_each_possible_cpu(cpu) {
743		int node = numa_cpu_node(cpu);
744
745		if (node == NUMA_NO_NODE)
746			continue;
747		if (!node_online(node))
748			node = find_near_online_node(node);
749		numa_set_node(cpu, node);
750	}
751}
752
753#ifndef CONFIG_DEBUG_PER_CPU_MAPS
754
755# ifndef CONFIG_NUMA_EMU
756void numa_add_cpu(int cpu)
757{
758	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
759}
760
761void numa_remove_cpu(int cpu)
762{
763	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
764}
765# endif	/* !CONFIG_NUMA_EMU */
766
767#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
768
769int __cpu_to_node(int cpu)
770{
771	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
772		printk(KERN_WARNING
773			"cpu_to_node(%d): usage too early!\n", cpu);
774		dump_stack();
775		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
776	}
777	return per_cpu(x86_cpu_to_node_map, cpu);
778}
779EXPORT_SYMBOL(__cpu_to_node);
780
781/*
782 * Same function as cpu_to_node() but used if called before the
783 * per_cpu areas are setup.
784 */
785int early_cpu_to_node(int cpu)
786{
787	if (early_per_cpu_ptr(x86_cpu_to_node_map))
788		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
789
790	if (!cpu_possible(cpu)) {
791		printk(KERN_WARNING
792			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
793		dump_stack();
794		return NUMA_NO_NODE;
795	}
796	return per_cpu(x86_cpu_to_node_map, cpu);
797}
798
799void debug_cpumask_set_cpu(int cpu, int node, bool enable)
800{
801	struct cpumask *mask;
802	char buf[64];
803
804	if (node == NUMA_NO_NODE) {
805		/* early_cpu_to_node() already emits a warning and trace */
806		return;
807	}
808	mask = node_to_cpumask_map[node];
809	if (!mask) {
810		pr_err("node_to_cpumask_map[%i] NULL\n", node);
811		dump_stack();
812		return;
813	}
814
815	if (enable)
816		cpumask_set_cpu(cpu, mask);
817	else
818		cpumask_clear_cpu(cpu, mask);
819
820	cpulist_scnprintf(buf, sizeof(buf), mask);
821	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
822		enable ? "numa_add_cpu" : "numa_remove_cpu",
823		cpu, node, buf);
824	return;
825}
826
827# ifndef CONFIG_NUMA_EMU
828static void numa_set_cpumask(int cpu, bool enable)
829{
830	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
831}
832
833void numa_add_cpu(int cpu)
834{
835	numa_set_cpumask(cpu, true);
836}
837
838void numa_remove_cpu(int cpu)
839{
840	numa_set_cpumask(cpu, false);
841}
842# endif	/* !CONFIG_NUMA_EMU */
843
844/*
845 * Returns a pointer to the bitmask of CPUs on Node 'node'.
846 */
847const struct cpumask *cpumask_of_node(int node)
848{
849	if (node >= nr_node_ids) {
850		printk(KERN_WARNING
851			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
852			node, nr_node_ids);
853		dump_stack();
854		return cpu_none_mask;
855	}
856	if (node_to_cpumask_map[node] == NULL) {
857		printk(KERN_WARNING
858			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
859			node);
860		dump_stack();
861		return cpu_online_mask;
862	}
863	return node_to_cpumask_map[node];
864}
865EXPORT_SYMBOL(cpumask_of_node);
866
867#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */
868
869#ifdef CONFIG_MEMORY_HOTPLUG
870int memory_add_physaddr_to_nid(u64 start)
871{
872	struct numa_meminfo *mi = &numa_meminfo;
873	int nid = mi->blk[0].nid;
874	int i;
875
876	for (i = 0; i < mi->nr_blks; i++)
877		if (mi->blk[i].start <= start && mi->blk[i].end > start)
878			nid = mi->blk[i].nid;
879	return nid;
880}
881EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
882#endif