arch/x86/mm/numa.c (v5.4)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Common code for 32 and 64-bit NUMA */
  3#include <linux/acpi.h>
  4#include <linux/kernel.h>
  5#include <linux/mm.h>
  6#include <linux/string.h>
  7#include <linux/init.h>
  8#include <linux/memblock.h>
  9#include <linux/mmzone.h>
 10#include <linux/ctype.h>
 11#include <linux/nodemask.h>
 12#include <linux/sched.h>
 13#include <linux/topology.h>
 14
 15#include <asm/e820/api.h>
 16#include <asm/proto.h>
 17#include <asm/dma.h>
 18#include <asm/amd_nb.h>
 19
 20#include "numa_internal.h"
 21
 22int numa_off;
 23nodemask_t numa_nodes_parsed __initdata;
 24
 25struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 26EXPORT_SYMBOL(node_data);
 27
 28static struct numa_meminfo numa_meminfo
 29#ifndef CONFIG_MEMORY_HOTPLUG
 30__initdata
 31#endif
 32;
 33
 34static int numa_distance_cnt;
 35static u8 *numa_distance;
 36
 37static __init int numa_setup(char *opt)
 38{
 39	if (!opt)
 40		return -EINVAL;
 41	if (!strncmp(opt, "off", 3))
 42		numa_off = 1;
 43#ifdef CONFIG_NUMA_EMU
 44	if (!strncmp(opt, "fake=", 5))
 45		numa_emu_cmdline(opt + 5);
 46#endif
 47#ifdef CONFIG_ACPI_NUMA
 48	if (!strncmp(opt, "noacpi", 6))
 49		acpi_numa = -1;
 50#endif
 51	return 0;
 52}
 53early_param("numa", numa_setup);
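
Illustration only: the kernel command-line forms accepted by the strncmp() checks in numa_setup() above. The split count after "fake=" is just one of the formats numa_emu_cmdline() understands and is an assumed example here.

    numa=off          disable NUMA handling entirely (sets numa_off)
    numa=fake=4       with CONFIG_NUMA_EMU, emulate 4 NUMA nodes
    numa=noacpi       with CONFIG_ACPI_NUMA, ignore the ACPI SRAT table
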
 54
 55/*
 56 * apicid, cpu, node mappings
 57 */
 58s16 __apicid_to_node[MAX_LOCAL_APIC] = {
 59	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 60};
 61
 62int numa_cpu_node(int cpu)
 63{
 64	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 65
 66	if (apicid != BAD_APICID)
 67		return __apicid_to_node[apicid];
 68	return NUMA_NO_NODE;
 69}
 70
 71cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
 72EXPORT_SYMBOL(node_to_cpumask_map);
 73
 74/*
 75 * Map cpu index to node index
 76 */
 77DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
 78EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
 79
 80void numa_set_node(int cpu, int node)
 81{
 82	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
 83
 84	/* early setting, no percpu area yet */
 85	if (cpu_to_node_map) {
 86		cpu_to_node_map[cpu] = node;
 87		return;
 88	}
 89
 90#ifdef CONFIG_DEBUG_PER_CPU_MAPS
 91	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
 92		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
 93		dump_stack();
 94		return;
 95	}
 96#endif
 97	per_cpu(x86_cpu_to_node_map, cpu) = node;
 98
 99	set_cpu_numa_node(cpu, node);
100}
101
102void numa_clear_node(int cpu)
103{
104	numa_set_node(cpu, NUMA_NO_NODE);
105}
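
numa_set_node() above takes one of two paths: before the per-cpu areas exist it writes the early x86_cpu_to_node_map array, afterwards the real per-cpu variable. A minimal sketch of how the mapping chain is normally fed, with a hypothetical helper name and made-up apicid/node values (not taken from the source):

static void __init example_map_one_cpu(int cpu)
{
	/* 1) a firmware parser (e.g. ACPI SRAT) records apicid -> node;
	 *    apicid 0x10 on node 1 is a hypothetical example */
	set_apicid_to_node(0x10, 1);

	/* 2) CPU setup later resolves cpu -> apicid -> node, much as
	 *    init_cpu_to_node() below does for every possible CPU */
	numa_set_node(cpu, numa_cpu_node(cpu));
}
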
106
107/*
108 * Allocate node_to_cpumask_map based on number of available nodes
109 * Requires node_possible_map to be valid.
110 *
111 * Note: cpumask_of_node() is not valid until after this is done.
112 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
113 */
114void __init setup_node_to_cpumask_map(void)
115{
116	unsigned int node;
117
118	/* setup nr_node_ids if not done yet */
119	if (nr_node_ids == MAX_NUMNODES)
120		setup_nr_node_ids();
121
122	/* allocate the map */
123	for (node = 0; node < nr_node_ids; node++)
124		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
125
126	/* cpumask_of_node() will now work */
127	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
128}
129
130static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
131				     struct numa_meminfo *mi)
132{
133	/* ignore zero length blks */
134	if (start == end)
135		return 0;
136
137	/* whine about and ignore invalid blks */
138	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
139		pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
140			nid, start, end - 1);
141		return 0;
142	}
143
144	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
145		pr_err("too many memblk ranges\n");
146		return -EINVAL;
147	}
148
149	mi->blk[mi->nr_blks].start = start;
150	mi->blk[mi->nr_blks].end = end;
151	mi->blk[mi->nr_blks].nid = nid;
152	mi->nr_blks++;
153	return 0;
154}
155
156/**
157 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
158 * @idx: Index of memblk to remove
159 * @mi: numa_meminfo to remove memblk from
160 *
161 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
162 * decrementing @mi->nr_blks.
163 */
164void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
165{
166	mi->nr_blks--;
167	memmove(&mi->blk[idx], &mi->blk[idx + 1],
168		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
169}
170
171/**
172 * numa_add_memblk - Add one numa_memblk to numa_meminfo
173 * @nid: NUMA node ID of the new memblk
174 * @start: Start address of the new memblk
175 * @end: End address of the new memblk
176 *
177 * Add a new memblk to the default numa_meminfo.
178 *
179 * RETURNS:
180 * 0 on success, -errno on failure.
181 */
182int __init numa_add_memblk(int nid, u64 start, u64 end)
183{
184	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
185}
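
numa_add_memblk() is the hook platform code (for instance the ACPI SRAT parser) uses to populate the default numa_meminfo. A minimal sketch with a hypothetical helper name and made-up node IDs and ranges:

static int __init example_register_ranges(void)
{
	/* end addresses are exclusive; each call appends one numa_memblk */
	node_set(0, numa_nodes_parsed);
	node_set(1, numa_nodes_parsed);

	if (numa_add_memblk(0, 0, 0x100000000ULL))			/* node 0: 0-4 GiB */
		return -EINVAL;
	if (numa_add_memblk(1, 0x100000000ULL, 0x200000000ULL))		/* node 1: 4-8 GiB */
		return -EINVAL;
	return 0;
}
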
186
 187/* Allocate NODE_DATA for a node, preferably on that node's local memory */
188static void __init alloc_node_data(int nid)
189{
190	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
191	u64 nd_pa;
192	void *nd;
193	int tnid;
194
195	/*
196	 * Allocate node data.  Try node-local memory and then any node.
197	 * Never allocate in DMA zone.
198	 */
199	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
200	if (!nd_pa) {
201		pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
202		       nd_size, nid);
 203		return;
 204	}
205	nd = __va(nd_pa);
206
207	/* report and initialize */
208	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
209	       nd_pa, nd_pa + nd_size - 1);
210	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
211	if (tnid != nid)
212		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);
213
214	node_data[nid] = nd;
215	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
216
217	node_set_online(nid);
218}
219
220/**
221 * numa_cleanup_meminfo - Cleanup a numa_meminfo
222 * @mi: numa_meminfo to clean up
223 *
224 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
225 * conflicts and clear unused memblks.
226 *
227 * RETURNS:
228 * 0 on success, -errno on failure.
229 */
230int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
231{
232	const u64 low = 0;
233	const u64 high = PFN_PHYS(max_pfn);
234	int i, j, k;
235
236	/* first, trim all entries */
237	for (i = 0; i < mi->nr_blks; i++) {
238		struct numa_memblk *bi = &mi->blk[i];
239
240		/* make sure all blocks are inside the limits */
241		bi->start = max(bi->start, low);
242		bi->end = min(bi->end, high);
243
 244		/* and there's no empty or non-existent block */
245		if (bi->start >= bi->end ||
246		    !memblock_overlaps_region(&memblock.memory,
247			bi->start, bi->end - bi->start))
248			numa_remove_memblk_from(i--, mi);
249	}
250
251	/* merge neighboring / overlapping entries */
252	for (i = 0; i < mi->nr_blks; i++) {
253		struct numa_memblk *bi = &mi->blk[i];
254
255		for (j = i + 1; j < mi->nr_blks; j++) {
256			struct numa_memblk *bj = &mi->blk[j];
257			u64 start, end;
258
259			/*
260			 * See whether there are overlapping blocks.  Whine
261			 * about but allow overlaps of the same nid.  They
262			 * will be merged below.
263			 */
264			if (bi->end > bj->start && bi->start < bj->end) {
265				if (bi->nid != bj->nid) {
266					pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
267					       bi->nid, bi->start, bi->end - 1,
268					       bj->nid, bj->start, bj->end - 1);
269					return -EINVAL;
270				}
271				pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
272					bi->nid, bi->start, bi->end - 1,
273					bj->start, bj->end - 1);
274			}
275
276			/*
277			 * Join together blocks on the same node, holes
278			 * between which don't overlap with memory on other
279			 * nodes.
280			 */
281			if (bi->nid != bj->nid)
282				continue;
283			start = min(bi->start, bj->start);
284			end = max(bi->end, bj->end);
285			for (k = 0; k < mi->nr_blks; k++) {
286				struct numa_memblk *bk = &mi->blk[k];
287
288				if (bi->nid == bk->nid)
289					continue;
290				if (start < bk->end && end > bk->start)
291					break;
292			}
293			if (k < mi->nr_blks)
294				continue;
295			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
296			       bi->nid, bi->start, bi->end - 1, bj->start,
297			       bj->end - 1, start, end - 1);
298			bi->start = start;
299			bi->end = end;
300			numa_remove_memblk_from(j--, mi);
301		}
302	}
303
304	/* clear unused ones */
305	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
306		mi->blk[i].start = mi->blk[i].end = 0;
307		mi->blk[i].nid = NUMA_NO_NODE;
308	}
309
310	return 0;
311}
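
A worked example (made-up ranges) of the merge pass above: two blocks on the same node are joined across the hole between them, provided no other node's memory falls inside the joined range.

	before cleanup:  node 0 [0x00000000-0x10000000)  node 0 [0x20000000-0x30000000)
	after  cleanup:  node 0 [0x00000000-0x30000000)

	If node 1 also owned [0x10000000-0x20000000), the inner k-loop would detect
	the conflict and the two node-0 blocks would stay separate.
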
312
313/*
 314 * Set the nodes that have memory in @mi in *@nodemask.
315 */
316static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
317					      const struct numa_meminfo *mi)
318{
319	int i;
320
321	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
322		if (mi->blk[i].start != mi->blk[i].end &&
323		    mi->blk[i].nid != NUMA_NO_NODE)
324			node_set(mi->blk[i].nid, *nodemask);
325}
326
327/**
328 * numa_reset_distance - Reset NUMA distance table
329 *
330 * The current table is freed.  The next numa_set_distance() call will
331 * create a new one.
332 */
333void __init numa_reset_distance(void)
334{
335	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);
336
337	/* numa_distance could be 1LU marking allocation failure, test cnt */
338	if (numa_distance_cnt)
339		memblock_free(__pa(numa_distance), size);
340	numa_distance_cnt = 0;
341	numa_distance = NULL;	/* enable table creation */
342}
343
344static int __init numa_alloc_distance(void)
345{
346	nodemask_t nodes_parsed;
347	size_t size;
348	int i, j, cnt = 0;
349	u64 phys;
350
351	/* size the new table and allocate it */
352	nodes_parsed = numa_nodes_parsed;
353	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);
354
355	for_each_node_mask(i, nodes_parsed)
356		cnt = i;
357	cnt++;
358	size = cnt * cnt * sizeof(numa_distance[0]);
359
360	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
361				      size, PAGE_SIZE);
362	if (!phys) {
363		pr_warn("Warning: can't allocate distance table!\n");
364		/* don't retry until explicitly reset */
365		numa_distance = (void *)1LU;
366		return -ENOMEM;
367	}
368	memblock_reserve(phys, size);
369
370	numa_distance = __va(phys);
371	numa_distance_cnt = cnt;
372
373	/* fill with the default distances */
374	for (i = 0; i < cnt; i++)
375		for (j = 0; j < cnt; j++)
376			numa_distance[i * cnt + j] = i == j ?
377				LOCAL_DISTANCE : REMOTE_DISTANCE;
378	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
379
380	return 0;
381}
382
383/**
 384 * numa_set_distance - Set the NUMA distance from one node to another
 385 * @from: the 'from' node to set distance
 386 * @to: the 'to' node to set distance
387 * @distance: NUMA distance
388 *
389 * Set the distance from node @from to @to to @distance.  If distance table
390 * doesn't exist, one which is large enough to accommodate all the currently
391 * known nodes will be created.
392 *
 393 * If such a table cannot be allocated, a warning is printed and further
394 * calls are ignored until the distance table is reset with
395 * numa_reset_distance().
396 *
 397 * If @from or @to is higher than the highest known node or lower than zero
 398 * at the time of table creation, or if @distance doesn't make sense, the
 399 * call is ignored.
400 * This is to allow simplification of specific NUMA config implementations.
401 */
402void __init numa_set_distance(int from, int to, int distance)
403{
404	if (!numa_distance && numa_alloc_distance() < 0)
405		return;
406
407	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
408			from < 0 || to < 0) {
409		pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
410			     from, to, distance);
411		return;
412	}
413
414	if ((u8)distance != distance ||
415	    (from == to && distance != LOCAL_DISTANCE)) {
416		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
417			     from, to, distance);
418		return;
419	}
420
421	numa_distance[from * numa_distance_cnt + to] = distance;
422}
423
424int __node_distance(int from, int to)
425{
426	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
427		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
428	return numa_distance[from * numa_distance_cnt + to];
429}
430EXPORT_SYMBOL(__node_distance);
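
The distance table is a flat cnt*cnt array of bytes indexed as numa_distance[from * numa_distance_cnt + to]. A small sketch with hypothetical SLIT-style values; note that numa_set_distance() above silently ignores values that do not fit in a u8 and self-distances other than LOCAL_DISTANCE:

static void __init example_fill_distances(void)
{
	numa_set_distance(0, 0, LOCAL_DISTANCE);	/* 10 */
	numa_set_distance(0, 1, 21);			/* assumed SLIT value */
	numa_set_distance(1, 0, 21);
	numa_set_distance(1, 1, LOCAL_DISTANCE);

	/* __node_distance(0, 1) now returns 21; nodes outside the table
	 * fall back to LOCAL_DISTANCE/REMOTE_DISTANCE as coded above. */
}
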
431
432/*
433 * Sanity check to catch more bad NUMA configurations (they are amazingly
434 * common).  Make sure the nodes cover all memory.
435 */
436static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
437{
438	u64 numaram, e820ram;
439	int i;
440
441	numaram = 0;
442	for (i = 0; i < mi->nr_blks; i++) {
443		u64 s = mi->blk[i].start >> PAGE_SHIFT;
444		u64 e = mi->blk[i].end >> PAGE_SHIFT;
445		numaram += e - s;
446		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
447		if ((s64)numaram < 0)
448			numaram = 0;
449	}
450
451	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
452
453	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
454	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
455		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
456		       (numaram << PAGE_SHIFT) >> 20,
457		       (e820ram << PAGE_SHIFT) >> 20);
458		return false;
459	}
460	return true;
461}
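
For the slack check above: with 4 KiB pages PAGE_SHIFT is 12, so (1 << (20 - PAGE_SHIFT)) is 256 pages, i.e. exactly 1 MiB. The NUMA layout is therefore rejected only when the e820 RAM exceeds the NUMA-covered RAM by at least that much.
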
462
463/*
464 * Mark all currently memblock-reserved physical memory (which covers the
465 * kernel's own memory ranges) as hot-unswappable.
466 */
467static void __init numa_clear_kernel_node_hotplug(void)
468{
469	nodemask_t reserved_nodemask = NODE_MASK_NONE;
470	struct memblock_region *mb_region;
471	int i;
472
473	/*
474	 * We have to do some preprocessing of memblock regions, to
475	 * make them suitable for reservation.
476	 *
477	 * At this time, all memory regions reserved by memblock are
478	 * used by the kernel, but those regions are not split up
479	 * along node boundaries yet, and don't necessarily have their
480	 * node ID set yet either.
481	 *
482	 * So iterate over all memory known to the x86 architecture,
483	 * and use those ranges to set the nid in memblock.reserved.
484	 * This will split up the memblock regions along node
485	 * boundaries and will set the node IDs as well.
486	 */
487	for (i = 0; i < numa_meminfo.nr_blks; i++) {
488		struct numa_memblk *mb = numa_meminfo.blk + i;
489		int ret;
490
491		ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
492		WARN_ON_ONCE(ret);
493	}
494
495	/*
496	 * Now go over all reserved memblock regions, to construct a
497	 * node mask of all kernel reserved memory areas.
498	 *
499	 * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
500	 *   numa_meminfo might not include all memblock.reserved
501	 *   memory ranges, because quirks such as trim_snb_memory()
502	 *   reserve specific pages for Sandy Bridge graphics. ]
503	 */
504	for_each_memblock(reserved, mb_region) {
505		if (mb_region->nid != MAX_NUMNODES)
506			node_set(mb_region->nid, reserved_nodemask);
507	}
508
509	/*
510	 * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
511	 * belonging to the reserved node mask.
512	 *
513	 * Note that this will include memory regions that reside
514	 * on nodes that contain kernel memory - entire nodes
515	 * become hot-unpluggable:
516	 */
517	for (i = 0; i < numa_meminfo.nr_blks; i++) {
518		struct numa_memblk *mb = numa_meminfo.blk + i;
519
520		if (!node_isset(mb->nid, reserved_nodemask))
521			continue;
522
523		memblock_clear_hotplug(mb->start, mb->end - mb->start);
524	}
525}
526
527static int __init numa_register_memblks(struct numa_meminfo *mi)
528{
529	unsigned long uninitialized_var(pfn_align);
530	int i, nid;
531
532	/* Account for nodes with cpus and no memory */
533	node_possible_map = numa_nodes_parsed;
534	numa_nodemask_from_meminfo(&node_possible_map, mi);
535	if (WARN_ON(nodes_empty(node_possible_map)))
536		return -EINVAL;
537
538	for (i = 0; i < mi->nr_blks; i++) {
539		struct numa_memblk *mb = &mi->blk[i];
540		memblock_set_node(mb->start, mb->end - mb->start,
541				  &memblock.memory, mb->nid);
542	}
543
544	/*
 545	 * Very early on, the kernel has to use some memory, for example to
 546	 * load the kernel image. We cannot prevent this anyway, so any
 547	 * node the kernel resides in should be un-hotpluggable.
 548	 *
 549	 * And by the time we get here, allocating node data won't fail.
550	 */
551	numa_clear_kernel_node_hotplug();
552
553	/*
 554	 * If the sections array is going to be used for pfn -> nid mapping,
 555	 * check whether its granularity is fine enough.
556	 */
557#ifdef NODE_NOT_IN_PAGE_FLAGS
558	pfn_align = node_map_pfn_alignment();
559	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
560		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
561		       PFN_PHYS(pfn_align) >> 20,
562		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
563		return -EINVAL;
564	}
565#endif
566	if (!numa_meminfo_cover_memory(mi))
567		return -EINVAL;
568
569	/* Finally register nodes. */
570	for_each_node_mask(nid, node_possible_map) {
571		u64 start = PFN_PHYS(max_pfn);
572		u64 end = 0;
573
574		for (i = 0; i < mi->nr_blks; i++) {
575			if (nid != mi->blk[i].nid)
576				continue;
577			start = min(mi->blk[i].start, start);
578			end = max(mi->blk[i].end, end);
579		}
580
581		if (start >= end)
582			continue;
583
584		/*
585		 * Don't confuse VM with a node that doesn't have the
586		 * minimum amount of memory:
587		 */
588		if (end && (end - start) < NODE_MIN_SIZE)
589			continue;
590
591		alloc_node_data(nid);
592	}
593
594	/* Dump memblock with node info and return. */
595	memblock_dump_all();
596	return 0;
597}
598
599/*
600 * There are unfortunately some poorly designed mainboards around that
601 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 602 * mapping. To avoid this, fill in the mapping for all possible CPUs,
 603 * as the number of CPUs is not known yet. We round-robin over the
 604 * existing nodes.
605 */
606static void __init numa_init_array(void)
607{
608	int rr, i;
609
610	rr = first_node(node_online_map);
611	for (i = 0; i < nr_cpu_ids; i++) {
612		if (early_cpu_to_node(i) != NUMA_NO_NODE)
613			continue;
614		numa_set_node(i, rr);
615		rr = next_node_in(rr, node_online_map);
616	}
617}
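
A worked example of the round-robin fallback above (hypothetical topology): with nodes 0 and 1 online and four possible CPUs left unmapped by firmware, the loop assigns cpu0->node0, cpu1->node1, cpu2->node0, cpu3->node1, since next_node_in() wraps around the online node mask after the last node.
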
618
619static int __init numa_init(int (*init_func)(void))
620{
621	int i;
622	int ret;
623
624	for (i = 0; i < MAX_LOCAL_APIC; i++)
625		set_apicid_to_node(i, NUMA_NO_NODE);
626
627	nodes_clear(numa_nodes_parsed);
628	nodes_clear(node_possible_map);
629	nodes_clear(node_online_map);
630	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
631	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
632				  MAX_NUMNODES));
633	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
634				  MAX_NUMNODES));
635	/* In case that parsing SRAT failed. */
636	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
637	numa_reset_distance();
638
639	ret = init_func();
640	if (ret < 0)
641		return ret;
642
643	/*
644	 * We reset memblock back to the top-down direction
645	 * here because if we configured ACPI_NUMA, we have
646	 * parsed SRAT in init_func(). It is ok to have the
 647	 * reset here even if we didn't configure ACPI_NUMA,
 648	 * or if ACPI NUMA init fails and falls back to dummy
 649	 * NUMA init.
650	 */
651	memblock_set_bottom_up(false);
652
653	ret = numa_cleanup_meminfo(&numa_meminfo);
654	if (ret < 0)
655		return ret;
656
657	numa_emulation(&numa_meminfo, numa_distance_cnt);
658
659	ret = numa_register_memblks(&numa_meminfo);
660	if (ret < 0)
661		return ret;
662
663	for (i = 0; i < nr_cpu_ids; i++) {
664		int nid = early_cpu_to_node(i);
665
666		if (nid == NUMA_NO_NODE)
667			continue;
668		if (!node_online(nid))
669			numa_clear_node(i);
670	}
671	numa_init_array();
672
673	return 0;
674}
675
676/**
677 * dummy_numa_init - Fallback dummy NUMA init
678 *
679 * Used if there's no underlying NUMA architecture, NUMA initialization
680 * fails, or NUMA is disabled on the command line.
681 *
682 * Must online at least one node and add memory blocks that cover all
683 * allowed memory.  This function must not fail.
684 */
685static int __init dummy_numa_init(void)
686{
687	printk(KERN_INFO "%s\n",
688	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
689	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
690	       0LLU, PFN_PHYS(max_pfn) - 1);
691
692	node_set(0, numa_nodes_parsed);
693	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));
694
695	return 0;
696}
697
698/**
699 * x86_numa_init - Initialize NUMA
700 *
701 * Try each configured NUMA initialization method until one succeeds.  The
 702 * last fallback is a dummy single-node config encompassing all memory,
 703 * which never fails.
704 */
705void __init x86_numa_init(void)
706{
707	if (!numa_off) {
708#ifdef CONFIG_ACPI_NUMA
709		if (!numa_init(x86_acpi_numa_init))
710			return;
711#endif
712#ifdef CONFIG_AMD_NUMA
713		if (!numa_init(amd_numa_init))
714			return;
715#endif
716	}
717
718	numa_init(dummy_numa_init);
719}
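
So the probe order attempted here is: the ACPI SRAT/SLIT parser (x86_acpi_numa_init) when CONFIG_ACPI_NUMA is enabled, then the AMD northbridge scan (amd_numa_init) when CONFIG_AMD_NUMA is enabled, and finally dummy_numa_init(), which always succeeds with a single node covering all of memory.
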
720
721static void __init init_memory_less_node(int nid)
722{
723	unsigned long zones_size[MAX_NR_ZONES] = {0};
724	unsigned long zholes_size[MAX_NR_ZONES] = {0};
725
726	/* Allocate and initialize node data. Memory-less node is now online.*/
727	alloc_node_data(nid);
728	free_area_init_node(nid, zones_size, 0, zholes_size);
729
730	/*
731	 * All zonelists will be built later in start_kernel() after per cpu
732	 * areas are initialized.
733	 */
734}
735
736/*
 737 * Set up early cpu_to_node.
 738 *
 739 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[]
 740 * and apicid_to_node[] tables have valid entries for a CPU.
 741 * This means we skip cpu_to_node[] initialisation for NUMA
 742 * emulation and the fake-node case (when running a kernel compiled
 743 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 744 * is already initialized in a round-robin manner by numa_init_array(),
 745 * prior to this call, and this initialization is good enough
 746 * for the fake NUMA cases.
747 *
748 * Called before the per_cpu areas are setup.
749 */
750void __init init_cpu_to_node(void)
751{
752	int cpu;
753	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
754
755	BUG_ON(cpu_to_apicid == NULL);
756
757	for_each_possible_cpu(cpu) {
758		int node = numa_cpu_node(cpu);
759
760		if (node == NUMA_NO_NODE)
761			continue;
762
763		if (!node_online(node))
764			init_memory_less_node(node);
765
766		numa_set_node(cpu, node);
767	}
768}
769
770#ifndef CONFIG_DEBUG_PER_CPU_MAPS
771
772# ifndef CONFIG_NUMA_EMU
773void numa_add_cpu(int cpu)
774{
775	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
776}
777
778void numa_remove_cpu(int cpu)
779{
780	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
781}
782# endif	/* !CONFIG_NUMA_EMU */
783
784#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
785
786int __cpu_to_node(int cpu)
787{
788	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
789		printk(KERN_WARNING
790			"cpu_to_node(%d): usage too early!\n", cpu);
791		dump_stack();
792		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
793	}
794	return per_cpu(x86_cpu_to_node_map, cpu);
795}
796EXPORT_SYMBOL(__cpu_to_node);
797
798/*
799 * Same function as cpu_to_node() but used if called before the
800 * per_cpu areas are setup.
801 */
802int early_cpu_to_node(int cpu)
803{
804	if (early_per_cpu_ptr(x86_cpu_to_node_map))
805		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
806
807	if (!cpu_possible(cpu)) {
808		printk(KERN_WARNING
809			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
810		dump_stack();
811		return NUMA_NO_NODE;
812	}
813	return per_cpu(x86_cpu_to_node_map, cpu);
814}
815
816void debug_cpumask_set_cpu(int cpu, int node, bool enable)
817{
818	struct cpumask *mask;
819
820	if (node == NUMA_NO_NODE) {
821		/* early_cpu_to_node() already emits a warning and trace */
822		return;
823	}
824	mask = node_to_cpumask_map[node];
825	if (!mask) {
826		pr_err("node_to_cpumask_map[%i] NULL\n", node);
827		dump_stack();
828		return;
829	}
830
831	if (enable)
832		cpumask_set_cpu(cpu, mask);
833	else
834		cpumask_clear_cpu(cpu, mask);
835
836	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
837		enable ? "numa_add_cpu" : "numa_remove_cpu",
838		cpu, node, cpumask_pr_args(mask));
839	return;
840}
841
842# ifndef CONFIG_NUMA_EMU
843static void numa_set_cpumask(int cpu, bool enable)
844{
845	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
846}
847
848void numa_add_cpu(int cpu)
849{
850	numa_set_cpumask(cpu, true);
851}
852
853void numa_remove_cpu(int cpu)
854{
855	numa_set_cpumask(cpu, false);
856}
857# endif	/* !CONFIG_NUMA_EMU */
858
859/*
860 * Returns a pointer to the bitmask of CPUs on Node 'node'.
861 */
862const struct cpumask *cpumask_of_node(int node)
863{
864	if ((unsigned)node >= nr_node_ids) {
865		printk(KERN_WARNING
866			"cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
867			node, nr_node_ids);
868		dump_stack();
869		return cpu_none_mask;
870	}
871	if (node_to_cpumask_map[node] == NULL) {
872		printk(KERN_WARNING
873			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
874			node);
875		dump_stack();
876		return cpu_online_mask;
877	}
878	return node_to_cpumask_map[node];
879}
880EXPORT_SYMBOL(cpumask_of_node);
881
882#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */
883
884#ifdef CONFIG_MEMORY_HOTPLUG
885int memory_add_physaddr_to_nid(u64 start)
886{
887	struct numa_meminfo *mi = &numa_meminfo;
888	int nid = mi->blk[0].nid;
889	int i;
890
891	for (i = 0; i < mi->nr_blks; i++)
892		if (mi->blk[i].start <= start && mi->blk[i].end > start)
893			nid = mi->blk[i].nid;
894	return nid;
895}
896EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
897#endif
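
A quick illustration of the lookup above, with made-up ranges: if numa_meminfo holds node 0 [0-4 GiB) and node 1 [4-8 GiB), then memory_add_physaddr_to_nid(0x180000000) returns 1, while an address outside every recorded block falls back to mi->blk[0].nid, i.e. node 0 in this example.
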
arch/x86/mm/numa.c (v4.10.11)
 
  1/* Common code for 32 and 64-bit NUMA */
  2#include <linux/acpi.h>
  3#include <linux/kernel.h>
  4#include <linux/mm.h>
  5#include <linux/string.h>
  6#include <linux/init.h>
  7#include <linux/bootmem.h>
  8#include <linux/memblock.h>
  9#include <linux/mmzone.h>
 10#include <linux/ctype.h>
 11#include <linux/nodemask.h>
 12#include <linux/sched.h>
 13#include <linux/topology.h>
 14
 15#include <asm/e820.h>
 16#include <asm/proto.h>
 17#include <asm/dma.h>
 18#include <asm/amd_nb.h>
 19
 20#include "numa_internal.h"
 21
 22int numa_off;
 23nodemask_t numa_nodes_parsed __initdata;
 24
 25struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 26EXPORT_SYMBOL(node_data);
 27
 28static struct numa_meminfo numa_meminfo
 29#ifndef CONFIG_MEMORY_HOTPLUG
 30__initdata
 31#endif
 32;
 33
 34static int numa_distance_cnt;
 35static u8 *numa_distance;
 36
 37static __init int numa_setup(char *opt)
 38{
 39	if (!opt)
 40		return -EINVAL;
 41	if (!strncmp(opt, "off", 3))
 42		numa_off = 1;
 43#ifdef CONFIG_NUMA_EMU
 44	if (!strncmp(opt, "fake=", 5))
 45		numa_emu_cmdline(opt + 5);
 46#endif
 47#ifdef CONFIG_ACPI_NUMA
 48	if (!strncmp(opt, "noacpi", 6))
 49		acpi_numa = -1;
 50#endif
 51	return 0;
 52}
 53early_param("numa", numa_setup);
 54
 55/*
 56 * apicid, cpu, node mappings
 57 */
 58s16 __apicid_to_node[MAX_LOCAL_APIC] = {
 59	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 60};
 61
 62int numa_cpu_node(int cpu)
 63{
 64	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 65
 66	if (apicid != BAD_APICID)
 67		return __apicid_to_node[apicid];
 68	return NUMA_NO_NODE;
 69}
 70
 71cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
 72EXPORT_SYMBOL(node_to_cpumask_map);
 73
 74/*
 75 * Map cpu index to node index
 76 */
 77DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
 78EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
 79
 80void numa_set_node(int cpu, int node)
 81{
 82	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
 83
 84	/* early setting, no percpu area yet */
 85	if (cpu_to_node_map) {
 86		cpu_to_node_map[cpu] = node;
 87		return;
 88	}
 89
 90#ifdef CONFIG_DEBUG_PER_CPU_MAPS
 91	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
 92		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
 93		dump_stack();
 94		return;
 95	}
 96#endif
 97	per_cpu(x86_cpu_to_node_map, cpu) = node;
 98
 99	set_cpu_numa_node(cpu, node);
100}
101
102void numa_clear_node(int cpu)
103{
104	numa_set_node(cpu, NUMA_NO_NODE);
105}
106
107/*
108 * Allocate node_to_cpumask_map based on number of available nodes
109 * Requires node_possible_map to be valid.
110 *
111 * Note: cpumask_of_node() is not valid until after this is done.
112 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
113 */
114void __init setup_node_to_cpumask_map(void)
115{
116	unsigned int node;
117
118	/* setup nr_node_ids if not done yet */
119	if (nr_node_ids == MAX_NUMNODES)
120		setup_nr_node_ids();
121
122	/* allocate the map */
123	for (node = 0; node < nr_node_ids; node++)
124		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
125
126	/* cpumask_of_node() will now work */
127	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
128}
129
130static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
131				     struct numa_meminfo *mi)
132{
133	/* ignore zero length blks */
134	if (start == end)
135		return 0;
136
137	/* whine about and ignore invalid blks */
138	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
139		pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
140			   nid, start, end - 1);
141		return 0;
142	}
143
144	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
145		pr_err("NUMA: too many memblk ranges\n");
146		return -EINVAL;
147	}
148
149	mi->blk[mi->nr_blks].start = start;
150	mi->blk[mi->nr_blks].end = end;
151	mi->blk[mi->nr_blks].nid = nid;
152	mi->nr_blks++;
153	return 0;
154}
155
156/**
157 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
158 * @idx: Index of memblk to remove
159 * @mi: numa_meminfo to remove memblk from
160 *
161 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
162 * decrementing @mi->nr_blks.
163 */
164void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
165{
166	mi->nr_blks--;
167	memmove(&mi->blk[idx], &mi->blk[idx + 1],
168		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
169}
170
171/**
172 * numa_add_memblk - Add one numa_memblk to numa_meminfo
173 * @nid: NUMA node ID of the new memblk
174 * @start: Start address of the new memblk
175 * @end: End address of the new memblk
176 *
177 * Add a new memblk to the default numa_meminfo.
178 *
179 * RETURNS:
180 * 0 on success, -errno on failure.
181 */
182int __init numa_add_memblk(int nid, u64 start, u64 end)
183{
184	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
185}
186
187/* Allocate NODE_DATA for a node on the local memory */
188static void __init alloc_node_data(int nid)
189{
190	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
191	u64 nd_pa;
192	void *nd;
193	int tnid;
194
195	/*
196	 * Allocate node data.  Try node-local memory and then any node.
197	 * Never allocate in DMA zone.
198	 */
199	nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
200	if (!nd_pa) {
201		nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
202					      MEMBLOCK_ALLOC_ACCESSIBLE);
203		if (!nd_pa) {
204			pr_err("Cannot find %zu bytes in node %d\n",
205			       nd_size, nid);
206			return;
207		}
208	}
209	nd = __va(nd_pa);
210
211	/* report and initialize */
212	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
213	       nd_pa, nd_pa + nd_size - 1);
214	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
215	if (tnid != nid)
216		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);
217
218	node_data[nid] = nd;
219	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
220
221	node_set_online(nid);
222}
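
Note how this older version spells the fallback out explicitly: it first asks memblock for node-local memory with memblock_alloc_nid() and only then retries anywhere accessible via __memblock_alloc_base(), whereas the v5.4 listing above folds the same try-node-then-anywhere behaviour into the single memblock_phys_alloc_try_nid() call.
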
223
224/**
225 * numa_cleanup_meminfo - Cleanup a numa_meminfo
226 * @mi: numa_meminfo to clean up
227 *
 228 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
229 * conflicts and clear unused memblks.
230 *
231 * RETURNS:
232 * 0 on success, -errno on failure.
233 */
234int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
235{
236	const u64 low = 0;
237	const u64 high = PFN_PHYS(max_pfn);
238	int i, j, k;
239
240	/* first, trim all entries */
241	for (i = 0; i < mi->nr_blks; i++) {
242		struct numa_memblk *bi = &mi->blk[i];
243
244		/* make sure all blocks are inside the limits */
245		bi->start = max(bi->start, low);
246		bi->end = min(bi->end, high);
247
 248		/* and there's no empty or non-existent block */
249		if (bi->start >= bi->end ||
250		    !memblock_overlaps_region(&memblock.memory,
251			bi->start, bi->end - bi->start))
252			numa_remove_memblk_from(i--, mi);
253	}
254
255	/* merge neighboring / overlapping entries */
256	for (i = 0; i < mi->nr_blks; i++) {
257		struct numa_memblk *bi = &mi->blk[i];
258
259		for (j = i + 1; j < mi->nr_blks; j++) {
260			struct numa_memblk *bj = &mi->blk[j];
261			u64 start, end;
262
263			/*
264			 * See whether there are overlapping blocks.  Whine
265			 * about but allow overlaps of the same nid.  They
266			 * will be merged below.
267			 */
268			if (bi->end > bj->start && bi->start < bj->end) {
269				if (bi->nid != bj->nid) {
270					pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
271					       bi->nid, bi->start, bi->end - 1,
272					       bj->nid, bj->start, bj->end - 1);
273					return -EINVAL;
274				}
275				pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
276					   bi->nid, bi->start, bi->end - 1,
277					   bj->start, bj->end - 1);
278			}
279
280			/*
281			 * Join together blocks on the same node, holes
282			 * between which don't overlap with memory on other
283			 * nodes.
284			 */
285			if (bi->nid != bj->nid)
286				continue;
287			start = min(bi->start, bj->start);
288			end = max(bi->end, bj->end);
289			for (k = 0; k < mi->nr_blks; k++) {
290				struct numa_memblk *bk = &mi->blk[k];
291
292				if (bi->nid == bk->nid)
293					continue;
294				if (start < bk->end && end > bk->start)
295					break;
296			}
297			if (k < mi->nr_blks)
298				continue;
299			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
300			       bi->nid, bi->start, bi->end - 1, bj->start,
301			       bj->end - 1, start, end - 1);
302			bi->start = start;
303			bi->end = end;
304			numa_remove_memblk_from(j--, mi);
305		}
306	}
307
308	/* clear unused ones */
309	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
310		mi->blk[i].start = mi->blk[i].end = 0;
311		mi->blk[i].nid = NUMA_NO_NODE;
312	}
313
314	return 0;
315}
316
317/*
318 * Set nodes, which have memory in @mi, in *@nodemask.
319 */
320static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
321					      const struct numa_meminfo *mi)
322{
323	int i;
324
325	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
326		if (mi->blk[i].start != mi->blk[i].end &&
327		    mi->blk[i].nid != NUMA_NO_NODE)
328			node_set(mi->blk[i].nid, *nodemask);
329}
330
331/**
332 * numa_reset_distance - Reset NUMA distance table
333 *
334 * The current table is freed.  The next numa_set_distance() call will
335 * create a new one.
336 */
337void __init numa_reset_distance(void)
338{
339	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);
340
341	/* numa_distance could be 1LU marking allocation failure, test cnt */
342	if (numa_distance_cnt)
343		memblock_free(__pa(numa_distance), size);
344	numa_distance_cnt = 0;
345	numa_distance = NULL;	/* enable table creation */
346}
347
348static int __init numa_alloc_distance(void)
349{
350	nodemask_t nodes_parsed;
351	size_t size;
352	int i, j, cnt = 0;
353	u64 phys;
354
355	/* size the new table and allocate it */
356	nodes_parsed = numa_nodes_parsed;
357	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);
358
359	for_each_node_mask(i, nodes_parsed)
360		cnt = i;
361	cnt++;
362	size = cnt * cnt * sizeof(numa_distance[0]);
363
364	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
365				      size, PAGE_SIZE);
366	if (!phys) {
367		pr_warning("NUMA: Warning: can't allocate distance table!\n");
368		/* don't retry until explicitly reset */
369		numa_distance = (void *)1LU;
370		return -ENOMEM;
371	}
372	memblock_reserve(phys, size);
373
374	numa_distance = __va(phys);
375	numa_distance_cnt = cnt;
376
377	/* fill with the default distances */
378	for (i = 0; i < cnt; i++)
379		for (j = 0; j < cnt; j++)
380			numa_distance[i * cnt + j] = i == j ?
381				LOCAL_DISTANCE : REMOTE_DISTANCE;
382	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
383
384	return 0;
385}
386
387/**
388 * numa_set_distance - Set NUMA distance from one NUMA to another
389 * @from: the 'from' node to set distance
390 * @to: the 'to'  node to set distance
391 * @distance: NUMA distance
392 *
393 * Set the distance from node @from to @to to @distance.  If distance table
394 * doesn't exist, one which is large enough to accommodate all the currently
395 * known nodes will be created.
396 *
397 * If such table cannot be allocated, a warning is printed and further
398 * calls are ignored until the distance table is reset with
399 * numa_reset_distance().
400 *
401 * If @from or @to is higher than the highest known node or lower than zero
402 * at the time of table creation or @distance doesn't make sense, the call
403 * is ignored.
404 * This is to allow simplification of specific NUMA config implementations.
405 */
406void __init numa_set_distance(int from, int to, int distance)
407{
408	if (!numa_distance && numa_alloc_distance() < 0)
409		return;
410
411	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
412			from < 0 || to < 0) {
413		pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
414			    from, to, distance);
415		return;
416	}
417
418	if ((u8)distance != distance ||
419	    (from == to && distance != LOCAL_DISTANCE)) {
420		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
421			     from, to, distance);
422		return;
423	}
424
425	numa_distance[from * numa_distance_cnt + to] = distance;
426}
427
428int __node_distance(int from, int to)
429{
430	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
431		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
432	return numa_distance[from * numa_distance_cnt + to];
433}
434EXPORT_SYMBOL(__node_distance);
435
436/*
437 * Sanity check to catch more bad NUMA configurations (they are amazingly
438 * common).  Make sure the nodes cover all memory.
439 */
440static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
441{
442	u64 numaram, e820ram;
443	int i;
444
445	numaram = 0;
446	for (i = 0; i < mi->nr_blks; i++) {
447		u64 s = mi->blk[i].start >> PAGE_SHIFT;
448		u64 e = mi->blk[i].end >> PAGE_SHIFT;
449		numaram += e - s;
450		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
451		if ((s64)numaram < 0)
452			numaram = 0;
453	}
454
455	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
456
457	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
458	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
459		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
460		       (numaram << PAGE_SHIFT) >> 20,
461		       (e820ram << PAGE_SHIFT) >> 20);
462		return false;
463	}
464	return true;
465}
466
467/*
468 * Mark all currently memblock-reserved physical memory (which covers the
469 * kernel's own memory ranges) as hot-unswappable.
470 */
471static void __init numa_clear_kernel_node_hotplug(void)
472{
473	nodemask_t reserved_nodemask = NODE_MASK_NONE;
474	struct memblock_region *mb_region;
475	int i;
476
477	/*
478	 * We have to do some preprocessing of memblock regions, to
479	 * make them suitable for reservation.
480	 *
481	 * At this time, all memory regions reserved by memblock are
482	 * used by the kernel, but those regions are not split up
483	 * along node boundaries yet, and don't necessarily have their
484	 * node ID set yet either.
485	 *
486	 * So iterate over all memory known to the x86 architecture,
487	 * and use those ranges to set the nid in memblock.reserved.
488	 * This will split up the memblock regions along node
489	 * boundaries and will set the node IDs as well.
490	 */
491	for (i = 0; i < numa_meminfo.nr_blks; i++) {
492		struct numa_memblk *mb = numa_meminfo.blk + i;
493		int ret;
494
495		ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
496		WARN_ON_ONCE(ret);
497	}
498
499	/*
500	 * Now go over all reserved memblock regions, to construct a
501	 * node mask of all kernel reserved memory areas.
502	 *
503	 * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
504	 *   numa_meminfo might not include all memblock.reserved
505	 *   memory ranges, because quirks such as trim_snb_memory()
506	 *   reserve specific pages for Sandy Bridge graphics. ]
507	 */
508	for_each_memblock(reserved, mb_region) {
509		if (mb_region->nid != MAX_NUMNODES)
510			node_set(mb_region->nid, reserved_nodemask);
511	}
512
513	/*
514	 * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
515	 * belonging to the reserved node mask.
516	 *
517	 * Note that this will include memory regions that reside
518	 * on nodes that contain kernel memory - entire nodes
519	 * become hot-unpluggable:
520	 */
521	for (i = 0; i < numa_meminfo.nr_blks; i++) {
522		struct numa_memblk *mb = numa_meminfo.blk + i;
523
524		if (!node_isset(mb->nid, reserved_nodemask))
525			continue;
526
527		memblock_clear_hotplug(mb->start, mb->end - mb->start);
528	}
529}
530
531static int __init numa_register_memblks(struct numa_meminfo *mi)
532{
533	unsigned long uninitialized_var(pfn_align);
534	int i, nid;
535
536	/* Account for nodes with cpus and no memory */
537	node_possible_map = numa_nodes_parsed;
538	numa_nodemask_from_meminfo(&node_possible_map, mi);
539	if (WARN_ON(nodes_empty(node_possible_map)))
540		return -EINVAL;
541
542	for (i = 0; i < mi->nr_blks; i++) {
543		struct numa_memblk *mb = &mi->blk[i];
544		memblock_set_node(mb->start, mb->end - mb->start,
545				  &memblock.memory, mb->nid);
546	}
547
548	/*
 549	 * Very early on, the kernel has to use some memory, for example to
 550	 * load the kernel image. We cannot prevent this anyway, so any
 551	 * node the kernel resides in should be un-hotpluggable.
 552	 *
 553	 * And by the time we get here, allocating node data won't fail.
554	 */
555	numa_clear_kernel_node_hotplug();
556
557	/*
 558	 * If the sections array is going to be used for pfn -> nid mapping,
 559	 * check whether its granularity is fine enough.
560	 */
561#ifdef NODE_NOT_IN_PAGE_FLAGS
562	pfn_align = node_map_pfn_alignment();
563	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
564		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
565		       PFN_PHYS(pfn_align) >> 20,
566		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
567		return -EINVAL;
568	}
569#endif
570	if (!numa_meminfo_cover_memory(mi))
571		return -EINVAL;
572
573	/* Finally register nodes. */
574	for_each_node_mask(nid, node_possible_map) {
575		u64 start = PFN_PHYS(max_pfn);
576		u64 end = 0;
577
578		for (i = 0; i < mi->nr_blks; i++) {
579			if (nid != mi->blk[i].nid)
580				continue;
581			start = min(mi->blk[i].start, start);
582			end = max(mi->blk[i].end, end);
583		}
584
585		if (start >= end)
586			continue;
587
588		/*
589		 * Don't confuse VM with a node that doesn't have the
590		 * minimum amount of memory:
591		 */
592		if (end && (end - start) < NODE_MIN_SIZE)
593			continue;
594
595		alloc_node_data(nid);
596	}
597
598	/* Dump memblock with node info and return. */
599	memblock_dump_all();
600	return 0;
601}
602
603/*
604 * There are unfortunately some poorly designed mainboards around that
605 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
606 * mapping. To avoid this fill in the mapping for all possible CPUs,
607 * as the number of CPUs is not known yet. We round robin the existing
608 * nodes.
609 */
610static void __init numa_init_array(void)
611{
612	int rr, i;
613
614	rr = first_node(node_online_map);
615	for (i = 0; i < nr_cpu_ids; i++) {
616		if (early_cpu_to_node(i) != NUMA_NO_NODE)
617			continue;
618		numa_set_node(i, rr);
619		rr = next_node_in(rr, node_online_map);
620	}
621}
622
623static int __init numa_init(int (*init_func)(void))
624{
625	int i;
626	int ret;
627
628	for (i = 0; i < MAX_LOCAL_APIC; i++)
629		set_apicid_to_node(i, NUMA_NO_NODE);
630
631	nodes_clear(numa_nodes_parsed);
632	nodes_clear(node_possible_map);
633	nodes_clear(node_online_map);
634	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
635	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
636				  MAX_NUMNODES));
637	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
638				  MAX_NUMNODES));
639	/* In case that parsing SRAT failed. */
640	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
641	numa_reset_distance();
642
643	ret = init_func();
644	if (ret < 0)
645		return ret;
646
647	/*
648	 * We reset memblock back to the top-down direction
649	 * here because if we configured ACPI_NUMA, we have
650	 * parsed SRAT in init_func(). It is ok to have the
 651	 * reset here even if we didn't configure ACPI_NUMA,
 652	 * or if ACPI NUMA init fails and falls back to dummy
 653	 * NUMA init.
654	 */
655	memblock_set_bottom_up(false);
656
657	ret = numa_cleanup_meminfo(&numa_meminfo);
658	if (ret < 0)
659		return ret;
660
661	numa_emulation(&numa_meminfo, numa_distance_cnt);
662
663	ret = numa_register_memblks(&numa_meminfo);
664	if (ret < 0)
665		return ret;
666
667	for (i = 0; i < nr_cpu_ids; i++) {
668		int nid = early_cpu_to_node(i);
669
670		if (nid == NUMA_NO_NODE)
671			continue;
672		if (!node_online(nid))
673			numa_clear_node(i);
674	}
675	numa_init_array();
676
677	return 0;
678}
679
680/**
681 * dummy_numa_init - Fallback dummy NUMA init
682 *
683 * Used if there's no underlying NUMA architecture, NUMA initialization
684 * fails, or NUMA is disabled on the command line.
685 *
686 * Must online at least one node and add memory blocks that cover all
687 * allowed memory.  This function must not fail.
688 */
689static int __init dummy_numa_init(void)
690{
691	printk(KERN_INFO "%s\n",
692	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
693	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
694	       0LLU, PFN_PHYS(max_pfn) - 1);
695
696	node_set(0, numa_nodes_parsed);
697	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));
698
699	return 0;
700}
701
702/**
703 * x86_numa_init - Initialize NUMA
704 *
705 * Try each configured NUMA initialization method until one succeeds.  The
 706 * last fallback is a dummy single-node config encompassing all memory,
 707 * which never fails.
708 */
709void __init x86_numa_init(void)
710{
711	if (!numa_off) {
712#ifdef CONFIG_ACPI_NUMA
713		if (!numa_init(x86_acpi_numa_init))
714			return;
715#endif
716#ifdef CONFIG_AMD_NUMA
717		if (!numa_init(amd_numa_init))
718			return;
719#endif
720	}
721
722	numa_init(dummy_numa_init);
723}
724
725static void __init init_memory_less_node(int nid)
726{
727	unsigned long zones_size[MAX_NR_ZONES] = {0};
728	unsigned long zholes_size[MAX_NR_ZONES] = {0};
729
730	/* Allocate and initialize node data. Memory-less node is now online.*/
731	alloc_node_data(nid);
732	free_area_init_node(nid, zones_size, 0, zholes_size);
733
734	/*
735	 * All zonelists will be built later in start_kernel() after per cpu
736	 * areas are initialized.
737	 */
738}
739
740/*
741 * Setup early cpu_to_node.
742 *
743 * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
744 * and apicid_to_node[] tables have valid entries for a CPU.
745 * This means we skip cpu_to_node[] initialisation for NUMA
746 * emulation and faking node case (when running a kernel compiled
747 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
748 * is already initialized in a round robin manner at numa_init_array,
749 * prior to this call, and this initialization is good enough
750 * for the fake NUMA cases.
751 *
752 * Called before the per_cpu areas are setup.
753 */
754void __init init_cpu_to_node(void)
755{
756	int cpu;
757	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
758
759	BUG_ON(cpu_to_apicid == NULL);
760
761	for_each_possible_cpu(cpu) {
762		int node = numa_cpu_node(cpu);
763
764		if (node == NUMA_NO_NODE)
765			continue;
766
767		if (!node_online(node))
768			init_memory_less_node(node);
769
770		numa_set_node(cpu, node);
771	}
772}
773
774#ifndef CONFIG_DEBUG_PER_CPU_MAPS
775
776# ifndef CONFIG_NUMA_EMU
777void numa_add_cpu(int cpu)
778{
779	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
780}
781
782void numa_remove_cpu(int cpu)
783{
784	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
785}
786# endif	/* !CONFIG_NUMA_EMU */
787
788#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
789
790int __cpu_to_node(int cpu)
791{
792	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
793		printk(KERN_WARNING
794			"cpu_to_node(%d): usage too early!\n", cpu);
795		dump_stack();
796		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
797	}
798	return per_cpu(x86_cpu_to_node_map, cpu);
799}
800EXPORT_SYMBOL(__cpu_to_node);
801
802/*
803 * Same function as cpu_to_node() but used if called before the
804 * per_cpu areas are setup.
805 */
806int early_cpu_to_node(int cpu)
807{
808	if (early_per_cpu_ptr(x86_cpu_to_node_map))
809		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
810
811	if (!cpu_possible(cpu)) {
812		printk(KERN_WARNING
813			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
814		dump_stack();
815		return NUMA_NO_NODE;
816	}
817	return per_cpu(x86_cpu_to_node_map, cpu);
818}
819
820void debug_cpumask_set_cpu(int cpu, int node, bool enable)
821{
822	struct cpumask *mask;
823
824	if (node == NUMA_NO_NODE) {
825		/* early_cpu_to_node() already emits a warning and trace */
826		return;
827	}
828	mask = node_to_cpumask_map[node];
829	if (!mask) {
830		pr_err("node_to_cpumask_map[%i] NULL\n", node);
831		dump_stack();
832		return;
833	}
834
835	if (enable)
836		cpumask_set_cpu(cpu, mask);
837	else
838		cpumask_clear_cpu(cpu, mask);
839
840	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
841		enable ? "numa_add_cpu" : "numa_remove_cpu",
842		cpu, node, cpumask_pr_args(mask));
843	return;
844}
845
846# ifndef CONFIG_NUMA_EMU
847static void numa_set_cpumask(int cpu, bool enable)
848{
849	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
850}
851
852void numa_add_cpu(int cpu)
853{
854	numa_set_cpumask(cpu, true);
855}
856
857void numa_remove_cpu(int cpu)
858{
859	numa_set_cpumask(cpu, false);
860}
861# endif	/* !CONFIG_NUMA_EMU */
862
863/*
864 * Returns a pointer to the bitmask of CPUs on Node 'node'.
865 */
866const struct cpumask *cpumask_of_node(int node)
867{
868	if (node >= nr_node_ids) {
869		printk(KERN_WARNING
870			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
871			node, nr_node_ids);
872		dump_stack();
873		return cpu_none_mask;
874	}
875	if (node_to_cpumask_map[node] == NULL) {
876		printk(KERN_WARNING
877			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
878			node);
879		dump_stack();
880		return cpu_online_mask;
881	}
882	return node_to_cpumask_map[node];
883}
884EXPORT_SYMBOL(cpumask_of_node);
885
886#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */
887
888#ifdef CONFIG_MEMORY_HOTPLUG
889int memory_add_physaddr_to_nid(u64 start)
890{
891	struct numa_meminfo *mi = &numa_meminfo;
892	int nid = mi->blk[0].nid;
893	int i;
894
895	for (i = 0; i < mi->nr_blks; i++)
896		if (mi->blk[i].start <= start && mi->blk[i].end > start)
897			nid = mi->blk[i].nid;
898	return nid;
899}
900EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
901#endif