// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>
#include <asm/drmem.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int primary_domain_index;
static int n_mem_addr_cells, n_mem_size_cells;

#define FORM0_AFFINITY 0
#define FORM1_AFFINITY 1
#define FORM2_AFFINITY 2
static int affinity_form;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
static int numa_distance_table[MAX_NUMNODES][MAX_NUMNODES] = {
	[0 ... MAX_NUMNODES - 1] = { [0 ... MAX_NUMNODES - 1] = -1 }
};
static int numa_id_index_table[MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = NUMA_NO_NODE };

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for_each_node(node)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}

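/*
 * Illustrative example (not from the original source): booting with
 * "numa=fake=1G,4G" treats each comma-separated value parsed by
 * memparse() below as an upper address boundary for the current fake
 * node, so memory ending at or below 1GB stays in fake node 0, memory
 * up to 4GB lands in fake node 1, and the remainder in fake node 2.
 */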
static int __init fake_numa_create_new_node(unsigned long end_pfn,
					    unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes
	 * We want to continue from where we left off the last time
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		pr_debug("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}

static void __init reset_numa_cpu_lookup_table(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		numa_cpu_lookup_table[cpu] = -1;
}

void map_cpu_to_node(int cpu, int node)
{
	update_numa_cpu_lookup_table(cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) {
		pr_debug("adding cpu %d to node %d\n", cpu, node);
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
	}
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
		pr_debug("removing cpu %lu from node %d\n", cpu, node);
	} else {
		pr_warn("Warning: cpu %lu not found in node %d\n", cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

static int __associativity_to_nid(const __be32 *associativity,
				  int max_array_sz)
{
	int nid;
	/*
	 * primary_domain_index is a 1-based array index.
	 */
	int index = primary_domain_index - 1;

	if (!numa_enabled || index >= max_array_sz)
		return NUMA_NO_NODE;

	nid = of_read_number(&associativity[index], 1);

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= nr_node_ids)
		nid = NUMA_NO_NODE;
	return nid;
}
/*
 * Returns nid in the range [0..nr_node_ids - 1], or -1 if no useful
 * NUMA info is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
	int array_sz = of_read_number(associativity, 1);

	/* Skip the first element in the associativity array */
	return __associativity_to_nid((associativity + 1), array_sz);
}
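
/*
 * Worked example (illustrative): with primary_domain_index == 4, an
 * "ibm,associativity" value of { 4, 0, 0, 0, 2 } carries a leading
 * length cell of 4, so associativity_to_nid() ends up reading index 3
 * of the remaining array and returns nid 2.
 */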

static int __cpu_form2_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	int dist;
	int node1, node2;

	node1 = associativity_to_nid(cpu1_assoc);
	node2 = associativity_to_nid(cpu2_assoc);

	dist = numa_distance_table[node1][node2];
	if (dist <= LOCAL_DISTANCE)
		return 0;
	else if (dist <= REMOTE_DISTANCE)
		return 1;
	else
		return 2;
}

static int __cpu_form1_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	int dist = 0;

	int i, index;

	for (i = 0; i < distance_ref_points_depth; i++) {
		index = be32_to_cpu(distance_ref_points[i]);
		if (cpu1_assoc[index] == cpu2_assoc[index])
			break;
		dist++;
	}

	return dist;
}

int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	/* We should not get called with FORM0 */
	VM_WARN_ON(affinity_form == FORM0_AFFINITY);
	if (affinity_form == FORM1_AFFINITY)
		return __cpu_form1_relative_distance(cpu1_assoc, cpu2_assoc);
	return __cpu_form2_relative_distance(cpu1_assoc, cpu2_assoc);
}
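
/*
 * Example (illustrative): with the generic LOCAL_DISTANCE of 10 and
 * REMOTE_DISTANCE of 20, a form 2 table distance of 10 maps to a
 * relative distance of 0, 20 maps to 1, and anything larger (e.g. 40)
 * maps to 2.
 */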

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (affinity_form == FORM2_AFFINITY)
		return numa_distance_table[a][b];
	else if (affinity_form == FORM0_AFFINITY)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
EXPORT_SYMBOL(__node_distance);
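
/*
 * Worked example for the form 1 loop above (illustrative): with
 * distance_ref_points_depth == 2, two nodes whose lookup-table entries
 * differ at level 0 but match at level 1 are LOCAL_DISTANCE * 2 apart;
 * nodes that differ at both levels are LOCAL_DISTANCE * 4 apart.
 */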

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = NUMA_NO_NODE;
	const __be32 *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	int nid = NUMA_NO_NODE;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		device = of_get_next_parent(device);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL(of_node_to_nid);

static void __initialize_form1_numa_distance(const __be32 *associativity,
					     int max_array_sz)
{
	int i, nid;

	if (affinity_form != FORM1_AFFINITY)
		return;

	nid = __associativity_to_nid(associativity, max_array_sz);
	if (nid != NUMA_NO_NODE) {
		for (i = 0; i < distance_ref_points_depth; i++) {
			const __be32 *entry;
			int index = be32_to_cpu(distance_ref_points[i]) - 1;

			/*
			 * Broken hierarchy; return with a broken distance table.
			 */
			if (WARN(index >= max_array_sz, "Broken ibm,associativity property"))
				return;

			entry = &associativity[index];
			distance_lookup_table[nid][i] = of_read_number(entry, 1);
		}
	}
}

static void initialize_form1_numa_distance(const __be32 *associativity)
{
	int array_sz;

	array_sz = of_read_number(associativity, 1);
	/* Skip the first element in the associativity array */
	__initialize_form1_numa_distance(associativity + 1, array_sz);
}

/*
 * Used to update distance information w.r.t. a newly added node.
 */
void update_numa_distance(struct device_node *node)
{
	int nid;

	if (affinity_form == FORM0_AFFINITY)
		return;
	else if (affinity_form == FORM1_AFFINITY) {
		const __be32 *associativity;

		associativity = of_get_associativity(node);
		if (!associativity)
			return;

		initialize_form1_numa_distance(associativity);
		return;
	}

	/* FORM2 affinity */
	nid = of_node_to_nid_single(node);
	if (nid == NUMA_NO_NODE)
		return;

	/*
	 * With FORM2 we expect NUMA distance of all possible NUMA
	 * nodes to be provided during boot.
	 */
	WARN(numa_distance_table[nid][nid] == -1,
	     "NUMA distance details for node %d not provided\n", nid);
}

/*
 * ibm,numa-lookup-index-table = { N, domainid1, domainid2, ..., domainidN }
 * ibm,numa-distance-table = { N, 1, 2, 4, 5, 1, 6, ..., N elements }
 */
static void __init initialize_form2_numa_distance_lookup_table(void)
{
	int i, j;
	struct device_node *root;
	const __u8 *form2_distances;
	const __be32 *numa_lookup_index;
	int form2_distances_length;
	int max_numa_index, distance_index;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	numa_lookup_index = of_get_property(root, "ibm,numa-lookup-index-table", NULL);
	max_numa_index = of_read_number(&numa_lookup_index[0], 1);

	/* The first element of the array is the size, encoded as an int */
	form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL);
	form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1);
	/* Skip the size, which is an encoded int */
	form2_distances += sizeof(__be32);

	pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n",
		 form2_distances_length, max_numa_index);

	for (i = 0; i < max_numa_index; i++)
		/* The +1 skips the leading max_numa_index in the property */
		numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1);

	if (form2_distances_length != max_numa_index * max_numa_index) {
		WARN(1, "Wrong NUMA distance information\n");
		form2_distances = NULL; // don't use it
	}
	distance_index = 0;
	for (i = 0; i < max_numa_index; i++) {
		for (j = 0; j < max_numa_index; j++) {
			int nodeA = numa_id_index_table[i];
			int nodeB = numa_id_index_table[j];
			int dist;

			if (form2_distances)
				dist = form2_distances[distance_index++];
			else if (nodeA == nodeB)
				dist = LOCAL_DISTANCE;
			else
				dist = REMOTE_DISTANCE;
			numa_distance_table[nodeA][nodeB] = dist;
			pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist);
		}
	}

	of_node_put(root);
}
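
/*
 * Worked example (illustrative device tree values, not from the
 * original source): for two NUMA nodes 0 and 1,
 *	ibm,numa-lookup-index-table = { 2, 0, 1 }
 *	ibm,numa-distance-table     = { 4, 10, 20, 20, 10 }
 * i.e. a leading count of 4 followed by byte-sized distances, giving
 * numa_distance_table[0][0] = 10, [0][1] = [1][0] = 20, [1][1] = 10.
 */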

static int __init find_primary_domain_index(void)
{
	int index;
	struct device_node *root;

	/*
	 * Check for which form of affinity.
	 */
	if (firmware_has_feature(FW_FEATURE_OPAL)) {
		affinity_form = FORM1_AFFINITY;
	} else if (firmware_has_feature(FW_FEATURE_FORM2_AFFINITY)) {
		pr_debug("Using form 2 affinity\n");
		affinity_form = FORM2_AFFINITY;
	} else if (firmware_has_feature(FW_FEATURE_FORM1_AFFINITY)) {
		pr_debug("Using form 1 affinity\n");
		affinity_form = FORM1_AFFINITY;
	} else
		affinity_form = FORM0_AFFINITY;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(root,
					      "ibm,associativity-reference-points",
					      &distance_ref_points_depth);

	if (!distance_ref_points) {
		pr_debug("ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);
	if (affinity_form == FORM0_AFFINITY) {
		if (distance_ref_points_depth < 2) {
			pr_warn("short ibm,associativity-reference-points\n");
			goto err;
		}

		index = of_read_number(&distance_ref_points[1], 1);
	} else {
		/*
		 * Both FORM1 and FORM2 affinity find the primary domain details
		 * at the same offset.
		 */
		index = of_read_number(distance_ref_points, 1);
	}
	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		pr_warn("distance array capped at %d entries\n",
			MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return index;

err:
	of_node_put(root);
	return -1;
}
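
/*
 * Example (illustrative): a form 1 "ibm,associativity-reference-points"
 * of { 0x4, 0x2 } makes the code above pick the first entry, giving a
 * primary domain index of 4; under form 0 the second entry (2) would
 * be used instead.
 */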

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long read_n_cells(int n, const __be32 **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | of_read_number(*buf, 1);
		(*buf)++;
	}
	return result;
}
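
/*
 * Example (illustrative): with n == 2 and *buf pointing at the cells
 * { 0x1, 0x80000000 }, read_n_cells() returns 0x180000000 and leaves
 * *buf advanced past both cells.
 */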

struct assoc_arrays {
	u32 n_arrays;
	u32 array_sz;
	const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct assoc_arrays *aa)
{
	struct device_node *memory;
	const __be32 *prop;
	u32 len;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return -1;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int)) {
		of_node_put(memory);
		return -1;
	}

	aa->n_arrays = of_read_number(prop++, 1);
	aa->array_sz = of_read_number(prop++, 1);

	of_node_put(memory);

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}
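
/*
 * Example layout (illustrative): for N = 2 arrays of size M = 4 the
 * property reads { 2, 4, a0, a1, a2, a3, b0, b1, b2, b3 }, so
 * aa->arrays points at a0 and array i starts at aa->arrays[i * 4].
 */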

static int __init get_nid_and_numa_distance(struct drmem_lmb *lmb)
{
	struct assoc_arrays aa = { .arrays = NULL };
	int default_nid = NUMA_NO_NODE;
	int nid = default_nid;
	int rc, index;

	if ((primary_domain_index < 0) || !numa_enabled)
		return default_nid;

	rc = of_get_assoc_arrays(&aa);
	if (rc)
		return default_nid;

	if (primary_domain_index <= aa.array_sz &&
	    !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
		const __be32 *associativity;

		index = lmb->aa_index * aa.array_sz;
		associativity = &aa.arrays[index];
		nid = __associativity_to_nid(associativity, aa.array_sz);
		if (nid > 0 && affinity_form == FORM1_AFFINITY) {
			/*
			 * Lookup array associativity entries do not carry
			 * the array length as their first element.
			 */
			__initialize_form1_numa_distance(associativity, aa.array_sz);
		}
	}
	return nid;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
	struct assoc_arrays aa = { .arrays = NULL };
	int default_nid = NUMA_NO_NODE;
	int nid = default_nid;
	int rc, index;

	if ((primary_domain_index < 0) || !numa_enabled)
		return default_nid;

	rc = of_get_assoc_arrays(&aa);
	if (rc)
		return default_nid;

	if (primary_domain_index <= aa.array_sz &&
	    !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
		const __be32 *associativity;

		index = lmb->aa_index * aa.array_sz;
		associativity = &aa.arrays[index];
		nid = __associativity_to_nid(associativity, aa.array_sz);
	}
	return nid;
}

#ifdef CONFIG_PPC_SPLPAR

static int __vphn_get_associativity(long lcpu, __be32 *associativity)
{
	long rc, hwid;

	/*
	 * On a shared lpar, the device tree will not have node associativity.
	 * At this point the lppaca, or its __old_status field, may not be
	 * updated, so the kernel cannot detect whether it is on a shared
	 * lpar. Therefore request an explicit associativity irrespective of
	 * whether the lpar is shared or dedicated, and use the device tree
	 * property as a fallback. cpu_to_phys_id is only valid between
	 * smp_setup_cpu_maps() and smp_setup_pacas().
	 */
	if (firmware_has_feature(FW_FEATURE_VPHN)) {
		if (cpu_to_phys_id)
			hwid = cpu_to_phys_id[lcpu];
		else
			hwid = get_hard_smp_processor_id(lcpu);

		rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity);
		if (rc == H_SUCCESS)
			return 0;
	}

	return -1;
}

static int vphn_get_nid(long lcpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};

	if (!__vphn_get_associativity(lcpu, associativity))
		return associativity_to_nid(associativity);

	return NUMA_NO_NODE;
}
#else

static int __vphn_get_associativity(long lcpu, __be32 *associativity)
{
	return -1;
}

static int vphn_get_nid(long unused)
{
	return NUMA_NO_NODE;
}
#endif /* CONFIG_PPC_SPLPAR */

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	struct device_node *cpu;
	int fcpu = cpu_first_thread_sibling(lcpu);
	int nid = NUMA_NO_NODE;

	if (!cpu_present(lcpu)) {
		set_cpu_numa_node(lcpu, first_online_node);
		return first_online_node;
	}

	/*
	 * If a valid cpu-to-node mapping is already available, use it
	 * directly instead of querying the firmware, since it represents
	 * the most recent mapping notified to us by the platform (eg: VPHN).
	 * The cpu_to_node binding remains the same for all threads in the
	 * core, so if a valid mapping is already available for the first
	 * thread in the core, use it.
	 */
	nid = numa_cpu_lookup_table[fcpu];
	if (nid >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	nid = vphn_get_nid(lcpu);
	if (nid != NUMA_NO_NODE)
		goto out_present;

	cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		if (cpu_present(lcpu))
			goto out_present;
		else
			goto out;
	}

	nid = of_node_to_nid_single(cpu);
	of_node_put(cpu);

out_present:
	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	/*
	 * Update for the first thread of the core. All threads of a core
	 * have to be part of the same node. This not only avoids querying
	 * for every other thread in the core, but also avoids a case
	 * where a virtual node associativity change causes subsequent
	 * threads of a core to be associated with a different nid. However,
	 * if the first thread is already online, expect it to have a valid
	 * mapping.
	 */
	if (fcpu != lcpu) {
		WARN_ON(cpu_online(fcpu));
		map_cpu_to_node(fcpu, nid);
	}

	map_cpu_to_node(lcpu, nid);
out:
	return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
	int base, sibling, i;

	/* Verify that all the threads in the core belong to the same node */
	base = cpu_first_thread_sibling(cpu);

	for (i = 0; i < threads_per_core; i++) {
		sibling = base + i;

		if (sibling == cpu || cpu_is_offline(sibling))
			continue;

		if (cpu_to_node(sibling) != node) {
			WARN(1, "CPU thread siblings %d and %d don't belong to the same node!\n",
			     cpu, sibling);
			break;
		}
	}
}

/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
	int nid;

	nid = numa_setup_cpu(cpu);
	verify_cpu_node_mapping(cpu, nid);
	return 0;
}

static int ppc_numa_cpu_dead(unsigned int cpu)
{
	return 0;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit. Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}

/*
 * Reads the counter for a given entry in the
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory, a corresponding
	 * entry in the linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) tuples.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static int __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
				       const __be32 **usm,
				       void *data)
{
	unsigned int ranges, is_kexec_kdump = 0;
	unsigned long base, size, sz;
	int nid;

	/*
	 * Skip this block if the reserved bit is set in flags (0x80)
	 * or if the block is not assigned to this partition (0x8)
	 */
	if ((lmb->flags & DRCONF_MEM_RESERVED)
	    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
		return 0;

	if (*usm)
		is_kexec_kdump = 1;

	base = lmb->base_addr;
	size = drmem_lmb_size();
	ranges = 1;

	if (is_kexec_kdump) {
		ranges = read_usm_ranges(usm);
		if (!ranges) /* there are no (base, size) tuples */
			return 0;
	}

	do {
		if (is_kexec_kdump) {
			base = read_n_cells(n_mem_addr_cells, usm);
			size = read_n_cells(n_mem_size_cells, usm);
		}

		nid = get_nid_and_numa_distance(lmb);
		fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
					  &nid);
		node_set_online(nid);
		sz = numa_enforce_memory_limit(base, size);
		if (sz)
			memblock_set_node(base, sz, &memblock.memory, nid);
	} while (--ranges);

	return 0;
}

static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;
	const __be32 *associativity;

	if (numa_enabled == 0) {
		pr_warn("disabled by user\n");
		return -1;
	}

	primary_domain_index = find_primary_domain_index();

	if (primary_domain_index < 0) {
		/*
		 * If we fail to parse primary_domain_index from the device
		 * tree, mark NUMA as disabled and boot with NUMA disabled.
		 */
		numa_enabled = false;
		return primary_domain_index;
	}

	pr_debug("associativity depth for CPU/Memory: %d\n", primary_domain_index);

	/*
	 * If it is FORM2, initialize the distance table here.
	 */
	if (affinity_form == FORM2_AFFINITY)
		initialize_form2_numa_distance_lookup_table();

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		__be32 vphn_assoc[VPHN_ASSOC_BUFSIZE];
		struct device_node *cpu;
		int nid = NUMA_NO_NODE;

		memset(vphn_assoc, 0, VPHN_ASSOC_BUFSIZE * sizeof(__be32));

		if (__vphn_get_associativity(i, vphn_assoc) == 0) {
			nid = associativity_to_nid(vphn_assoc);
			initialize_form1_numa_distance(vphn_assoc);
		} else {
			/*
			 * Don't fall back to default_nid yet -- we will plug
			 * cpus into nodes once the memory scan has discovered
			 * the topology.
			 */
			cpu = of_get_cpu_node(i, NULL);
			BUG_ON(!cpu);

			associativity = of_get_associativity(cpu);
			if (associativity) {
				nid = associativity_to_nid(associativity);
				initialize_form1_numa_distance(associativity);
			}
			of_node_put(cpu);
		}

		/*
		 * Calling node_set_online() with a negative 'nid' is
		 * undefined behavior.
		 */
		if (likely(nid >= 0))
			node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
					      "linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties. If none, then
		 * everything goes to default_nid.
		 */
		associativity = of_get_associativity(memory);
		if (associativity) {
			nid = associativity_to_nid(associativity);
			initialize_form1_numa_distance(associativity);
		} else
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		size = numa_enforce_memory_limit(start, size);
		if (size)
			memblock_set_node(start, size, &memblock.memory, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		walk_drmem_lmbs(memory, NULL, numa_setup_drmem_lmb);
		of_node_put(memory);
	}

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	int i;

	pr_debug("Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram);
	pr_debug("Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20);

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, nid);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (!numa_enabled)
		return;

	for_each_online_node(node) {
		pr_info("Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					     node_to_cpumask_map[node])) {
				if (count == 0)
					pr_cont(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					pr_cont("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			pr_cont("-%u", nr_cpu_ids - 1);
		pr_cont("\n");
	}
}

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
	u64 spanned_pages = end_pfn - start_pfn;
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	void *nd;
	int tnid;

	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa)
		panic("Cannot allocate %zu bytes for node %d data\n",
		      nd_size, nid);

	nd = __va(nd_pa);

	/* report and initialize */
	pr_info(" NODE_DATA [mem %#010Lx-%#010Lx]\n",
		nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}

static void __init find_possible_nodes(void)
{
	struct device_node *rtas;
	const __be32 *domains = NULL;
	int prop_length, max_nodes;
	u32 i;

	if (!numa_enabled)
		return;

	rtas = of_find_node_by_path("/rtas");
	if (!rtas)
		return;

	/*
	 * ibm,current-associativity-domains is a fairly recent property. If
	 * it doesn't exist, then fall back to ibm,max-associativity-domains.
	 * Current denotes what the platform can support compared to max,
	 * which denotes what the Hypervisor can support.
	 *
	 * If the LPAR is migratable, new nodes might be activated after an
	 * LPM, so we should consider the max number in that case.
	 */
	if (!of_get_property(of_root, "ibm,migratable-partition", NULL))
		domains = of_get_property(rtas,
					  "ibm,current-associativity-domains",
					  &prop_length);
	if (!domains) {
		domains = of_get_property(rtas, "ibm,max-associativity-domains",
					  &prop_length);
		if (!domains)
			goto out;
	}

	max_nodes = of_read_number(&domains[primary_domain_index], 1);
	pr_info("Partition configured for %d NUMA nodes.\n", max_nodes);

	for (i = 0; i < max_nodes; i++) {
		if (!node_possible(i))
			node_set(i, node_possible_map);
	}

	prop_length /= sizeof(int);
	if (prop_length > primary_domain_index + 2)
		coregroup_enabled = 1;

out:
	of_node_put(rtas);
}

void __init mem_topology_setup(void)
{
	int cpu;

	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;

	/*
	 * Linux/mm assumes node 0 to be online at boot. However, this is
	 * not true on PowerPC, where node 0 is similar to any other node:
	 * it could be a cpuless, memoryless node. So force node 0 offline
	 * for now. This prevents a cpuless, memoryless node 0 from showing
	 * up unnecessarily as online. If a node has cpus or memory that
	 * need to be online, the node will be marked online anyway.
	 */
	node_set_offline(0);

	if (parse_numa_properties())
		setup_nonnuma();

	/*
	 * Modify the set of possible NUMA nodes to reflect information
	 * available about the set of online nodes, and the set of nodes
	 * that we expect to make use of for this platform's affinity
	 * calculations.
	 */
	nodes_and(node_possible_map, node_possible_map, node_online_map);

	find_possible_nodes();

	setup_node_to_cpumask_map();

	reset_numa_cpu_lookup_table();

	for_each_possible_cpu(cpu) {
		/*
		 * Powerpc with CONFIG_NUMA always used to have a node 0,
		 * even if it was memoryless or cpuless. For all cpus that
		 * are possible but not present, cpu_to_node() would point
		 * to node 0. To remove a cpuless, memoryless dummy node,
		 * powerpc needs to make sure all possible but not present
		 * cpu_to_node entries are set to a proper node.
		 */
		numa_setup_cpu(cpu);
	}
}

void __init initmem_init(void)
{
	int nid;

	memblock_dump_all();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		setup_node_data(nid, start_pfn, end_pfn);
	}

	sparse_init();

	/*
	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
	 * even before we online them, so that we can use cpu_to_{node,mem}
	 * early in boot, cf. smp_prepare_cpus().
	 * _nocalls() + manual invocation is used because cpuhp is not yet
	 * initialized for the boot CPU.
	 */
	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
{
	struct drmem_lmb *lmb;
	unsigned long lmb_size;
	int nid = NUMA_NO_NODE;

	lmb_size = drmem_lmb_size();

	for_each_drmem_lmb(lmb) {
		/*
		 * Skip this block if it is reserved or not assigned to
		 * this partition.
		 */
		if ((lmb->flags & DRCONF_MEM_RESERVED)
		    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < lmb->base_addr)
		    || (scn_addr >= (lmb->base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(lmb);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = NUMA_NO_NODE;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}

/*
 * Find the node associated with a hot added memory section. A section
 * corresponds to a SPARSEMEM section, not a memblock. It is assumed
 * that sections are fully contained within a single memblock.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid;

	if (!numa_enabled)
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	struct device_node *dn = NULL;
	const __be64 *lrdr = NULL;

	dn = of_find_node_by_path("/rtas");
	if (dn) {
		lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
		of_node_put(dn);
		if (lrdr)
			return be64_to_cpup(lrdr);
	}

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		of_node_put(memory);
		return drmem_lmb_memory_max();
	}
	return 0;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
static int topology_inited;

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long vphn_get_associativity(unsigned long cpu,
				   __be32 *associativity)
{
	long rc;

	rc = hcall_vphn(get_hard_smp_processor_id(cpu),
			VPHN_FLAG_VCPU, associativity);

	switch (rc) {
	case H_SUCCESS:
		pr_debug("VPHN hcall succeeded. Reset polling...\n");
		goto out;

	case H_FUNCTION:
		pr_err_ratelimited("VPHN unsupported. Disabling polling...\n");
		break;
	case H_HARDWARE:
		pr_err_ratelimited("hcall_vphn() experienced a hardware fault "
				   "preventing VPHN. Disabling polling...\n");
		break;
	case H_PARAMETER:
		pr_err_ratelimited("hcall_vphn() was passed an invalid parameter. "
				   "Disabling polling...\n");
		break;
	default:
		pr_err_ratelimited("hcall_vphn() returned %ld. Disabling polling...\n",
				   rc);
		break;
	}
out:
	return rc;
}

void find_and_update_cpu_nid(int cpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	int new_nid;

	/* Use associativity from first thread for all siblings */
	if (vphn_get_associativity(cpu, associativity))
		return;

	/* Do not have previous associativity, so find it now. */
	new_nid = associativity_to_nid(associativity);

	if (new_nid < 0 || !node_possible(new_nid))
		new_nid = first_online_node;
	else
		// Associate node <-> cpu, so cpu_up() calls
		// try_online_node() on the right node.
		set_cpu_numa_node(cpu, new_nid);

	pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__, cpu, new_nid);
}

int cpu_to_coregroup_id(int cpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	int index;

	if (cpu < 0 || cpu >= nr_cpu_ids)
		return -1;

	if (!coregroup_enabled)
		goto out;

	if (!firmware_has_feature(FW_FEATURE_VPHN))
		goto out;

	if (vphn_get_associativity(cpu, associativity))
		goto out;

	index = of_read_number(associativity, 1);
	if (index > primary_domain_index + 1)
		return of_read_number(&associativity[index - 1], 1);

out:
	return cpu_to_core_id(cpu);
}

static int topology_update_init(void)
{
	topology_inited = 1;
	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */
1/*
2 * pSeries NUMA support
3 *
4 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/threads.h>
12#include <linux/bootmem.h>
13#include <linux/init.h>
14#include <linux/mm.h>
15#include <linux/mmzone.h>
16#include <linux/export.h>
17#include <linux/nodemask.h>
18#include <linux/cpu.h>
19#include <linux/notifier.h>
20#include <linux/memblock.h>
21#include <linux/of.h>
22#include <linux/pfn.h>
23#include <linux/cpuset.h>
24#include <linux/node.h>
25#include <asm/sparsemem.h>
26#include <asm/prom.h>
27#include <asm/smp.h>
28#include <asm/firmware.h>
29#include <asm/paca.h>
30#include <asm/hvcall.h>
31#include <asm/setup.h>
32
33static int numa_enabled = 1;
34
35static char *cmdline __initdata;
36
37static int numa_debug;
38#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
39
40int numa_cpu_lookup_table[NR_CPUS];
41cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
42struct pglist_data *node_data[MAX_NUMNODES];
43
44EXPORT_SYMBOL(numa_cpu_lookup_table);
45EXPORT_SYMBOL(node_to_cpumask_map);
46EXPORT_SYMBOL(node_data);
47
48static int min_common_depth;
49static int n_mem_addr_cells, n_mem_size_cells;
50static int form1_affinity;
51
52#define MAX_DISTANCE_REF_POINTS 4
53static int distance_ref_points_depth;
54static const unsigned int *distance_ref_points;
55static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
56
57/*
58 * Allocate node_to_cpumask_map based on number of available nodes
59 * Requires node_possible_map to be valid.
60 *
61 * Note: cpumask_of_node() is not valid until after this is done.
62 */
63static void __init setup_node_to_cpumask_map(void)
64{
65 unsigned int node, num = 0;
66
67 /* setup nr_node_ids if not done yet */
68 if (nr_node_ids == MAX_NUMNODES) {
69 for_each_node_mask(node, node_possible_map)
70 num = node;
71 nr_node_ids = num + 1;
72 }
73
74 /* allocate the map */
75 for (node = 0; node < nr_node_ids; node++)
76 alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
77
78 /* cpumask_of_node() will now work */
79 dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
80}
81
82static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
83 unsigned int *nid)
84{
85 unsigned long long mem;
86 char *p = cmdline;
87 static unsigned int fake_nid;
88 static unsigned long long curr_boundary;
89
90 /*
91 * Modify node id, iff we started creating NUMA nodes
92 * We want to continue from where we left of the last time
93 */
94 if (fake_nid)
95 *nid = fake_nid;
96 /*
97 * In case there are no more arguments to parse, the
98 * node_id should be the same as the last fake node id
99 * (we've handled this above).
100 */
101 if (!p)
102 return 0;
103
104 mem = memparse(p, &p);
105 if (!mem)
106 return 0;
107
108 if (mem < curr_boundary)
109 return 0;
110
111 curr_boundary = mem;
112
113 if ((end_pfn << PAGE_SHIFT) > mem) {
114 /*
115 * Skip commas and spaces
116 */
117 while (*p == ',' || *p == ' ' || *p == '\t')
118 p++;
119
120 cmdline = p;
121 fake_nid++;
122 *nid = fake_nid;
123 dbg("created new fake_node with id %d\n", fake_nid);
124 return 1;
125 }
126 return 0;
127}
128
129/*
130 * get_node_active_region - Return active region containing pfn
131 * Active range returned is empty if none found.
132 * @pfn: The page to return the region for
133 * @node_ar: Returned set to the active region containing @pfn
134 */
135static void __init get_node_active_region(unsigned long pfn,
136 struct node_active_region *node_ar)
137{
138 unsigned long start_pfn, end_pfn;
139 int i, nid;
140
141 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
142 if (pfn >= start_pfn && pfn < end_pfn) {
143 node_ar->nid = nid;
144 node_ar->start_pfn = start_pfn;
145 node_ar->end_pfn = end_pfn;
146 break;
147 }
148 }
149}
150
151static void map_cpu_to_node(int cpu, int node)
152{
153 numa_cpu_lookup_table[cpu] = node;
154
155 dbg("adding cpu %d to node %d\n", cpu, node);
156
157 if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
158 cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
159}
160
161#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
162static void unmap_cpu_from_node(unsigned long cpu)
163{
164 int node = numa_cpu_lookup_table[cpu];
165
166 dbg("removing cpu %lu from node %d\n", cpu, node);
167
168 if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
169 cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
170 } else {
171 printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
172 cpu, node);
173 }
174}
175#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
176
177/* must hold reference to node during call */
178static const int *of_get_associativity(struct device_node *dev)
179{
180 return of_get_property(dev, "ibm,associativity", NULL);
181}
182
183/*
184 * Returns the property linux,drconf-usable-memory if
185 * it exists (the property exists only in kexec/kdump kernels,
186 * added by kexec-tools)
187 */
188static const u32 *of_get_usable_memory(struct device_node *memory)
189{
190 const u32 *prop;
191 u32 len;
192 prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
193 if (!prop || len < sizeof(unsigned int))
194 return 0;
195 return prop;
196}
197
198int __node_distance(int a, int b)
199{
200 int i;
201 int distance = LOCAL_DISTANCE;
202
203 if (!form1_affinity)
204 return distance;
205
206 for (i = 0; i < distance_ref_points_depth; i++) {
207 if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
208 break;
209
210 /* Double the distance for each NUMA level */
211 distance *= 2;
212 }
213
214 return distance;
215}
216
217static void initialize_distance_lookup_table(int nid,
218 const unsigned int *associativity)
219{
220 int i;
221
222 if (!form1_affinity)
223 return;
224
225 for (i = 0; i < distance_ref_points_depth; i++) {
226 distance_lookup_table[nid][i] =
227 associativity[distance_ref_points[i]];
228 }
229}
230
231/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
232 * info is found.
233 */
234static int associativity_to_nid(const unsigned int *associativity)
235{
236 int nid = -1;
237
238 if (min_common_depth == -1)
239 goto out;
240
241 if (associativity[0] >= min_common_depth)
242 nid = associativity[min_common_depth];
243
244 /* POWER4 LPAR uses 0xffff as invalid node */
245 if (nid == 0xffff || nid >= MAX_NUMNODES)
246 nid = -1;
247
248 if (nid > 0 && associativity[0] >= distance_ref_points_depth)
249 initialize_distance_lookup_table(nid, associativity);
250
251out:
252 return nid;
253}
254
255/* Returns the nid associated with the given device tree node,
256 * or -1 if not found.
257 */
258static int of_node_to_nid_single(struct device_node *device)
259{
260 int nid = -1;
261 const unsigned int *tmp;
262
263 tmp = of_get_associativity(device);
264 if (tmp)
265 nid = associativity_to_nid(tmp);
266 return nid;
267}
268
269/* Walk the device tree upwards, looking for an associativity id */
270int of_node_to_nid(struct device_node *device)
271{
272 struct device_node *tmp;
273 int nid = -1;
274
275 of_node_get(device);
276 while (device) {
277 nid = of_node_to_nid_single(device);
278 if (nid != -1)
279 break;
280
281 tmp = device;
282 device = of_get_parent(tmp);
283 of_node_put(tmp);
284 }
285 of_node_put(device);
286
287 return nid;
288}
289EXPORT_SYMBOL_GPL(of_node_to_nid);
290
291static int __init find_min_common_depth(void)
292{
293 int depth;
294 struct device_node *chosen;
295 struct device_node *root;
296 const char *vec5;
297
298 if (firmware_has_feature(FW_FEATURE_OPAL))
299 root = of_find_node_by_path("/ibm,opal");
300 else
301 root = of_find_node_by_path("/rtas");
302 if (!root)
303 root = of_find_node_by_path("/");
304
305 /*
306 * This property is a set of 32-bit integers, each representing
307 * an index into the ibm,associativity nodes.
308 *
309 * With form 0 affinity the first integer is for an SMP configuration
310 * (should be all 0's) and the second is for a normal NUMA
311 * configuration. We have only one level of NUMA.
312 *
313 * With form 1 affinity the first integer is the most significant
314 * NUMA boundary and the following are progressively less significant
315 * boundaries. There can be more than one level of NUMA.
316 */
317 distance_ref_points = of_get_property(root,
318 "ibm,associativity-reference-points",
319 &distance_ref_points_depth);
320
321 if (!distance_ref_points) {
322 dbg("NUMA: ibm,associativity-reference-points not found.\n");
323 goto err;
324 }
325
326 distance_ref_points_depth /= sizeof(int);
327
328#define VEC5_AFFINITY_BYTE 5
329#define VEC5_AFFINITY 0x80
330
331 if (firmware_has_feature(FW_FEATURE_OPAL))
332 form1_affinity = 1;
333 else {
334 chosen = of_find_node_by_path("/chosen");
335 if (chosen) {
336 vec5 = of_get_property(chosen,
337 "ibm,architecture-vec-5", NULL);
338 if (vec5 && (vec5[VEC5_AFFINITY_BYTE] &
339 VEC5_AFFINITY)) {
340 dbg("Using form 1 affinity\n");
341 form1_affinity = 1;
342 }
343 }
344 }
345
346 if (form1_affinity) {
347 depth = distance_ref_points[0];
348 } else {
349 if (distance_ref_points_depth < 2) {
350 printk(KERN_WARNING "NUMA: "
351 "short ibm,associativity-reference-points\n");
352 goto err;
353 }
354
355 depth = distance_ref_points[1];
356 }
357
358 /*
359 * Warn and cap if the hardware supports more than
360 * MAX_DISTANCE_REF_POINTS domains.
361 */
362 if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
363 printk(KERN_WARNING "NUMA: distance array capped at "
364 "%d entries\n", MAX_DISTANCE_REF_POINTS);
365 distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
366 }
367
368 of_node_put(root);
369 return depth;
370
371err:
372 of_node_put(root);
373 return -1;
374}
375
376static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
377{
378 struct device_node *memory = NULL;
379
380 memory = of_find_node_by_type(memory, "memory");
381 if (!memory)
382 panic("numa.c: No memory nodes found!");
383
384 *n_addr_cells = of_n_addr_cells(memory);
385 *n_size_cells = of_n_size_cells(memory);
386 of_node_put(memory);
387}
388
389static unsigned long read_n_cells(int n, const unsigned int **buf)
390{
391 unsigned long result = 0;
392
393 while (n--) {
394 result = (result << 32) | **buf;
395 (*buf)++;
396 }
397 return result;
398}
399
400struct of_drconf_cell {
401 u64 base_addr;
402 u32 drc_index;
403 u32 reserved;
404 u32 aa_index;
405 u32 flags;
406};
407
408#define DRCONF_MEM_ASSIGNED 0x00000008
409#define DRCONF_MEM_AI_INVALID 0x00000040
410#define DRCONF_MEM_RESERVED 0x00000080
411
412/*
413 * Read the next memblock list entry from the ibm,dynamic-memory property
414 * and return the information in the provided of_drconf_cell structure.
415 */
416static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
417{
418 const u32 *cp;
419
420 drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);
421
422 cp = *cellp;
423 drmem->drc_index = cp[0];
424 drmem->reserved = cp[1];
425 drmem->aa_index = cp[2];
426 drmem->flags = cp[3];
427
428 *cellp = cp + 4;
429}
430
431/*
432 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
433 *
434 * The layout of the ibm,dynamic-memory property is a number N of memblock
435 * list entries followed by N memblock list entries. Each memblock list entry
436 * contains information as laid out in the of_drconf_cell struct above.
437 */
438static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
439{
440 const u32 *prop;
441 u32 len, entries;
442
443 prop = of_get_property(memory, "ibm,dynamic-memory", &len);
444 if (!prop || len < sizeof(unsigned int))
445 return 0;
446
447 entries = *prop++;
448
449 /* Now that we know the number of entries, revalidate the size
450 * of the property read in to ensure we have everything
451 */
452 if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
453 return 0;
454
455 *dm = prop;
456 return entries;
457}
458
459/*
460 * Retrieve and validate the ibm,lmb-size property for drconf memory
461 * from the device tree.
462 */
463static u64 of_get_lmb_size(struct device_node *memory)
464{
465 const u32 *prop;
466 u32 len;
467
468 prop = of_get_property(memory, "ibm,lmb-size", &len);
469 if (!prop || len < sizeof(unsigned int))
470 return 0;
471
472 return read_n_cells(n_mem_size_cells, &prop);
473}
474
475struct assoc_arrays {
476 u32 n_arrays;
477 u32 array_sz;
478 const u32 *arrays;
479};
480
481/*
482 * Retrieve and validate the list of associativity arrays for drconf
483 * memory from the ibm,associativity-lookup-arrays property of the
484 * device tree..
485 *
486 * The layout of the ibm,associativity-lookup-arrays property is a number N
487 * indicating the number of associativity arrays, followed by a number M
488 * indicating the size of each associativity array, followed by a list
489 * of N associativity arrays.
490 */
491static int of_get_assoc_arrays(struct device_node *memory,
492 struct assoc_arrays *aa)
493{
494 const u32 *prop;
495 u32 len;
496
497 prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
498 if (!prop || len < 2 * sizeof(unsigned int))
499 return -1;
500
501 aa->n_arrays = *prop++;
502 aa->array_sz = *prop++;
503
504 /* Now that we know the number of arrays and size of each array,
505 * revalidate the size of the property read in.
506 */
507 if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
508 return -1;
509
510 aa->arrays = prop;
511 return 0;
512}
513
514/*
515 * This is like of_node_to_nid_single() for memory represented in the
516 * ibm,dynamic-reconfiguration-memory node.
517 */
518static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
519 struct assoc_arrays *aa)
520{
521 int default_nid = 0;
522 int nid = default_nid;
523 int index;
524
525 if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
526 !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
527 drmem->aa_index < aa->n_arrays) {
528 index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
529 nid = aa->arrays[index];
530
531 if (nid == 0xffff || nid >= MAX_NUMNODES)
532 nid = default_nid;
533 }
534
535 return nid;
536}
537
538/*
539 * Figure out to which domain a cpu belongs and stick it there.
540 * Return the id of the domain used.
541 */
542static int __cpuinit numa_setup_cpu(unsigned long lcpu)
543{
544 int nid = 0;
545 struct device_node *cpu = of_get_cpu_node(lcpu, NULL);
546
547 if (!cpu) {
548 WARN_ON(1);
549 goto out;
550 }
551
552 nid = of_node_to_nid_single(cpu);
553
554 if (nid < 0 || !node_online(nid))
555 nid = first_online_node;
556out:
557 map_cpu_to_node(lcpu, nid);
558
559 of_node_put(cpu);
560
561 return nid;
562}
563
564static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
565 unsigned long action,
566 void *hcpu)
567{
568 unsigned long lcpu = (unsigned long)hcpu;
569 int ret = NOTIFY_DONE;
570
571 switch (action) {
572 case CPU_UP_PREPARE:
573 case CPU_UP_PREPARE_FROZEN:
574 numa_setup_cpu(lcpu);
575 ret = NOTIFY_OK;
576 break;
577#ifdef CONFIG_HOTPLUG_CPU
578 case CPU_DEAD:
579 case CPU_DEAD_FROZEN:
580 case CPU_UP_CANCELED:
581 case CPU_UP_CANCELED_FROZEN:
582 unmap_cpu_from_node(lcpu);
583 break;
584 ret = NOTIFY_OK;
585#endif
586 }
587 return ret;
588}
589
590/*
591 * Check and possibly modify a memory region to enforce the memory limit.
592 *
593 * Returns the size the region should have to enforce the memory limit.
594 * This will either be the original value of size, a truncated value,
595 * or zero. If the returned value of size is 0 the region should be
596 * discarded as it lies wholly above the memory limit.
597 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit. Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}

/*
 * Reads the counter for a given entry in the
 * linux,drconf-usable-memory property.
 */
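/*
 * Sketch of the property layout being parsed (cell values are made up
 * for illustration): for each LMB, linux,drconf-usable-memory holds a
 * counter followed by that many (base, size) pairs, e.g.
 *
 *   2  <base0> <size0>  <base1> <size1>
 *
 * describes an LMB with two usable sub-ranges.
 */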
static inline int __init read_usm_ranges(const u32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory, a corresponding
	 * entry in the linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) pairs.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const u32 *uninitialized_var(dm), *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa = { .arrays = NULL };

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/*
		 * Skip this block if the reserved bit is set in flags (0x80)
		 * or if the block is not assigned to this partition (0x8).
		 */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) pairs */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
				((base + size) >> PAGE_SHIFT),
				&nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				memblock_set_node(base, sz, nid);
		} while (--ranges);
	}
}

static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		struct device_node *cpu;
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* number of (base, size) ranges encoded in the cells */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties. If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		size = numa_enforce_memory_limit(start, size);
		if (!size) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		memblock_set_node(start, size, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn), nid);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", nr_cpu_ids - 1);
		printk("\n");
	}
}

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < memblock_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, using the memblock or bootmem allocator as
 * appropriate. nid is the preferred node and end_pfn is the pfn of the
 * highest address in that node.
 *
 * Returns the virtual address of the memory.
 */
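/*
 * Typical call, as used by do_init_bootmem() later in this file:
 *
 *   NODE_DATA(nid) = careful_zallocation(nid,
 *				sizeof(struct pglist_data),
 *				SMP_CACHE_BYTES, end_pfn);
 */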
static void __init *careful_zallocation(int nid, unsigned long size,
					unsigned long align,
					unsigned long end_pfn)
{
	void *ret;
	int new_nid;
	unsigned long ret_paddr;

	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret_paddr)
		ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());

	if (!ret_paddr)
		panic("numa.c: cannot allocate %lu bytes for node %d",
		      size, nid);

	ret = __va(ret_paddr);

	/*
	 * We initialize the nodes in numeric order: 0, 1, 2...
	 * and hand over control from the MEMBLOCK allocator to the
	 * bootmem allocator. If this function is called for
	 * node 5, then we know that all nodes <5 are using the
	 * bootmem allocator instead of the MEMBLOCK allocator.
	 *
	 * So, check the nid from which this allocation came
	 * and double check to see if we need to use bootmem
	 * instead of the MEMBLOCK. We don't free the MEMBLOCK memory
	 * since it would be useless.
	 */
	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
					   size, align, 0);

		dbg("alloc_bootmem %p %lx\n", ret, size);
	}

	memset(ret, 0, size);
	return ret;
}

static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

static void __init mark_reserved_regions_for_nid(int nid)
{
	struct pglist_data *node = NODE_DATA(nid);
	struct memblock_region *reg;

	for_each_memblock(reserved, reg) {
		unsigned long physbase = reg->base;
		unsigned long size = reg->size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = PFN_UP(physbase + size);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = node->node_start_pfn +
					     node->node_spanned_pages;

		/*
		 * Check to make sure that this memblock.reserved area is
		 * within the bounds of the node that we care about.
		 * Checking the nid of the start and end points is not
		 * sufficient because the reserved area could span the
		 * entire node.
		 */
		if (end_pfn <= node->node_start_pfn ||
		    start_pfn >= node_end_pfn)
			continue;

		get_node_active_region(start_pfn, &node_ar);
		while (start_pfn < end_pfn &&
		       node_ar.start_pfn < node_ar.end_pfn) {
			unsigned long reserve_size = size;
			/*
			 * If the reserved region extends past the active
			 * region, then trim its size to the active region.
			 */
			if (end_pfn > node_ar.end_pfn)
				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
					- physbase;
			/*
			 * Only worry about *this* node, others may not
			 * yet have valid NODE_DATA().
			 */
			if (node_ar.nid == nid) {
				dbg("reserve_bootmem %lx %lx nid=%d\n",
					physbase, reserve_size, node_ar.nid);
				reserve_bootmem_node(NODE_DATA(node_ar.nid),
						physbase, reserve_size,
						BOOTMEM_DEFAULT);
			}
			/*
			 * If the reserved region is contained in the active
			 * region, then we are done.
			 */
			if (end_pfn <= node_ar.end_pfn)
				break;

			/*
			 * The reserved region extends past the active
			 * region; get the next active region that contains
			 * this reserved region.
			 */
			start_pfn = node_ar.end_pfn;
			physbase = start_pfn << PAGE_SHIFT;
			size = size - reserve_size;
			get_node_active_region(start_pfn, &node_ar);
		}
	}
}

void __init do_init_bootmem(void)
{
	int nid;

	min_low_pfn = 0;
	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		void *bootmem_vaddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/*
		 * Allocate the node structure node local if possible
		 *
		 * Be careful moving this around, as it relies on all
		 * previous nodes' bootmem to be initialized and have
		 * all reserved areas marked.
		 */
		NODE_DATA(nid) = careful_zallocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_vaddr = careful_zallocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);

		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

		init_bootmem_node(NODE_DATA(nid),
				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);
		/*
		 * Be very careful about moving this around. Future
		 * calls to careful_zallocation() depend on this getting
		 * done correctly.
		 */
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	}

	init_bootmem_done = 1;

	/*
	 * Now bootmem is initialised we can create the node to cpumask
	 * lookup tables and setup the cpu callback to populate them.
	 */
	setup_node_to_cpumask_map();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const u32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/*
		 * Skip this block if it is reserved or not assigned to
		 * this partition.
		 */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = -1;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* number of (base, size) ranges encoded in the cells */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}

/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid, found = 0;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	if (NODE_DATA(nid)->node_spanned_pages)
		return nid;

	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages) {
			found = 1;
			break;
		}
	}

	BUG_ON(!found);
	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	unsigned int drconf_cell_cnt = 0;
	u64 lmb_size = 0;
	const u32 *dm = NULL;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
		lmb_size = of_get_lmb_size(memory);
		of_node_put(memory);
	}
	return lmb_size * drconf_cell_cnt;
}
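
/*
 * A back-of-the-envelope example (numbers purely illustrative): a
 * partition with 64 dynamic-reconfiguration LMBs of 256MB each would
 * report a hot add ceiling of 64 * 256MB = 16GB.
 */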

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static void set_topology_timer(void);

/*
 * Take a snapshot of the associativity change counters that the
 * hypervisor maintains for each cpu.
 */
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
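/*
 * For instance (counter values invented for illustration): if the
 * snapshot for a cpu holds { 3, 7, 2 } and the VPA now reports
 * { 3, 8, 2 }, the second-level counter has ticked, so the snapshot
 * is refreshed and that cpu's bit is set in the mask.
 */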
static int update_cpu_associativity_changes_mask(void)
{
	int cpu, nr_cpus = 0;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	cpumask_clear(changes);

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed) {
			cpumask_set_cpu(cpu, changes);
			nr_cpus++;
		}
	}

	return nr_cpus;
}

/*
 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
 * the complete property we have to add the length in the first cell.
 */
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
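
/*
 * That is, 6 * 8 bytes of hcall return data yields 12 u32 cells; with
 * the leading length cell the unpacked buffer needs 13 entries, which
 * is what the expression above evaluates to.
 */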

/*
 * Convert the associativity domain numbers returned from the hypervisor
 * to the sequence they would appear in the ibm,associativity property.
 */
static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
{
	int i, nr_assoc_doms = 0;
	const u16 *field = (const u16 *) packed;

#define VPHN_FIELD_UNUSED	(0xffff)
#define VPHN_FIELD_MSB		(0x8000)
#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)

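	/*
	 * Worked example (16-bit field values invented for illustration,
	 * assuming the big-endian layout this code runs on): a field of
	 * 0x8002 has the MSB set, so it encodes the single domain number
	 * 2; a field pair of 0x0001 0x0002 has the MSB clear, so the two
	 * fields together encode the 32-bit domain number 0x00010002;
	 * and 0xffff marks an unused field.
	 */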
	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
		if (*field == VPHN_FIELD_UNUSED) {
			/*
			 * All significant fields processed, and remaining
			 * fields contain the reserved value of all 1's.
			 * Just store them.
			 */
			unpacked[i] = *((u32 *)field);
			field += 2;
		} else if (*field & VPHN_FIELD_MSB) {
			/* Data is in the lower 15 bits of this field */
			unpacked[i] = *field & VPHN_FIELD_MASK;
			field++;
			nr_assoc_doms++;
		} else {
			/*
			 * Data is in the lower 15 bits of this field
			 * concatenated with the next 16 bit field.
			 */
			unpacked[i] = *((u32 *)field);
			field += 2;
			nr_assoc_doms++;
		}
	}

	/* The first cell contains the length of the property */
	unpacked[0] = nr_assoc_doms;

	return nr_assoc_doms;
}

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
{
	long rc;
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	u64 flags = 1;
	int hwcpu = get_hard_smp_processor_id(cpu);

	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
	vphn_unpack_associativity(retbuf, associativity);

	return rc;
}

static long vphn_get_associativity(unsigned long cpu,
				   unsigned int *associativity)
{
	long rc;

	rc = hcall_vphn(cpu, associativity);

	switch (rc) {
	case H_FUNCTION:
		printk(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_HARDWARE:
		printk(KERN_ERR
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
	}

	return rc;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed.
 */
int arch_update_cpu_topology(void)
{
	int cpu, nid, old_nid;
	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
	struct device *dev;

	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
		vphn_get_associativity(cpu, associativity);
		nid = associativity_to_nid(associativity);

		if (nid < 0 || !node_online(nid))
			nid = first_online_node;

		old_nid = numa_cpu_lookup_table[cpu];

		/*
		 * Disable hotplug while we update the cpu
		 * masks and sysfs.
		 */
		get_online_cpus();
		unregister_cpu_under_node(cpu, old_nid);
		unmap_cpu_from_node(cpu);
		map_cpu_to_node(cpu, nid);
		register_cpu_under_node(cpu, nid);
		put_online_cpus();

		dev = get_cpu_device(cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}

	return 1;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (!vphn_enabled)
		return;
	if (update_cpu_associativity_changes_mask() > 0)
		topology_schedule_update();
	set_topology_timer();
}
static struct timer_list topology_timer =
	TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void set_topology_timer(void)
{
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}

/*
 * Start polling for VPHN associativity changes.
 */
int start_topology_update(void)
{
	int rc = 0;

	/* Disabled until races with load balancing are fixed */
	if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
	    get_lppaca()->shared_proc) {
		vphn_enabled = 1;
		setup_cpu_associativity_change_counters();
		init_timer_deferrable(&topology_timer);
		set_topology_timer();
		rc = 1;
	}

	return rc;
}
__initcall(start_topology_update);

/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
	vphn_enabled = 0;
	return del_timer_sync(&topology_timer);
}
#endif /* CONFIG_PPC_SPLPAR */