// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>
#include <asm/vphn.h>
#include <asm/drmem.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int primary_domain_index;
static int n_mem_addr_cells, n_mem_size_cells;

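/*
 * PAPR defines three "forms" of NUMA affinity encoding (a brief summary
 * for orientation; see the PAPR specification for the authoritative
 * definitions):
 *
 *  Form 0: flat affinity; nodes are simply local or remote.
 *  Form 1: hierarchical domains; distance is derived from the
 *          ibm,associativity-reference-points property.
 *  Form 2: explicit per-node-pair distances provided by firmware in
 *          the ibm,numa-distance-table property.
 */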
#define FORM0_AFFINITY 0
#define FORM1_AFFINITY 1
#define FORM2_AFFINITY 2
static int affinity_form;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
static int numa_distance_table[MAX_NUMNODES][MAX_NUMNODES] = {
	[0 ... MAX_NUMNODES - 1] = { [0 ... MAX_NUMNODES - 1] = -1 }
};
static int numa_id_index_table[MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = NUMA_NO_NODE };

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for_each_node(node)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}

static int __init fake_numa_create_new_node(unsigned long end_pfn,
					    unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes.
	 * We want to continue from where we left off the last time.
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		pr_debug("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}

static void __init reset_numa_cpu_lookup_table(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		numa_cpu_lookup_table[cpu] = -1;
}

void map_cpu_to_node(int cpu, int node)
{
	update_numa_cpu_lookup_table(cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) {
		pr_debug("adding cpu %d to node %d\n", cpu, node);
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
	}
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
		pr_debug("removing cpu %lu from node %d\n", cpu, node);
	} else {
		pr_warn("Warning: cpu %lu not found in node %d\n", cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

static int __associativity_to_nid(const __be32 *associativity,
				  int max_array_sz)
{
	int nid;
	/*
	 * primary_domain_index is a 1-based array index.
	 */
	int index = primary_domain_index - 1;

	if (!numa_enabled || index >= max_array_sz)
		return NUMA_NO_NODE;

	nid = of_read_number(&associativity[index], 1);

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= nr_node_ids)
		nid = NUMA_NO_NODE;
	return nid;
}

/*
 * Returns nid in the range [0..nr_node_ids-1], or -1 if no useful NUMA
 * info is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
	int array_sz = of_read_number(associativity, 1);

	/* Skip the first element in the associativity array */
	return __associativity_to_nid((associativity + 1), array_sz);
}

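/*
 * Collapse a form-2 inter-node distance onto a coarse relative scale:
 * 0 if within LOCAL_DISTANCE, 1 if within REMOTE_DISTANCE, 2 otherwise.
 */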
static int __cpu_form2_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	int dist;
	int node1, node2;

	node1 = associativity_to_nid(cpu1_assoc);
	node2 = associativity_to_nid(cpu2_assoc);

	dist = numa_distance_table[node1][node2];
	if (dist <= LOCAL_DISTANCE)
		return 0;
	else if (dist <= REMOTE_DISTANCE)
		return 1;
	else
		return 2;
}

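/*
 * Form-1 relative distance counts the number of reference-point domains
 * that differ before the first matching one, starting at the most
 * significant boundary: 0 if that boundary already matches, up to
 * distance_ref_points_depth if no reference domain matches.
 */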
static int __cpu_form1_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	int dist = 0;

	int i, index;

	for (i = 0; i < distance_ref_points_depth; i++) {
		index = be32_to_cpu(distance_ref_points[i]);
		if (cpu1_assoc[index] == cpu2_assoc[index])
			break;
		dist++;
	}

	return dist;
}

int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	/* We should not get called with FORM0 */
	VM_WARN_ON(affinity_form == FORM0_AFFINITY);
	if (affinity_form == FORM1_AFFINITY)
		return __cpu_form1_relative_distance(cpu1_assoc, cpu2_assoc);
	return __cpu_form2_relative_distance(cpu1_assoc, cpu2_assoc);
}

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

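/*
 * With form 1 affinity, the distance starts at LOCAL_DISTANCE and doubles
 * for every reference-point level at which the two nodes differ. E.g.
 * with LOCAL_DISTANCE = 10, nodes whose lookup entries differ at the
 * first two levels and match at the third are at distance 10 * 2 * 2 = 40.
 */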
int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (affinity_form == FORM2_AFFINITY)
		return numa_distance_table[a][b];
	else if (affinity_form == FORM0_AFFINITY)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
EXPORT_SYMBOL(__node_distance);

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = NUMA_NO_NODE;
	const __be32 *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	int nid = NUMA_NO_NODE;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		device = of_get_next_parent(device);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL(of_node_to_nid);

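/*
 * For form 1, record the domain value at each reference point of the
 * given associativity array in distance_lookup_table[nid], so that
 * __node_distance() can later compare nodes level by level.
 */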
static void __initialize_form1_numa_distance(const __be32 *associativity,
					     int max_array_sz)
{
	int i, nid;

	if (affinity_form != FORM1_AFFINITY)
		return;

	nid = __associativity_to_nid(associativity, max_array_sz);
	if (nid != NUMA_NO_NODE) {
		for (i = 0; i < distance_ref_points_depth; i++) {
			const __be32 *entry;
			int index = be32_to_cpu(distance_ref_points[i]) - 1;

			/*
			 * Broken hierarchy: return with a partially filled
			 * distance table.
			 */
			if (WARN(index >= max_array_sz, "Broken ibm,associativity property"))
				return;

			entry = &associativity[index];
			distance_lookup_table[nid][i] = of_read_number(entry, 1);
		}
	}
}

static void initialize_form1_numa_distance(const __be32 *associativity)
{
	int array_sz;

	array_sz = of_read_number(associativity, 1);
	/* Skip the first element in the associativity array */
	__initialize_form1_numa_distance(associativity + 1, array_sz);
}

/*
 * Used to update distance information w.r.t. a newly added node.
 */
void update_numa_distance(struct device_node *node)
{
	int nid;

	if (affinity_form == FORM0_AFFINITY)
		return;
	else if (affinity_form == FORM1_AFFINITY) {
		const __be32 *associativity;

		associativity = of_get_associativity(node);
		if (!associativity)
			return;

		initialize_form1_numa_distance(associativity);
		return;
	}

	/* FORM2 affinity */
	nid = of_node_to_nid_single(node);
	if (nid == NUMA_NO_NODE)
		return;

	/*
	 * With FORM2 we expect the NUMA distances for all possible NUMA
	 * nodes to be provided during boot.
	 */
	WARN(numa_distance_table[nid][nid] == -1,
	     "NUMA distance details for node %d not provided\n", nid);
}
EXPORT_SYMBOL_GPL(update_numa_distance);

/*
 * ibm,numa-lookup-index-table = {N, domainid1, domainid2, ..... domainidN}
 * ibm,numa-distance-table = { N, 1, 2, 4, 5, 1, 6, .... N elements}
 */
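/*
 * Illustrative (made-up) example: with
 *   ibm,numa-lookup-index-table = {2, 0, 4}
 *   ibm,numa-distance-table = {4, 10, 40, 40, 10}
 * (the leading 4 is an encoded int; each distance is a single byte),
 * nodes 0 and 4 are the lookup indexes and the parsed matrix becomes
 * numa_distance_table[0][0] = 10, [0][4] = 40, [4][0] = 40, [4][4] = 10.
 */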
static void __init initialize_form2_numa_distance_lookup_table(void)
{
	int i, j;
	struct device_node *root;
	const __u8 *form2_distances;
	const __be32 *numa_lookup_index;
	int form2_distances_length;
	int max_numa_index, distance_index;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	numa_lookup_index = of_get_property(root, "ibm,numa-lookup-index-table", NULL);
	max_numa_index = of_read_number(&numa_lookup_index[0], 1);

	/* first element of the array is the size and is encoded as an int */
	form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL);
	form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1);
	/* Skip the size, which is an encoded int */
	form2_distances += sizeof(__be32);

	pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n",
		 form2_distances_length, max_numa_index);

	for (i = 0; i < max_numa_index; i++)
		/* +1 skips the max_numa_index in the property */
		numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1);

	if (form2_distances_length != max_numa_index * max_numa_index) {
		WARN(1, "Wrong NUMA distance information\n");
		form2_distances = NULL; // don't use it
	}
	distance_index = 0;
	for (i = 0; i < max_numa_index; i++) {
		for (j = 0; j < max_numa_index; j++) {
			int nodeA = numa_id_index_table[i];
			int nodeB = numa_id_index_table[j];
			int dist;

			if (form2_distances)
				dist = form2_distances[distance_index++];
			else if (nodeA == nodeB)
				dist = LOCAL_DISTANCE;
			else
				dist = REMOTE_DISTANCE;
			numa_distance_table[nodeA][nodeB] = dist;
			pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist);
		}
	}

	of_node_put(root);
}

static int __init find_primary_domain_index(void)
{
	int index;
	struct device_node *root;

	/*
	 * Check for which form of affinity.
	 */
	if (firmware_has_feature(FW_FEATURE_OPAL)) {
		affinity_form = FORM1_AFFINITY;
	} else if (firmware_has_feature(FW_FEATURE_FORM2_AFFINITY)) {
		pr_debug("Using form 2 affinity\n");
		affinity_form = FORM2_AFFINITY;
	} else if (firmware_has_feature(FW_FEATURE_FORM1_AFFINITY)) {
		pr_debug("Using form 1 affinity\n");
		affinity_form = FORM1_AFFINITY;
	} else
		affinity_form = FORM0_AFFINITY;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		pr_debug("ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);
	if (affinity_form == FORM0_AFFINITY) {
		if (distance_ref_points_depth < 2) {
			pr_warn("short ibm,associativity-reference-points\n");
			goto err;
		}

		index = of_read_number(&distance_ref_points[1], 1);
	} else {
		/*
		 * Both FORM1 and FORM2 affinity find the primary domain details
		 * at the same offset.
		 */
		index = of_read_number(distance_ref_points, 1);
	}
	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		pr_warn("distance array capped at %d entries\n",
			MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return index;

err:
	of_node_put(root);
	return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

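/*
 * Read an n-cell big-endian value, advancing the buffer pointer past it.
 * E.g. for n = 2 and the cells {0x1, 0x80000000}, the result is
 * 0x180000000.
 */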
static unsigned long read_n_cells(int n, const __be32 **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | of_read_number(*buf, 1);
		(*buf)++;
	}
	return result;
}

struct assoc_arrays {
	u32 n_arrays;
	u32 array_sz;
	const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
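/*
 * Illustrative (made-up) layout for N = 2 arrays of M = 5 entries each:
 *   ibm,associativity-lookup-arrays = {2, 5, a0 ... a4, b0 ... b4}
 * where an LMB's aa_index selects either a0..a4 or b0..b4.
 */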
static int of_get_assoc_arrays(struct assoc_arrays *aa)
{
	struct device_node *memory;
	const __be32 *prop;
	u32 len;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return -1;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int)) {
		of_node_put(memory);
		return -1;
	}

	aa->n_arrays = of_read_number(prop++, 1);
	aa->array_sz = of_read_number(prop++, 1);

	of_node_put(memory);

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}

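/*
 * Like of_drconf_to_nid_single() below, but for form 1 this also seeds
 * distance_lookup_table from the LMB's associativity entry.
 */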
static int __init get_nid_and_numa_distance(struct drmem_lmb *lmb)
{
	struct assoc_arrays aa = { .arrays = NULL };
	int default_nid = NUMA_NO_NODE;
	int nid = default_nid;
	int rc, index;

	if ((primary_domain_index < 0) || !numa_enabled)
		return default_nid;

	rc = of_get_assoc_arrays(&aa);
	if (rc)
		return default_nid;

	if (primary_domain_index <= aa.array_sz &&
	    !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
		const __be32 *associativity;

		index = lmb->aa_index * aa.array_sz;
		associativity = &aa.arrays[index];
		nid = __associativity_to_nid(associativity, aa.array_sz);
		if (nid > 0 && affinity_form == FORM1_AFFINITY) {
			/*
			 * Lookup-array associativity entries do not have
			 * the array length as their first element.
			 */
			__initialize_form1_numa_distance(associativity, aa.array_sz);
		}
	}
	return nid;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
	struct assoc_arrays aa = { .arrays = NULL };
	int default_nid = NUMA_NO_NODE;
	int nid = default_nid;
	int rc, index;

	if ((primary_domain_index < 0) || !numa_enabled)
		return default_nid;

	rc = of_get_assoc_arrays(&aa);
	if (rc)
		return default_nid;

	if (primary_domain_index <= aa.array_sz &&
	    !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
		const __be32 *associativity;

		index = lmb->aa_index * aa.array_sz;
		associativity = &aa.arrays[index];
		nid = __associativity_to_nid(associativity, aa.array_sz);
	}
	return nid;
}

#ifdef CONFIG_PPC_SPLPAR

static int __vphn_get_associativity(long lcpu, __be32 *associativity)
{
	long rc, hwid;

	/*
	 * On a shared lpar, the device tree will not have node
	 * associativity. At this time lppaca, or its __old_status field,
	 * may not be updated, hence the kernel cannot detect whether it is
	 * on a shared lpar. So request an explicit associativity
	 * irrespective of whether the lpar is shared or dedicated. Use the
	 * device tree property as a fallback. cpu_to_phys_id is only valid
	 * between smp_setup_cpu_maps() and smp_setup_pacas().
	 */
	if (firmware_has_feature(FW_FEATURE_VPHN)) {
		if (cpu_to_phys_id)
			hwid = cpu_to_phys_id[lcpu];
		else
			hwid = get_hard_smp_processor_id(lcpu);

		rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity);
		if (rc == H_SUCCESS)
			return 0;
	}

	return -1;
}

static int vphn_get_nid(long lcpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};

	if (!__vphn_get_associativity(lcpu, associativity))
		return associativity_to_nid(associativity);

	return NUMA_NO_NODE;
}
#else

static int __vphn_get_associativity(long lcpu, __be32 *associativity)
{
	return -1;
}

static int vphn_get_nid(long unused)
{
	return NUMA_NO_NODE;
}
#endif /* CONFIG_PPC_SPLPAR */

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	struct device_node *cpu;
	int fcpu = cpu_first_thread_sibling(lcpu);
	int nid = NUMA_NO_NODE;

	if (!cpu_present(lcpu)) {
		set_cpu_numa_node(lcpu, first_online_node);
		return first_online_node;
	}

	/*
	 * If a valid cpu-to-node mapping is already available, use it
	 * directly instead of querying the firmware, since it represents
	 * the most recent mapping notified to us by the platform (eg: VPHN).
	 * Since the cpu_to_node binding remains the same for all threads in
	 * the core, if a valid cpu-to-node mapping is already available for
	 * the first thread in the core, use it.
	 */
	nid = numa_cpu_lookup_table[fcpu];
	if (nid >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	nid = vphn_get_nid(lcpu);
	if (nid != NUMA_NO_NODE)
		goto out_present;

	cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		if (cpu_present(lcpu))
			goto out_present;
		else
			goto out;
	}

	nid = of_node_to_nid_single(cpu);
	of_node_put(cpu);

out_present:
	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	/*
	 * Update for the first thread of the core. All threads of a core
	 * have to be part of the same node. This not only avoids querying
	 * for every other thread in the core, but also avoids a case where
	 * a virtual node associativity change causes subsequent threads of
	 * a core to be associated with different nids. However, if the
	 * first thread is already online, expect it to have a valid mapping.
	 */
	if (fcpu != lcpu) {
		WARN_ON(cpu_online(fcpu));
		map_cpu_to_node(fcpu, nid);
	}

	map_cpu_to_node(lcpu, nid);
out:
	return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
	int base, sibling, i;

	/* Verify that all the threads in the core belong to the same node */
	base = cpu_first_thread_sibling(cpu);

	for (i = 0; i < threads_per_core; i++) {
		sibling = base + i;

		if (sibling == cpu || cpu_is_offline(sibling))
			continue;

		if (cpu_to_node(sibling) != node) {
			WARN(1, "CPU thread siblings %d and %d don't belong"
				" to the same node!\n", cpu, sibling);
			break;
		}
	}
}

/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
	int nid;

	nid = numa_setup_cpu(cpu);
	verify_cpu_node_mapping(cpu, nid);
	return 0;
}

static int ppc_numa_cpu_dead(unsigned int cpu)
{
	return 0;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit. Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}

/*
 * Reads the counter for a given entry in the
 * linux,drconf-usable-memory property.
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory, a corresponding entry in the
	 * linux,drconf-usable-memory property contains a counter followed
	 * by that many (base, size) tuples. Read the counter from
	 * linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static int __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
				       const __be32 **usm,
				       void *data)
{
	unsigned int ranges, is_kexec_kdump = 0;
	unsigned long base, size, sz;
	int nid;

	/*
	 * Skip this block if the reserved bit is set in flags (0x80)
	 * or if the block is not assigned to this partition (0x8)
	 */
	if ((lmb->flags & DRCONF_MEM_RESERVED)
	    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
		return 0;

	if (*usm)
		is_kexec_kdump = 1;

	base = lmb->base_addr;
	size = drmem_lmb_size();
	ranges = 1;

	if (is_kexec_kdump) {
		ranges = read_usm_ranges(usm);
		if (!ranges) /* there are no (base, size) tuples */
			return 0;
	}

	do {
		if (is_kexec_kdump) {
			base = read_n_cells(n_mem_addr_cells, usm);
			size = read_n_cells(n_mem_size_cells, usm);
		}

		nid = get_nid_and_numa_distance(lmb);
		fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
					  &nid);
		node_set_online(nid);
		sz = numa_enforce_memory_limit(base, size);
		if (sz)
			memblock_set_node(base, sz, &memblock.memory, nid);
	} while (--ranges);

	return 0;
}

static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;
	const __be32 *associativity;

	if (numa_enabled == 0) {
		pr_warn("disabled by user\n");
		return -1;
	}

	primary_domain_index = find_primary_domain_index();

	if (primary_domain_index < 0) {
		/*
		 * If we fail to parse primary_domain_index from the device
		 * tree, mark NUMA as disabled and boot with NUMA disabled.
		 */
		numa_enabled = false;
		return primary_domain_index;
	}

	pr_debug("associativity depth for CPU/Memory: %d\n", primary_domain_index);

	/*
	 * If it is FORM2, initialize the distance table here.
	 */
	if (affinity_form == FORM2_AFFINITY)
		initialize_form2_numa_distance_lookup_table();

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		__be32 vphn_assoc[VPHN_ASSOC_BUFSIZE];
		struct device_node *cpu;
		int nid = NUMA_NO_NODE;

		memset(vphn_assoc, 0, VPHN_ASSOC_BUFSIZE * sizeof(__be32));

		if (__vphn_get_associativity(i, vphn_assoc) == 0) {
			nid = associativity_to_nid(vphn_assoc);
			initialize_form1_numa_distance(vphn_assoc);
		} else {
			/*
			 * Don't fall back to default_nid yet -- we will plug
			 * cpus into nodes once the memory scan has discovered
			 * the topology.
			 */
			cpu = of_get_cpu_node(i, NULL);
			BUG_ON(!cpu);

			associativity = of_get_associativity(cpu);
			if (associativity) {
				nid = associativity_to_nid(associativity);
				initialize_form1_numa_distance(associativity);
			}
			of_node_put(cpu);
		}

		/* node_set_online() is undefined behavior if 'nid' is negative */
		if (likely(nid >= 0))
			node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
					      "linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cells */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties. If none, then
		 * everything goes to default_nid.
		 */
		associativity = of_get_associativity(memory);
		if (associativity) {
			nid = associativity_to_nid(associativity);
			initialize_form1_numa_distance(associativity);
		} else
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		size = numa_enforce_memory_limit(start, size);
		if (size)
			memblock_set_node(start, size, &memblock.memory, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		walk_drmem_lmbs(memory, NULL, numa_setup_drmem_lmb);
		of_node_put(memory);
	}

	return 0;
}

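/*
 * Set up a flat topology when no usable NUMA information was found:
 * every memblock range is assigned to node 0, or to the fake nodes
 * requested via numa=fake=, if any.
 */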
static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	int i;

	pr_debug("Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram);
	pr_debug("Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20);

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, nid);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (!numa_enabled)
		return;

	for_each_online_node(node) {
		pr_info("Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					     node_to_cpumask_map[node])) {
				if (count == 0)
					pr_cont(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					pr_cont("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			pr_cont("-%u", nr_cpu_ids - 1);
		pr_cont("\n");
	}
}

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
	u64 spanned_pages = end_pfn - start_pfn;
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	void *nd;
	int tnid;

	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa)
		panic("Cannot allocate %zu bytes for node %d data\n",
		      nd_size, nid);

	nd = __va(nd_pa);

	/* report and initialize */
	pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
		nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}

static void __init find_possible_nodes(void)
{
	struct device_node *rtas;
	const __be32 *domains = NULL;
	int prop_length, max_nodes;
	u32 i;

	if (!numa_enabled)
		return;

	rtas = of_find_node_by_path("/rtas");
	if (!rtas)
		return;

	/*
	 * ibm,current-associativity-domains is a fairly recent property. If
	 * it doesn't exist, then fall back to ibm,max-associativity-domains.
	 * Current denotes what the platform can support compared to max,
	 * which denotes what the Hypervisor can support.
	 *
	 * If the LPAR is migratable, new nodes might be activated after a
	 * LPM, so we should consider the max number in that case.
	 */
	if (!of_get_property(of_root, "ibm,migratable-partition", NULL))
		domains = of_get_property(rtas,
					  "ibm,current-associativity-domains",
					  &prop_length);
	if (!domains) {
		domains = of_get_property(rtas, "ibm,max-associativity-domains",
					  &prop_length);
		if (!domains)
			goto out;
	}

	max_nodes = of_read_number(&domains[primary_domain_index], 1);
	pr_info("Partition configured for %d NUMA nodes.\n", max_nodes);

	for (i = 0; i < max_nodes; i++) {
		if (!node_possible(i))
			node_set(i, node_possible_map);
	}

	prop_length /= sizeof(int);
	if (prop_length > primary_domain_index + 2)
		coregroup_enabled = 1;

out:
	of_node_put(rtas);
}

void __init mem_topology_setup(void)
{
	int cpu;

	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;

	/*
	 * Linux/mm assumes node 0 to be online at boot. However this is not
	 * true on PowerPC, where node 0 is similar to any other node: it
	 * could be a cpuless, memoryless node. So force node 0 to be offline
	 * for now. This will prevent a cpuless, memoryless node 0 showing up
	 * unnecessarily as online. If a node has cpus or memory that need
	 * to be online, then the node will be marked online anyway.
	 */
	node_set_offline(0);

	if (parse_numa_properties())
		setup_nonnuma();

	/*
	 * Modify the set of possible NUMA nodes to reflect information
	 * available about the set of online nodes, and the set of nodes
	 * that we expect to make use of for this platform's affinity
	 * calculations.
	 */
	nodes_and(node_possible_map, node_possible_map, node_online_map);

	find_possible_nodes();

	setup_node_to_cpumask_map();

	reset_numa_cpu_lookup_table();

	for_each_possible_cpu(cpu) {
		/*
		 * Powerpc with CONFIG_NUMA always used to have a node 0,
		 * even if it was memoryless or cpuless. For all cpus that
		 * are possible but not present, cpu_to_node() would point
		 * to node 0. To remove a cpuless, memoryless dummy node,
		 * powerpc needs to make sure cpu_to_node() for all possible
		 * but not present cpus is set to a proper node.
		 */
		numa_setup_cpu(cpu);
	}
}

void __init initmem_init(void)
{
	int nid;

	memblock_dump_all();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		setup_node_data(nid, start_pfn, end_pfn);
	}

	sparse_init();

	/*
	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
	 * even before we online them, so that we can use cpu_to_{node,mem}
	 * early in boot, cf. smp_prepare_cpus().
	 * _nocalls() + manual invocation is used because cpuhp is not yet
	 * initialized for the boot CPU.
	 */
	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
{
	struct drmem_lmb *lmb;
	unsigned long lmb_size;
	int nid = NUMA_NO_NODE;

	lmb_size = drmem_lmb_size();

	for_each_drmem_lmb(lmb) {
		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((lmb->flags & DRCONF_MEM_RESERVED)
		    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < lmb->base_addr)
		    || (scn_addr >= (lmb->base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(lmb);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = NUMA_NO_NODE;

	for_each_node_by_type(memory, "memory") {
		int i = 0;

		while (1) {
			struct resource res;

			if (of_address_to_resource(memory, i++, &res))
				break;

			if ((scn_addr < res.start) || (scn_addr > res.end))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}

/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid;

	if (!numa_enabled)
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	struct device_node *dn = NULL;
	const __be64 *lrdr = NULL;

	dn = of_find_node_by_path("/rtas");
	if (dn) {
		lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
		of_node_put(dn);
		if (lrdr)
			return be64_to_cpup(lrdr);
	}

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		of_node_put(memory);
		return drmem_lmb_memory_max();
	}
	return 0;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
static int topology_inited;

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long vphn_get_associativity(unsigned long cpu,
				   __be32 *associativity)
{
	long rc;

	rc = hcall_vphn(get_hard_smp_processor_id(cpu),
			VPHN_FLAG_VCPU, associativity);

	switch (rc) {
	case H_SUCCESS:
		pr_debug("VPHN hcall succeeded. Reset polling...\n");
		goto out;

	case H_FUNCTION:
		pr_err_ratelimited("VPHN unsupported. Disabling polling...\n");
		break;
	case H_HARDWARE:
		pr_err_ratelimited("hcall_vphn() experienced a hardware fault "
				   "preventing VPHN. Disabling polling...\n");
		break;
	case H_PARAMETER:
		pr_err_ratelimited("hcall_vphn() was passed an invalid parameter. "
				   "Disabling polling...\n");
		break;
	default:
		pr_err_ratelimited("hcall_vphn() returned %ld. Disabling polling...\n",
				   rc);
		break;
	}
out:
	return rc;
}

void find_and_update_cpu_nid(int cpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	int new_nid;

	/* Use associativity from first thread for all siblings */
	if (vphn_get_associativity(cpu, associativity))
		return;

	/* We do not have a previous associativity, so find it now. */
	new_nid = associativity_to_nid(associativity);

	if (new_nid < 0 || !node_possible(new_nid))
		new_nid = first_online_node;
	else
		// Associate node <-> cpu, so cpu_up() calls
		// try_online_node() on the right node.
		set_cpu_numa_node(cpu, new_nid);

	pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__, cpu, new_nid);
}

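/*
 * The coregroup id is read from the next-to-last domain of the CPU's
 * VPHN associativity array when the firmware exposes enough domains;
 * otherwise fall back to the core id, so each core is its own group.
 */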
int cpu_to_coregroup_id(int cpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	int index;

	if (cpu < 0 || cpu >= nr_cpu_ids)
		return -1;

	if (!coregroup_enabled)
		goto out;

	if (!firmware_has_feature(FW_FEATURE_VPHN))
		goto out;

	if (vphn_get_associativity(cpu, associativity))
		goto out;

	index = of_read_number(associativity, 1);
	if (index > primary_domain_index + 1)
		return of_read_number(&associativity[index - 1], 1);

out:
	return cpu_to_core_id(cpu);
}

static int topology_update_init(void)
{
	topology_inited = 1;
	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * pSeries NUMA support
4 *
5 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
6 */
7#define pr_fmt(fmt) "numa: " fmt
8
9#include <linux/threads.h>
10#include <linux/memblock.h>
11#include <linux/init.h>
12#include <linux/mm.h>
13#include <linux/mmzone.h>
14#include <linux/export.h>
15#include <linux/nodemask.h>
16#include <linux/cpu.h>
17#include <linux/notifier.h>
18#include <linux/of.h>
19#include <linux/pfn.h>
20#include <linux/cpuset.h>
21#include <linux/node.h>
22#include <linux/stop_machine.h>
23#include <linux/proc_fs.h>
24#include <linux/seq_file.h>
25#include <linux/uaccess.h>
26#include <linux/slab.h>
27#include <asm/cputhreads.h>
28#include <asm/sparsemem.h>
29#include <asm/prom.h>
30#include <asm/smp.h>
31#include <asm/topology.h>
32#include <asm/firmware.h>
33#include <asm/paca.h>
34#include <asm/hvcall.h>
35#include <asm/setup.h>
36#include <asm/vdso.h>
37#include <asm/drmem.h>
38
39static int numa_enabled = 1;
40
41static char *cmdline __initdata;
42
43static int numa_debug;
44#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
45
46int numa_cpu_lookup_table[NR_CPUS];
47cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
48struct pglist_data *node_data[MAX_NUMNODES];
49
50EXPORT_SYMBOL(numa_cpu_lookup_table);
51EXPORT_SYMBOL(node_to_cpumask_map);
52EXPORT_SYMBOL(node_data);
53
54static int min_common_depth;
55static int n_mem_addr_cells, n_mem_size_cells;
56static int form1_affinity;
57
58#define MAX_DISTANCE_REF_POINTS 4
59static int distance_ref_points_depth;
60static const __be32 *distance_ref_points;
61static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
62
63/*
64 * Allocate node_to_cpumask_map based on number of available nodes
65 * Requires node_possible_map to be valid.
66 *
67 * Note: cpumask_of_node() is not valid until after this is done.
68 */
69static void __init setup_node_to_cpumask_map(void)
70{
71 unsigned int node;
72
73 /* setup nr_node_ids if not done yet */
74 if (nr_node_ids == MAX_NUMNODES)
75 setup_nr_node_ids();
76
77 /* allocate the map */
78 for_each_node(node)
79 alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
80
81 /* cpumask_of_node() will now work */
82 dbg("Node to cpumask map for %u nodes\n", nr_node_ids);
83}
84
85static int __init fake_numa_create_new_node(unsigned long end_pfn,
86 unsigned int *nid)
87{
88 unsigned long long mem;
89 char *p = cmdline;
90 static unsigned int fake_nid;
91 static unsigned long long curr_boundary;
92
93 /*
94 * Modify node id, iff we started creating NUMA nodes
95 * We want to continue from where we left of the last time
96 */
97 if (fake_nid)
98 *nid = fake_nid;
99 /*
100 * In case there are no more arguments to parse, the
101 * node_id should be the same as the last fake node id
102 * (we've handled this above).
103 */
104 if (!p)
105 return 0;
106
107 mem = memparse(p, &p);
108 if (!mem)
109 return 0;
110
111 if (mem < curr_boundary)
112 return 0;
113
114 curr_boundary = mem;
115
116 if ((end_pfn << PAGE_SHIFT) > mem) {
117 /*
118 * Skip commas and spaces
119 */
120 while (*p == ',' || *p == ' ' || *p == '\t')
121 p++;
122
123 cmdline = p;
124 fake_nid++;
125 *nid = fake_nid;
126 dbg("created new fake_node with id %d\n", fake_nid);
127 return 1;
128 }
129 return 0;
130}
131
132static void reset_numa_cpu_lookup_table(void)
133{
134 unsigned int cpu;
135
136 for_each_possible_cpu(cpu)
137 numa_cpu_lookup_table[cpu] = -1;
138}
139
140static void map_cpu_to_node(int cpu, int node)
141{
142 update_numa_cpu_lookup_table(cpu, node);
143
144 dbg("adding cpu %d to node %d\n", cpu, node);
145
146 if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
147 cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
148}
149
150#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
151static void unmap_cpu_from_node(unsigned long cpu)
152{
153 int node = numa_cpu_lookup_table[cpu];
154
155 dbg("removing cpu %lu from node %d\n", cpu, node);
156
157 if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
158 cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
159 } else {
160 printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
161 cpu, node);
162 }
163}
164#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
165
166int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
167{
168 int dist = 0;
169
170 int i, index;
171
172 for (i = 0; i < distance_ref_points_depth; i++) {
173 index = be32_to_cpu(distance_ref_points[i]);
174 if (cpu1_assoc[index] == cpu2_assoc[index])
175 break;
176 dist++;
177 }
178
179 return dist;
180}
181
182/* must hold reference to node during call */
183static const __be32 *of_get_associativity(struct device_node *dev)
184{
185 return of_get_property(dev, "ibm,associativity", NULL);
186}
187
188int __node_distance(int a, int b)
189{
190 int i;
191 int distance = LOCAL_DISTANCE;
192
193 if (!form1_affinity)
194 return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
195
196 for (i = 0; i < distance_ref_points_depth; i++) {
197 if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
198 break;
199
200 /* Double the distance for each NUMA level */
201 distance *= 2;
202 }
203
204 return distance;
205}
206EXPORT_SYMBOL(__node_distance);
207
208static void initialize_distance_lookup_table(int nid,
209 const __be32 *associativity)
210{
211 int i;
212
213 if (!form1_affinity)
214 return;
215
216 for (i = 0; i < distance_ref_points_depth; i++) {
217 const __be32 *entry;
218
219 entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
220 distance_lookup_table[nid][i] = of_read_number(entry, 1);
221 }
222}
223
224/*
225 * Returns nid in the range [0..nr_node_ids], or -1 if no useful NUMA
226 * info is found.
227 */
228static int associativity_to_nid(const __be32 *associativity)
229{
230 int nid = NUMA_NO_NODE;
231
232 if (!numa_enabled)
233 goto out;
234
235 if (of_read_number(associativity, 1) >= min_common_depth)
236 nid = of_read_number(&associativity[min_common_depth], 1);
237
238 /* POWER4 LPAR uses 0xffff as invalid node */
239 if (nid == 0xffff || nid >= nr_node_ids)
240 nid = NUMA_NO_NODE;
241
242 if (nid > 0 &&
243 of_read_number(associativity, 1) >= distance_ref_points_depth) {
244 /*
245 * Skip the length field and send start of associativity array
246 */
247 initialize_distance_lookup_table(nid, associativity + 1);
248 }
249
250out:
251 return nid;
252}
253
254/* Returns the nid associated with the given device tree node,
255 * or -1 if not found.
256 */
257static int of_node_to_nid_single(struct device_node *device)
258{
259 int nid = NUMA_NO_NODE;
260 const __be32 *tmp;
261
262 tmp = of_get_associativity(device);
263 if (tmp)
264 nid = associativity_to_nid(tmp);
265 return nid;
266}
267
268/* Walk the device tree upwards, looking for an associativity id */
269int of_node_to_nid(struct device_node *device)
270{
271 int nid = NUMA_NO_NODE;
272
273 of_node_get(device);
274 while (device) {
275 nid = of_node_to_nid_single(device);
276 if (nid != -1)
277 break;
278
279 device = of_get_next_parent(device);
280 }
281 of_node_put(device);
282
283 return nid;
284}
285EXPORT_SYMBOL(of_node_to_nid);
286
287static int __init find_min_common_depth(void)
288{
289 int depth;
290 struct device_node *root;
291
292 if (firmware_has_feature(FW_FEATURE_OPAL))
293 root = of_find_node_by_path("/ibm,opal");
294 else
295 root = of_find_node_by_path("/rtas");
296 if (!root)
297 root = of_find_node_by_path("/");
298
299 /*
300 * This property is a set of 32-bit integers, each representing
301 * an index into the ibm,associativity nodes.
302 *
303 * With form 0 affinity the first integer is for an SMP configuration
304 * (should be all 0's) and the second is for a normal NUMA
305 * configuration. We have only one level of NUMA.
306 *
307 * With form 1 affinity the first integer is the most significant
308 * NUMA boundary and the following are progressively less significant
309 * boundaries. There can be more than one level of NUMA.
310 */
311 distance_ref_points = of_get_property(root,
312 "ibm,associativity-reference-points",
313 &distance_ref_points_depth);
314
315 if (!distance_ref_points) {
316 dbg("NUMA: ibm,associativity-reference-points not found.\n");
317 goto err;
318 }
319
320 distance_ref_points_depth /= sizeof(int);
321
322 if (firmware_has_feature(FW_FEATURE_OPAL) ||
323 firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
324 dbg("Using form 1 affinity\n");
325 form1_affinity = 1;
326 }
327
328 if (form1_affinity) {
329 depth = of_read_number(distance_ref_points, 1);
330 } else {
331 if (distance_ref_points_depth < 2) {
332 printk(KERN_WARNING "NUMA: "
333 "short ibm,associativity-reference-points\n");
334 goto err;
335 }
336
337 depth = of_read_number(&distance_ref_points[1], 1);
338 }
339
340 /*
341 * Warn and cap if the hardware supports more than
342 * MAX_DISTANCE_REF_POINTS domains.
343 */
344 if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
345 printk(KERN_WARNING "NUMA: distance array capped at "
346 "%d entries\n", MAX_DISTANCE_REF_POINTS);
347 distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
348 }
349
350 of_node_put(root);
351 return depth;
352
353err:
354 of_node_put(root);
355 return -1;
356}
357
358static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
359{
360 struct device_node *memory = NULL;
361
362 memory = of_find_node_by_type(memory, "memory");
363 if (!memory)
364 panic("numa.c: No memory nodes found!");
365
366 *n_addr_cells = of_n_addr_cells(memory);
367 *n_size_cells = of_n_size_cells(memory);
368 of_node_put(memory);
369}
370
371static unsigned long read_n_cells(int n, const __be32 **buf)
372{
373 unsigned long result = 0;
374
375 while (n--) {
376 result = (result << 32) | of_read_number(*buf, 1);
377 (*buf)++;
378 }
379 return result;
380}
381
382struct assoc_arrays {
383 u32 n_arrays;
384 u32 array_sz;
385 const __be32 *arrays;
386};
387
388/*
389 * Retrieve and validate the list of associativity arrays for drconf
390 * memory from the ibm,associativity-lookup-arrays property of the
391 * device tree..
392 *
393 * The layout of the ibm,associativity-lookup-arrays property is a number N
394 * indicating the number of associativity arrays, followed by a number M
395 * indicating the size of each associativity array, followed by a list
396 * of N associativity arrays.
397 */
398static int of_get_assoc_arrays(struct assoc_arrays *aa)
399{
400 struct device_node *memory;
401 const __be32 *prop;
402 u32 len;
403
404 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
405 if (!memory)
406 return -1;
407
408 prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
409 if (!prop || len < 2 * sizeof(unsigned int)) {
410 of_node_put(memory);
411 return -1;
412 }
413
414 aa->n_arrays = of_read_number(prop++, 1);
415 aa->array_sz = of_read_number(prop++, 1);
416
417 of_node_put(memory);
418
419 /* Now that we know the number of arrays and size of each array,
420 * revalidate the size of the property read in.
421 */
422 if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
423 return -1;
424
425 aa->arrays = prop;
426 return 0;
427}
428
429/*
430 * This is like of_node_to_nid_single() for memory represented in the
431 * ibm,dynamic-reconfiguration-memory node.
432 */
433static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
434{
435 struct assoc_arrays aa = { .arrays = NULL };
436 int default_nid = NUMA_NO_NODE;
437 int nid = default_nid;
438 int rc, index;
439
440 if ((min_common_depth < 0) || !numa_enabled)
441 return default_nid;
442
443 rc = of_get_assoc_arrays(&aa);
444 if (rc)
445 return default_nid;
446
447 if (min_common_depth <= aa.array_sz &&
448 !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
449 index = lmb->aa_index * aa.array_sz + min_common_depth - 1;
450 nid = of_read_number(&aa.arrays[index], 1);
451
452 if (nid == 0xffff || nid >= nr_node_ids)
453 nid = default_nid;
454
455 if (nid > 0) {
456 index = lmb->aa_index * aa.array_sz;
457 initialize_distance_lookup_table(nid,
458 &aa.arrays[index]);
459 }
460 }
461
462 return nid;
463}
464
465#ifdef CONFIG_PPC_SPLPAR
466static int vphn_get_nid(long lcpu)
467{
468 __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
469 long rc, hwid;
470
471 /*
472 * On a shared lpar, device tree will not have node associativity.
473 * At this time lppaca, or its __old_status field may not be
474 * updated. Hence kernel cannot detect if its on a shared lpar. So
475 * request an explicit associativity irrespective of whether the
	 * lpar is shared or dedicated. Use the device tree property as a
	 * fallback. cpu_to_phys_id is only valid between
	 * smp_setup_cpu_maps() and smp_setup_pacas().
	 */
	if (firmware_has_feature(FW_FEATURE_VPHN)) {
		if (cpu_to_phys_id)
			hwid = cpu_to_phys_id[lcpu];
		else
			hwid = get_hard_smp_processor_id(lcpu);

		rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity);
		if (rc == H_SUCCESS)
			return associativity_to_nid(associativity);
	}

	return NUMA_NO_NODE;
}
#else
static int vphn_get_nid(long unused)
{
	return NUMA_NO_NODE;
}
#endif /* CONFIG_PPC_SPLPAR */

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	struct device_node *cpu;
	int fcpu = cpu_first_thread_sibling(lcpu);
	int nid = NUMA_NO_NODE;

	/*
	 * If a valid cpu-to-node mapping is already available, use it
	 * directly instead of querying the firmware, since it represents
	 * the most recent mapping notified to us by the platform (eg: VPHN).
	 * The cpu-to-node binding is the same for all threads in a core,
	 * so a valid mapping for the first thread in the core can simply
	 * be reused for its siblings.
	 */
	nid = numa_cpu_lookup_table[fcpu];
	if (nid >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	nid = vphn_get_nid(lcpu);
	if (nid != NUMA_NO_NODE)
		goto out_present;

	cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		if (cpu_present(lcpu))
			goto out_present;
		else
			goto out;
	}

	nid = of_node_to_nid_single(cpu);
	of_node_put(cpu);

out_present:
	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	/*
	 * Update for the first thread of the core. All threads of a core
	 * have to be part of the same node. This not only avoids querying
	 * for every other thread in the core, but also avoids a case
	 * where a virtual node associativity change could cause subsequent
	 * threads of a core to be associated with different nids. However,
	 * if the first thread is already online, expect it to have a valid
	 * mapping.
	 */
	if (fcpu != lcpu) {
		WARN_ON(cpu_online(fcpu));
		map_cpu_to_node(fcpu, nid);
	}

	map_cpu_to_node(lcpu, nid);
out:
	return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
	int base, sibling, i;

	/* Verify that all the threads in the core belong to the same node */
	base = cpu_first_thread_sibling(cpu);

	for (i = 0; i < threads_per_core; i++) {
		sibling = base + i;

		if (sibling == cpu || cpu_is_offline(sibling))
			continue;

		if (cpu_to_node(sibling) != node) {
			WARN(1, "CPU thread siblings %d and %d don't belong to the same node!\n",
			     cpu, sibling);
			break;
		}
	}
}

/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
	int nid;

	nid = numa_setup_cpu(cpu);
	verify_cpu_node_mapping(cpu, nid);
	return 0;
}

static int ppc_numa_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	unmap_cpu_from_node(cpu);
#endif
	return 0;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit. Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}
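
/*
 * Worked example (addresses assumed): with memblock_end_of_DRAM() at
 * 0x80000000, a region of size 0x40000000 starting at 0x60000000 is
 * truncated to 0x20000000 (the span below the limit), while any region
 * starting at or above 0x80000000 is discarded entirely (size 0).
 */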

/*
 * Reads the counter for a given entry in the
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
	/*
	 * For each LMB in the ibm,dynamic-memory property, the
	 * corresponding entry in the linux,drconf-usable-memory property
	 * contains a counter followed by that many (base, size) tuples.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}
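
/*
 * Illustrative layout (cell counts assumed): with n_mem_addr_cells = 2
 * and n_mem_size_cells = 2, a linux,drconf-usable-memory entry that
 * describes two usable ranges for one LMB spans ten cells: a two-cell
 * counter followed by two four-cell (base, size) tuples:
 *
 *   <0x0 0x2  base0_hi base0_lo size0_hi size0_lo
 *             base1_hi base1_lo size1_hi size1_lo>
 *
 * read_usm_ranges() consumes the counter and advances *usm; the caller
 * then reads each (base, size) tuple with read_n_cells().
 */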

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static int __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
				       const __be32 **usm,
				       void *data)
{
	unsigned int ranges, is_kexec_kdump = 0;
	unsigned long base, size, sz;
	int nid;

	/*
	 * Skip this block if the reserved bit is set in flags (0x80)
	 * or if the block is not assigned to this partition (0x8)
	 */
	if ((lmb->flags & DRCONF_MEM_RESERVED) ||
	    !(lmb->flags & DRCONF_MEM_ASSIGNED))
		return 0;

	if (*usm)
		is_kexec_kdump = 1;

	base = lmb->base_addr;
	size = drmem_lmb_size();
	ranges = 1;

	if (is_kexec_kdump) {
		ranges = read_usm_ranges(usm);
		if (!ranges) /* there are no (base, size) tuples */
			return 0;
	}

	do {
		if (is_kexec_kdump) {
			base = read_n_cells(n_mem_addr_cells, usm);
			size = read_n_cells(n_mem_size_cells, usm);
		}

		nid = of_drconf_to_nid_single(lmb);
		fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
					  &nid);
		node_set_online(nid);
		sz = numa_enforce_memory_limit(base, size);
		if (sz)
			memblock_set_node(base, sz, &memblock.memory, nid);
	} while (--ranges);

	return 0;
}

static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		pr_warn("NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0) {
		/*
		 * If we fail to parse min_common_depth from the device
		 * tree, mark NUMA as disabled and boot with NUMA disabled.
		 */
		numa_enabled = false;
		return min_common_depth;
	}

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		struct device_node *cpu;
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
					      "linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/*
		 * len is in bytes; each range occupies
		 * (n_mem_addr_cells + n_mem_size_cells) 4-byte cells.
		 */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
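
		/*
		 * Illustrative memory node (cell counts assumed): with
		 * #address-cells = 2 and #size-cells = 2, a node such as
		 *
		 *   memory@0 {
		 *           device_type = "memory";
		 *           reg = <0x0 0x0 0x1 0x0>;    (4 GiB at 0)
		 *   };
		 *
		 * has len = 16, so ranges = (16 >> 2) / (2 + 2) = 1, and
		 * read_n_cells() below yields start = 0 and
		 * size = 0x100000000.
		 */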
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties. If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		size = numa_enforce_memory_limit(start, size);
		if (size)
			memblock_set_node(start, size, &memblock.memory, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		walk_drmem_lmbs(memory, NULL, numa_setup_drmem_lmb);
		of_node_put(memory);
	}

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	pr_debug("Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
		 top_of_ram, total_ram);
	pr_debug("Memory hole size: %ldMB\n",
		 (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, nid);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (!numa_enabled)
		return;

	for_each_online_node(node) {
		pr_info("Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					     node_to_cpumask_map[node])) {
				if (count == 0)
					pr_cont(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					pr_cont("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			pr_cont("-%u", nr_cpu_ids - 1);
		pr_cont("\n");
	}
}
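
/*
 * Example output (CPU numbering illustrative):
 *
 *   Node 0 CPUs: 0-7 16-23
 *   Node 1 CPUs: 8-15
 *
 * Contiguous runs of CPU ids are compressed to "first-last"; an
 * isolated id prints on its own.
 */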

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
	u64 spanned_pages = end_pfn - start_pfn;
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	void *nd;
	int tnid;

	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa)
		panic("Cannot allocate %zu bytes for node %d data\n",
		      nd_size, nid);

	nd = __va(nd_pa);

	/* report and initialize */
	pr_info(" NODE_DATA [mem %#010Lx-%#010Lx]\n",
		nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}
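
/*
 * Example boot log (addresses illustrative):
 *
 *   NODE_DATA [mem 0x7fff6300-0x7fff9eff]
 *   NODE_DATA(1) on node 0
 *
 * The second line appears only when a node's pg_data_t could not be
 * placed on that node's own memory (e.g. a memoryless node).
 */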

static void __init find_possible_nodes(void)
{
	struct device_node *rtas;
	u32 numnodes, i;

	if (!numa_enabled)
		return;

	rtas = of_find_node_by_path("/rtas");
	if (!rtas)
		return;

	if (of_property_read_u32_index(rtas,
				       "ibm,max-associativity-domains",
				       min_common_depth, &numnodes))
		goto out;

	for (i = 0; i < numnodes; i++) {
		if (!node_possible(i))
			node_set(i, node_possible_map);
	}

out:
	of_node_put(rtas);
}
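
/*
 * Illustrative property (values assumed):
 *
 *   ibm,max-associativity-domains = <0x4 0x2 0x4 0x8 0x20>;
 *
 * The leading cell is the number of levels that follow. With
 * min_common_depth = 4, of_property_read_u32_index() above reads the
 * u32 at index 4 (0x20), so node ids 0-31 are marked possible.
 */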

void __init mem_topology_setup(void)
{
	int cpu;

	if (parse_numa_properties())
		setup_nonnuma();

	/*
	 * Modify the set of possible NUMA nodes to reflect information
	 * available about the set of online nodes, and the set of nodes
	 * that we expect to make use of for this platform's affinity
	 * calculations.
	 */
	nodes_and(node_possible_map, node_possible_map, node_online_map);

	find_possible_nodes();

	setup_node_to_cpumask_map();

	reset_numa_cpu_lookup_table();

	for_each_present_cpu(cpu)
		numa_setup_cpu(cpu);
}

void __init initmem_init(void)
{
	int nid;

	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	memblock_dump_all();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		setup_node_data(nid, start_pfn, end_pfn);
	}

	sparse_init();

	/*
	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
	 * even before we online them, so that we can use cpu_to_{node,mem}
	 * early in boot, cf. smp_prepare_cpus().
	 * _nocalls() + manual invocation is used because cpuhp is not yet
	 * initialized for the boot CPU.
	 */
	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);
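
/*
 * Example command lines (sizes illustrative):
 *
 *   numa=off          disable NUMA entirely
 *   numa=debug        enable verbose NUMA reporting
 *   numa=fake=1G,4G   assign a scanned memory range to a new fake node
 *                     whenever it ends above the next listed boundary
 *                     (see fake_numa_create_new_node())
 *
 * Flags may be combined, e.g. "numa=debug,fake=2G".
 */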

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
{
	struct drmem_lmb *lmb;
	unsigned long lmb_size;
	int nid = NUMA_NO_NODE;

	lmb_size = drmem_lmb_size();

	for_each_drmem_lmb(lmb) {
		/*
		 * Skip this block if it is reserved or not assigned to
		 * this partition.
		 */
		if ((lmb->flags & DRCONF_MEM_RESERVED) ||
		    !(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < lmb->base_addr) ||
		    (scn_addr >= (lmb->base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(lmb);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = NUMA_NO_NODE;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/*
		 * len is in bytes; each range occupies
		 * (n_mem_addr_cells + n_mem_size_cells) 4-byte cells.
		 */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}

/*
 * Find the node associated with a hot added memory section. A section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed
 * that sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid;

	if (!numa_enabled)
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	struct device_node *dn = NULL;
	const __be64 *lrdr = NULL;

	dn = of_find_node_by_path("/rtas");
	if (dn) {
		lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
		of_node_put(dn);
		if (lrdr)
			return be64_to_cpup(lrdr);
	}

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		of_node_put(memory);
		return drmem_lmb_memory_max();
	}
	return 0;
}
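
/*
 * Illustrative property (values assumed): with
 *
 *   ibm,lrdr-capacity = <0x00000010 0x00000000 ...>;
 *
 * the leading 64-bit quantity (0x1000000000, i.e. 64 GiB) is returned
 * above as the maximum address to which memory may be hot added.
 */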

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
static int topology_inited;

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long vphn_get_associativity(unsigned long cpu,
				   __be32 *associativity)
{
	long rc;

	rc = hcall_vphn(get_hard_smp_processor_id(cpu),
			VPHN_FLAG_VCPU, associativity);

	switch (rc) {
	case H_SUCCESS:
		dbg("VPHN hcall succeeded. Reset polling...\n");
		goto out;

	case H_FUNCTION:
		pr_err_ratelimited("VPHN unsupported. Disabling polling...\n");
		break;
	case H_HARDWARE:
		pr_err_ratelimited("hcall_vphn() experienced a hardware fault preventing VPHN. Disabling polling...\n");
		break;
	case H_PARAMETER:
		pr_err_ratelimited("hcall_vphn() was passed an invalid parameter. Disabling polling...\n");
		break;
	default:
		pr_err_ratelimited("hcall_vphn() returned %ld. Disabling polling...\n",
				   rc);
		break;
	}
out:
	return rc;
}
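
/*
 * Illustrative buffer (values assumed): on H_SUCCESS the associativity
 * array might hold
 *
 *   <0x4 0x0 0x0 0x1 0x5>
 *
 * where the leading cell gives the number of domains that follow; with
 * min_common_depth = 4, associativity_to_nid() would pick the cell at
 * index 4, i.e. node 5.
 */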

int find_and_online_cpu_nid(int cpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	int new_nid;

	/* Use associativity from first thread for all siblings */
	if (vphn_get_associativity(cpu, associativity))
		return cpu_to_node(cpu);

	new_nid = associativity_to_nid(associativity);
	if (new_nid < 0 || !node_possible(new_nid))
		new_nid = first_online_node;

	if (!NODE_DATA(new_nid)) {
#ifdef CONFIG_MEMORY_HOTPLUG
		/*
		 * Need to ensure that NODE_DATA is initialized for a node from
		 * available memory (see memblock_alloc_try_nid). If unable to
		 * init the node, then default to nearest node that has memory
		 * installed. Skip onlining a node if the subsystems are not
		 * yet initialized.
		 */
		if (!topology_inited || try_online_node(new_nid))
			new_nid = first_online_node;
#else
		/*
		 * Default to using the nearest node that has memory installed.
		 * Otherwise, it would be necessary to patch the kernel MM code
		 * to deal with more memoryless-node error conditions.
		 */
		new_nid = first_online_node;
#endif
	}

	pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__, cpu, new_nid);
	return new_nid;
}

static int topology_update_init(void)
{
	topology_inited = 1;
	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */