// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>
#include <asm/vphn.h>
#include <asm/drmem.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int primary_domain_index;
static int n_mem_addr_cells, n_mem_size_cells;

#define FORM0_AFFINITY 0
#define FORM1_AFFINITY 1
#define FORM2_AFFINITY 2
static int affinity_form;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
static int numa_distance_table[MAX_NUMNODES][MAX_NUMNODES] = {
	[0 ... MAX_NUMNODES - 1] = { [0 ... MAX_NUMNODES - 1] = -1 }
};
static int numa_id_index_table[MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = NUMA_NO_NODE };

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for_each_node(node)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}

static int __init fake_numa_create_new_node(unsigned long end_pfn,
					    unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify the node id iff we have already started creating NUMA
	 * nodes. We want to continue from where we left off the last time.
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		pr_debug("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}
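
/*
 * Illustrative sketch of the parser above (values are assumptions, not
 * from any particular machine): booting with "numa=fake=1G,4G" leaves
 * cmdline pointing at "1G,4G". Ranges ending at or below 1G keep the
 * caller's nid, ranges ending between 1G and 4G land in fake node 1,
 * and ranges ending above 4G in fake node 2; each boundary is consumed
 * from the command line as it is crossed.
 */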

static void __init reset_numa_cpu_lookup_table(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		numa_cpu_lookup_table[cpu] = -1;
}

void map_cpu_to_node(int cpu, int node)
{
	update_numa_cpu_lookup_table(cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) {
		pr_debug("adding cpu %d to node %d\n", cpu, node);
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
	}
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
		pr_debug("removing cpu %lu from node %d\n", cpu, node);
	} else {
		pr_warn("Warning: cpu %lu not found in node %d\n", cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

static int __associativity_to_nid(const __be32 *associativity,
				  int max_array_sz)
{
	int nid;
	/*
	 * primary_domain_index is a 1-based array index.
	 */
	int index = primary_domain_index - 1;

	if (!numa_enabled || index >= max_array_sz)
		return NUMA_NO_NODE;

	nid = of_read_number(&associativity[index], 1);

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= nr_node_ids)
		nid = NUMA_NO_NODE;
	return nid;
}

/*
 * Returns nid in the range [0..nr_node_ids - 1], or -1 if no useful
 * NUMA info is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
	int array_sz = of_read_number(associativity, 1);

	/* Skip the first element in the associativity array */
	return __associativity_to_nid((associativity + 1), array_sz);
}
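
/*
 * Worked example (assumed property values, for illustration only): with
 * ibm,associativity = { 4, 0, 0, 0, 5 } and primary_domain_index = 4,
 * the first cell (4) is the array length, and __associativity_to_nid()
 * reads the cell at 1-based index 4 of the remaining array, yielding
 * nid 5.
 */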

static int __cpu_form2_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	int dist;
	int node1, node2;

	node1 = associativity_to_nid(cpu1_assoc);
	node2 = associativity_to_nid(cpu2_assoc);

	dist = numa_distance_table[node1][node2];
	if (dist <= LOCAL_DISTANCE)
		return 0;
	else if (dist <= REMOTE_DISTANCE)
		return 1;
	else
		return 2;
}

static int __cpu_form1_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	int dist = 0;

	int i, index;

	for (i = 0; i < distance_ref_points_depth; i++) {
		index = be32_to_cpu(distance_ref_points[i]);
		if (cpu1_assoc[index] == cpu2_assoc[index])
			break;
		dist++;
	}

	return dist;
}
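
/*
 * Example (illustrative): with distance_ref_points = { 4, 2 }, two CPUs
 * whose associativity arrays differ at domain index 4 but match at
 * domain index 2 are at relative distance 1; identical arrays give 0,
 * and arrays differing at every reference point give
 * distance_ref_points_depth.
 */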

int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	/* We should not get called with FORM0 */
	VM_WARN_ON(affinity_form == FORM0_AFFINITY);
	if (affinity_form == FORM1_AFFINITY)
		return __cpu_form1_relative_distance(cpu1_assoc, cpu2_assoc);
	return __cpu_form2_relative_distance(cpu1_assoc, cpu2_assoc);
}

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (affinity_form == FORM2_AFFINITY)
		return numa_distance_table[a][b];
	else if (affinity_form == FORM0_AFFINITY)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
EXPORT_SYMBOL(__node_distance);
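
/*
 * Form 1 example (illustrative): with distance_ref_points_depth = 2,
 * nodes whose lookup-table entries match at the first reference point
 * are LOCAL_DISTANCE (10) apart, nodes matching only at the second are
 * 20 apart, and nodes matching at neither are 40 apart, since the
 * distance doubles for each unmatched NUMA level.
 */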

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = NUMA_NO_NODE;
	const __be32 *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	int nid = NUMA_NO_NODE;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		device = of_get_next_parent(device);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL(of_node_to_nid);

static void __initialize_form1_numa_distance(const __be32 *associativity,
					     int max_array_sz)
{
	int i, nid;

	if (affinity_form != FORM1_AFFINITY)
		return;

	nid = __associativity_to_nid(associativity, max_array_sz);
	if (nid != NUMA_NO_NODE) {
		for (i = 0; i < distance_ref_points_depth; i++) {
			const __be32 *entry;
			int index = be32_to_cpu(distance_ref_points[i]) - 1;

			/*
			 * Broken hierarchy: bail out, leaving the distance
			 * table only partially initialized.
			 */
			if (WARN(index >= max_array_sz, "Broken ibm,associativity property"))
				return;

			entry = &associativity[index];
			distance_lookup_table[nid][i] = of_read_number(entry, 1);
		}
	}
}

static void initialize_form1_numa_distance(const __be32 *associativity)
{
	int array_sz;

	array_sz = of_read_number(associativity, 1);
	/* Skip the first element in the associativity array */
	__initialize_form1_numa_distance(associativity + 1, array_sz);
}

/*
 * Used to update distance information w.r.t newly added node.
 */
void update_numa_distance(struct device_node *node)
{
	int nid;

	if (affinity_form == FORM0_AFFINITY)
		return;
	else if (affinity_form == FORM1_AFFINITY) {
		const __be32 *associativity;

		associativity = of_get_associativity(node);
		if (!associativity)
			return;

		initialize_form1_numa_distance(associativity);
		return;
	}

	/* FORM2 affinity */
	nid = of_node_to_nid_single(node);
	if (nid == NUMA_NO_NODE)
		return;

	/*
	 * With FORM2 we expect NUMA distance of all possible NUMA
	 * nodes to be provided during boot.
	 */
	WARN(numa_distance_table[nid][nid] == -1,
	     "NUMA distance details for node %d not provided\n", nid);
}
EXPORT_SYMBOL_GPL(update_numa_distance);

/*
 * ibm,numa-lookup-index-table = {N, domainid1, domainid2, ..... domainidN}
 * ibm,numa-distance-table = { N, 1, 2, 4, 5, 1, 6, .... N elements}
 */
static void __init initialize_form2_numa_distance_lookup_table(void)
{
	int i, j;
	struct device_node *root;
	const __u8 *form2_distances;
	const __be32 *numa_lookup_index;
	int form2_distances_length;
	int max_numa_index, distance_index;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	numa_lookup_index = of_get_property(root, "ibm,numa-lookup-index-table", NULL);
	max_numa_index = of_read_number(&numa_lookup_index[0], 1);

	/* The first element of the array is the size, encoded as an int */
	form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL);
	form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1);
	/* Skip the size, which is an encoded int */
	form2_distances += sizeof(__be32);

	pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n",
		 form2_distances_length, max_numa_index);

	for (i = 0; i < max_numa_index; i++)
		/* +1 skips the max_numa_index at the start of the property */
		numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1);

	if (form2_distances_length != max_numa_index * max_numa_index) {
		WARN(1, "Wrong NUMA distance information\n");
		form2_distances = NULL; // don't use it
	}
	distance_index = 0;
	for (i = 0; i < max_numa_index; i++) {
		for (j = 0; j < max_numa_index; j++) {
			int nodeA = numa_id_index_table[i];
			int nodeB = numa_id_index_table[j];
			int dist;

			if (form2_distances)
				dist = form2_distances[distance_index++];
			else if (nodeA == nodeB)
				dist = LOCAL_DISTANCE;
			else
				dist = REMOTE_DISTANCE;
			numa_distance_table[nodeA][nodeB] = dist;
			pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist);
		}
	}

	of_node_put(root);
}
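
/*
 * Worked example for the parser above (assumed property values):
 * ibm,numa-lookup-index-table = { 2, 0, 4 } and
 * ibm,numa-distance-table = { 4, 10, 20, 20, 10 } describe two NUMA ids
 * (0 and 4) and a 2x2 matrix, filling in dist[0][0] = 10,
 * dist[0][4] = 20, dist[4][0] = 20 and dist[4][4] = 10.
 */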

static int __init find_primary_domain_index(void)
{
	int index;
	struct device_node *root;

	/*
	 * Check for which form of affinity.
	 */
	if (firmware_has_feature(FW_FEATURE_OPAL)) {
		affinity_form = FORM1_AFFINITY;
	} else if (firmware_has_feature(FW_FEATURE_FORM2_AFFINITY)) {
		pr_debug("Using form 2 affinity\n");
		affinity_form = FORM2_AFFINITY;
	} else if (firmware_has_feature(FW_FEATURE_FORM1_AFFINITY)) {
		pr_debug("Using form 1 affinity\n");
		affinity_form = FORM1_AFFINITY;
	} else
		affinity_form = FORM0_AFFINITY;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		pr_debug("ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);
	if (affinity_form == FORM0_AFFINITY) {
		if (distance_ref_points_depth < 2) {
			pr_warn("short ibm,associativity-reference-points\n");
			goto err;
		}

		index = of_read_number(&distance_ref_points[1], 1);
	} else {
		/*
		 * Both FORM1 and FORM2 affinity find the primary domain details
		 * at the same offset.
		 */
		index = of_read_number(distance_ref_points, 1);
	}
	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		pr_warn("distance array capped at %d entries\n",
			MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return index;

err:
	of_node_put(root);
	return -1;
}
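
/*
 * Example (illustrative, not from any particular platform): a form 1
 * machine might expose ibm,associativity-reference-points = { 0x4, 0x2 },
 * in which case the first entry (0x4) becomes the primary domain index,
 * i.e. the 1-based position within each ibm,associativity array that
 * holds the node id. A form 0 platform would use the second entry
 * instead.
 */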

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long read_n_cells(int n, const __be32 **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | of_read_number(*buf, 1);
		(*buf)++;
	}
	return result;
}
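
/*
 * Example: reading n = 2 cells from a buffer holding { 0x1, 0x80000000 }
 * returns 0x180000000; the 32-bit cells are concatenated into one
 * 64-bit value and *buf is advanced past both cells.
 */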

struct assoc_arrays {
	u32 n_arrays;
	u32 array_sz;
	const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct assoc_arrays *aa)
{
	struct device_node *memory;
	const __be32 *prop;
	u32 len;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return -1;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int)) {
		of_node_put(memory);
		return -1;
	}

	aa->n_arrays = of_read_number(prop++, 1);
	aa->array_sz = of_read_number(prop++, 1);

	of_node_put(memory);

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}
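
/*
 * Example layout (assumed values): a property of
 * { 2, 4,  0 0 0 1,  0 0 0 2 } describes N = 2 associativity arrays of
 * M = 4 cells each; aa->arrays then points at the first 4-cell array,
 * and an lmb's aa_index selects which of the two arrays applies to it.
 */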

static int __init get_nid_and_numa_distance(struct drmem_lmb *lmb)
{
	struct assoc_arrays aa = { .arrays = NULL };
	int default_nid = NUMA_NO_NODE;
	int nid = default_nid;
	int rc, index;

	if ((primary_domain_index < 0) || !numa_enabled)
		return default_nid;

	rc = of_get_assoc_arrays(&aa);
	if (rc)
		return default_nid;

	if (primary_domain_index <= aa.array_sz &&
	    !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
		const __be32 *associativity;

		index = lmb->aa_index * aa.array_sz;
		associativity = &aa.arrays[index];
		nid = __associativity_to_nid(associativity, aa.array_sz);
		if (nid > 0 && affinity_form == FORM1_AFFINITY) {
			/*
			 * Lookup-array associativity entries do not have
			 * the array length as their first element.
			 */
			__initialize_form1_numa_distance(associativity, aa.array_sz);
		}
	}
	return nid;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
	struct assoc_arrays aa = { .arrays = NULL };
	int default_nid = NUMA_NO_NODE;
	int nid = default_nid;
	int rc, index;

	if ((primary_domain_index < 0) || !numa_enabled)
		return default_nid;

	rc = of_get_assoc_arrays(&aa);
	if (rc)
		return default_nid;

	if (primary_domain_index <= aa.array_sz &&
	    !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
		const __be32 *associativity;

		index = lmb->aa_index * aa.array_sz;
		associativity = &aa.arrays[index];
		nid = __associativity_to_nid(associativity, aa.array_sz);
	}
	return nid;
}

#ifdef CONFIG_PPC_SPLPAR

static int __vphn_get_associativity(long lcpu, __be32 *associativity)
{
	long rc, hwid;

	/*
	 * On a shared LPAR, the device tree will not have node associativity.
	 * At this time the lppaca, or its __old_status field, may not be
	 * updated, so the kernel cannot detect whether it is on a shared
	 * LPAR. Hence request an explicit associativity irrespective of
	 * whether the LPAR is shared or dedicated, and use the device tree
	 * property as a fallback. cpu_to_phys_id is only valid between
	 * smp_setup_cpu_maps() and smp_setup_pacas().
	 */
	if (firmware_has_feature(FW_FEATURE_VPHN)) {
		if (cpu_to_phys_id)
			hwid = cpu_to_phys_id[lcpu];
		else
			hwid = get_hard_smp_processor_id(lcpu);

		rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity);
		if (rc == H_SUCCESS)
			return 0;
	}

	return -1;
}

static int vphn_get_nid(long lcpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};

	if (!__vphn_get_associativity(lcpu, associativity))
		return associativity_to_nid(associativity);

	return NUMA_NO_NODE;
}
#else

static int __vphn_get_associativity(long lcpu, __be32 *associativity)
{
	return -1;
}

static int vphn_get_nid(long unused)
{
	return NUMA_NO_NODE;
}
#endif /* CONFIG_PPC_SPLPAR */

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	struct device_node *cpu;
	int fcpu = cpu_first_thread_sibling(lcpu);
	int nid = NUMA_NO_NODE;

	if (!cpu_present(lcpu)) {
		set_cpu_numa_node(lcpu, first_online_node);
		return first_online_node;
	}

	/*
	 * If a valid cpu-to-node mapping is already available, use it
	 * directly instead of querying the firmware, since it represents
	 * the most recent mapping notified to us by the platform (eg: VPHN).
	 * The cpu_to_node binding remains the same for all threads in a
	 * core, so if a valid mapping is already available for the first
	 * thread in the core, use it.
	 */
	nid = numa_cpu_lookup_table[fcpu];
	if (nid >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	nid = vphn_get_nid(lcpu);
	if (nid != NUMA_NO_NODE)
		goto out_present;

	cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		if (cpu_present(lcpu))
			goto out_present;
		else
			goto out;
	}

	nid = of_node_to_nid_single(cpu);
	of_node_put(cpu);

out_present:
	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	/*
	 * Update for the first thread of the core. All threads of a core
	 * have to be part of the same node. This not only avoids querying
	 * for every other thread in the core, but also avoids a case
	 * where a virtual node associativity change causes subsequent
	 * threads of a core to be associated with different nids. However,
	 * if the first thread is already online, expect it to have a valid
	 * mapping.
	 */
	if (fcpu != lcpu) {
		WARN_ON(cpu_online(fcpu));
		map_cpu_to_node(fcpu, nid);
	}

	map_cpu_to_node(lcpu, nid);
out:
	return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
	int base, sibling, i;

	/* Verify that all the threads in the core belong to the same node */
	base = cpu_first_thread_sibling(cpu);

	for (i = 0; i < threads_per_core; i++) {
		sibling = base + i;

		if (sibling == cpu || cpu_is_offline(sibling))
			continue;

		if (cpu_to_node(sibling) != node) {
			WARN(1, "CPU thread siblings %d and %d don't belong"
				" to the same node!\n", cpu, sibling);
			break;
		}
	}
}

/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
	int nid;

	nid = numa_setup_cpu(cpu);
	verify_cpu_node_mapping(cpu, nid);
	return 0;
}

static int ppc_numa_cpu_dead(unsigned int cpu)
{
	return 0;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit. Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}
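
/*
 * Example: with memblock_end_of_DRAM() at 4GB, a region starting at
 * 3.5GB with size 1GB is truncated to 0.5GB, and a region starting at
 * or above 4GB is discarded entirely (returned size 0).
 */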

/*
 * Reads the counter for a given entry in the
 * linux,drconf-usable-memory property.
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory, a corresponding
	 * entry in the linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) tuples.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static int __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
					const __be32 **usm,
					void *data)
{
	unsigned int ranges, is_kexec_kdump = 0;
	unsigned long base, size, sz;
	int nid;

	/*
	 * Skip this block if the reserved bit is set in flags (0x80)
	 * or if the block is not assigned to this partition (0x8)
	 */
	if ((lmb->flags & DRCONF_MEM_RESERVED)
	    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
		return 0;

	if (*usm)
		is_kexec_kdump = 1;

	base = lmb->base_addr;
	size = drmem_lmb_size();
	ranges = 1;

	if (is_kexec_kdump) {
		ranges = read_usm_ranges(usm);
		if (!ranges) /* there are no (base, size) tuples */
			return 0;
	}

	do {
		if (is_kexec_kdump) {
			base = read_n_cells(n_mem_addr_cells, usm);
			size = read_n_cells(n_mem_size_cells, usm);
		}

		nid = get_nid_and_numa_distance(lmb);
		fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
					  &nid);
		node_set_online(nid);
		sz = numa_enforce_memory_limit(base, size);
		if (sz)
			memblock_set_node(base, sz, &memblock.memory, nid);
	} while (--ranges);

	return 0;
}

static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;
	const __be32 *associativity;

	if (numa_enabled == 0) {
		pr_warn("disabled by user\n");
		return -1;
	}

	primary_domain_index = find_primary_domain_index();

	if (primary_domain_index < 0) {
		/*
		 * If we fail to parse primary_domain_index from the device
		 * tree, mark NUMA as disabled and boot with NUMA disabled.
		 */
		numa_enabled = false;
		return primary_domain_index;
	}

	pr_debug("associativity depth for CPU/Memory: %d\n", primary_domain_index);

	/*
	 * If it is FORM2 initialize the distance table here.
	 */
	if (affinity_form == FORM2_AFFINITY)
		initialize_form2_numa_distance_lookup_table();

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		__be32 vphn_assoc[VPHN_ASSOC_BUFSIZE];
		struct device_node *cpu;
		int nid = NUMA_NO_NODE;

		memset(vphn_assoc, 0, VPHN_ASSOC_BUFSIZE * sizeof(__be32));

		if (__vphn_get_associativity(i, vphn_assoc) == 0) {
			nid = associativity_to_nid(vphn_assoc);
			initialize_form1_numa_distance(vphn_assoc);
		} else {
			/*
			 * Don't fall back to default_nid yet -- we will plug
			 * cpus into nodes once the memory scan has discovered
			 * the topology.
			 */
			cpu = of_get_cpu_node(i, NULL);
			BUG_ON(!cpu);

			associativity = of_get_associativity(cpu);
			if (associativity) {
				nid = associativity_to_nid(associativity);
				initialize_form1_numa_distance(associativity);
			}
			of_node_put(cpu);
		}

		/* node_set_online() is undefined behavior if 'nid' is negative */
		if (likely(nid >= 0))
			node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
				"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties. If none, then
		 * everything goes to default_nid.
		 */
		associativity = of_get_associativity(memory);
		if (associativity) {
			nid = associativity_to_nid(associativity);
			initialize_form1_numa_distance(associativity);
		} else
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		size = numa_enforce_memory_limit(start, size);
		if (size)
			memblock_set_node(start, size, &memblock.memory, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		walk_drmem_lmbs(memory, NULL, numa_setup_drmem_lmb);
		of_node_put(memory);
	}

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	int i;

	pr_debug("Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram);
	pr_debug("Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20);

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, nid);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (!numa_enabled)
		return;

	for_each_online_node(node) {
		pr_info("Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					pr_cont(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					pr_cont("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			pr_cont("-%u", nr_cpu_ids - 1);
		pr_cont("\n");
	}
}
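
/*
 * The loop above collapses consecutive CPU ids into ranges; e.g. a node
 * with CPUs 0-7 and 16-23 online would be reported as
 * "Node 0 CPUs: 0-7 16-23" (illustrative output).
 */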

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
	u64 spanned_pages = end_pfn - start_pfn;
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	void *nd;
	int tnid;

	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa)
		panic("Cannot allocate %zu bytes for node %d data\n",
		      nd_size, nid);

	nd = __va(nd_pa);

	/* report and initialize */
	pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
		nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}

static void __init find_possible_nodes(void)
{
	struct device_node *rtas;
	const __be32 *domains = NULL;
	int prop_length, max_nodes;
	u32 i;

	if (!numa_enabled)
		return;

	rtas = of_find_node_by_path("/rtas");
	if (!rtas)
		return;

	/*
	 * ibm,current-associativity-domains is a fairly recent property. If
	 * it doesn't exist, then fall back on ibm,max-associativity-domains.
	 * Current denotes what the platform can support compared to max
	 * which denotes what the hypervisor can support.
	 *
	 * If the LPAR is migratable, new nodes might be activated after an
	 * LPM, so we should consider the max number in that case.
	 */
	if (!of_get_property(of_root, "ibm,migratable-partition", NULL))
		domains = of_get_property(rtas,
					  "ibm,current-associativity-domains",
					  &prop_length);
	if (!domains) {
		domains = of_get_property(rtas, "ibm,max-associativity-domains",
					  &prop_length);
		if (!domains)
			goto out;
	}

	max_nodes = of_read_number(&domains[primary_domain_index], 1);
	pr_info("Partition configured for %d NUMA nodes.\n", max_nodes);

	for (i = 0; i < max_nodes; i++) {
		if (!node_possible(i))
			node_set(i, node_possible_map);
	}

	prop_length /= sizeof(int);
	if (prop_length > primary_domain_index + 2)
		coregroup_enabled = 1;

out:
	of_node_put(rtas);
}

void __init mem_topology_setup(void)
{
	int cpu;

	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;

	/*
	 * Linux/mm assumes node 0 to be online at boot. However this is not
	 * true on PowerPC, where node 0 is similar to any other node: it
	 * could be a cpuless, memoryless node. So force node 0 to be offline
	 * for now. This prevents a cpuless, memoryless node 0 from showing
	 * up unnecessarily as online. If a node has cpus or memory that need
	 * to be online, then the node will be marked online anyway.
	 */
	node_set_offline(0);

	if (parse_numa_properties())
		setup_nonnuma();

	/*
	 * Modify the set of possible NUMA nodes to reflect information
	 * available about the set of online nodes, and the set of nodes
	 * that we expect to make use of for this platform's affinity
	 * calculations.
	 */
	nodes_and(node_possible_map, node_possible_map, node_online_map);

	find_possible_nodes();

	setup_node_to_cpumask_map();

	reset_numa_cpu_lookup_table();

	for_each_possible_cpu(cpu) {
		/*
		 * powerpc with CONFIG_NUMA always used to have a node 0,
		 * even if it was memoryless or cpuless. For all cpus that
		 * are possible but not present, cpu_to_node() would point
		 * to node 0. To remove a cpuless, memoryless dummy node,
		 * powerpc needs to make sure all possible but not present
		 * cpu_to_node entries are set to a proper node.
		 */
		numa_setup_cpu(cpu);
	}
}

void __init initmem_init(void)
{
	int nid;

	memblock_dump_all();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		setup_node_data(nid, start_pfn, end_pfn);
	}

	sparse_init();

	/*
	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
	 * even before we online them, so that we can use cpu_to_{node,mem}
	 * early in boot, cf. smp_prepare_cpus().
	 * _nocalls() + manual invocation is used because cpuhp is not yet
	 * initialized for the boot CPU.
	 */
	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);
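
/*
 * Example usage (illustrative): booting with "numa=off" disables NUMA
 * entirely, while "numa=fake=1G,4G" keeps NUMA enabled and hands the
 * boundary list "1G,4G" to fake_numa_create_new_node() above.
 */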

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
{
	struct drmem_lmb *lmb;
	unsigned long lmb_size;
	int nid = NUMA_NO_NODE;

	lmb_size = drmem_lmb_size();

	for_each_drmem_lmb(lmb) {
		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((lmb->flags & DRCONF_MEM_RESERVED)
		    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < lmb->base_addr)
		    || (scn_addr >= (lmb->base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(lmb);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = NUMA_NO_NODE;

	for_each_node_by_type(memory, "memory") {
		int i = 0;

		while (1) {
			struct resource res;

			if (of_address_to_resource(memory, i++, &res))
				break;

			if ((scn_addr < res.start) || (scn_addr > res.end))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}

/*
 * Find the node associated with a hot added memory section. A section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid;

	if (!numa_enabled)
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	struct device_node *dn = NULL;
	const __be64 *lrdr = NULL;

	dn = of_find_node_by_path("/rtas");
	if (dn) {
		lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
		of_node_put(dn);
		if (lrdr)
			return be64_to_cpup(lrdr);
	}

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		of_node_put(memory);
		return drmem_lmb_memory_max();
	}
	return 0;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
static int topology_inited;

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long vphn_get_associativity(unsigned long cpu,
				   __be32 *associativity)
{
	long rc;

	rc = hcall_vphn(get_hard_smp_processor_id(cpu),
			VPHN_FLAG_VCPU, associativity);

	switch (rc) {
	case H_SUCCESS:
		pr_debug("VPHN hcall succeeded. Reset polling...\n");
		goto out;

	case H_FUNCTION:
		pr_err_ratelimited("VPHN unsupported. Disabling polling...\n");
		break;
	case H_HARDWARE:
		pr_err_ratelimited("hcall_vphn() experienced a hardware fault "
				"preventing VPHN. Disabling polling...\n");
		break;
	case H_PARAMETER:
		pr_err_ratelimited("hcall_vphn() was passed an invalid parameter. "
				"Disabling polling...\n");
		break;
	default:
		pr_err_ratelimited("hcall_vphn() returned %ld. Disabling polling...\n",
				rc);
		break;
	}
out:
	return rc;
}

void find_and_update_cpu_nid(int cpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	int new_nid;

	/* Use associativity from first thread for all siblings */
	if (vphn_get_associativity(cpu, associativity))
		return;

	/* Do not have previous associativity, so find it now. */
	new_nid = associativity_to_nid(associativity);

	if (new_nid < 0 || !node_possible(new_nid))
		new_nid = first_online_node;
	else
		// Associate node <-> cpu, so cpu_up() calls
		// try_online_node() on the right node.
		set_cpu_numa_node(cpu, new_nid);

	pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__, cpu, new_nid);
}

int cpu_to_coregroup_id(int cpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	int index;

	if (cpu < 0 || cpu >= nr_cpu_ids)
		return -1;

	if (!coregroup_enabled)
		goto out;

	if (!firmware_has_feature(FW_FEATURE_VPHN))
		goto out;

	if (vphn_get_associativity(cpu, associativity))
		goto out;

	index = of_read_number(associativity, 1);
	if (index > primary_domain_index + 1)
		return of_read_number(&associativity[index - 1], 1);

out:
	return cpu_to_core_id(cpu);
}

static int topology_update_init(void)
{
	topology_inited = 1;
	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * pSeries NUMA support
4 *
5 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
6 */
7#define pr_fmt(fmt) "numa: " fmt
8
9#include <linux/threads.h>
10#include <linux/memblock.h>
11#include <linux/init.h>
12#include <linux/mm.h>
13#include <linux/mmzone.h>
14#include <linux/export.h>
15#include <linux/nodemask.h>
16#include <linux/cpu.h>
17#include <linux/notifier.h>
18#include <linux/of.h>
19#include <linux/pfn.h>
20#include <linux/cpuset.h>
21#include <linux/node.h>
22#include <linux/stop_machine.h>
23#include <linux/proc_fs.h>
24#include <linux/seq_file.h>
25#include <linux/uaccess.h>
26#include <linux/slab.h>
27#include <asm/cputhreads.h>
28#include <asm/sparsemem.h>
29#include <asm/prom.h>
30#include <asm/smp.h>
31#include <asm/topology.h>
32#include <asm/firmware.h>
33#include <asm/paca.h>
34#include <asm/hvcall.h>
35#include <asm/setup.h>
36#include <asm/vdso.h>
37#include <asm/drmem.h>
38
39static int numa_enabled = 1;
40
41static char *cmdline __initdata;
42
43static int numa_debug;
44#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
45
46int numa_cpu_lookup_table[NR_CPUS];
47cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
48struct pglist_data *node_data[MAX_NUMNODES];
49
50EXPORT_SYMBOL(numa_cpu_lookup_table);
51EXPORT_SYMBOL(node_to_cpumask_map);
52EXPORT_SYMBOL(node_data);
53
54static int min_common_depth;
55static int n_mem_addr_cells, n_mem_size_cells;
56static int form1_affinity;
57
58#define MAX_DISTANCE_REF_POINTS 4
59static int distance_ref_points_depth;
60static const __be32 *distance_ref_points;
61static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
62
63/*
64 * Allocate node_to_cpumask_map based on number of available nodes
65 * Requires node_possible_map to be valid.
66 *
67 * Note: cpumask_of_node() is not valid until after this is done.
68 */
69static void __init setup_node_to_cpumask_map(void)
70{
71 unsigned int node;
72
73 /* setup nr_node_ids if not done yet */
74 if (nr_node_ids == MAX_NUMNODES)
75 setup_nr_node_ids();
76
77 /* allocate the map */
78 for_each_node(node)
79 alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
80
81 /* cpumask_of_node() will now work */
82 dbg("Node to cpumask map for %u nodes\n", nr_node_ids);
83}
84
85static int __init fake_numa_create_new_node(unsigned long end_pfn,
86 unsigned int *nid)
87{
88 unsigned long long mem;
89 char *p = cmdline;
90 static unsigned int fake_nid;
91 static unsigned long long curr_boundary;
92
93 /*
94 * Modify node id, iff we started creating NUMA nodes
95 * We want to continue from where we left of the last time
96 */
97 if (fake_nid)
98 *nid = fake_nid;
99 /*
100 * In case there are no more arguments to parse, the
101 * node_id should be the same as the last fake node id
102 * (we've handled this above).
103 */
104 if (!p)
105 return 0;
106
107 mem = memparse(p, &p);
108 if (!mem)
109 return 0;
110
111 if (mem < curr_boundary)
112 return 0;
113
114 curr_boundary = mem;
115
116 if ((end_pfn << PAGE_SHIFT) > mem) {
117 /*
118 * Skip commas and spaces
119 */
120 while (*p == ',' || *p == ' ' || *p == '\t')
121 p++;
122
123 cmdline = p;
124 fake_nid++;
125 *nid = fake_nid;
126 dbg("created new fake_node with id %d\n", fake_nid);
127 return 1;
128 }
129 return 0;
130}
131
132static void reset_numa_cpu_lookup_table(void)
133{
134 unsigned int cpu;
135
136 for_each_possible_cpu(cpu)
137 numa_cpu_lookup_table[cpu] = -1;
138}
139
140static void map_cpu_to_node(int cpu, int node)
141{
142 update_numa_cpu_lookup_table(cpu, node);
143
144 dbg("adding cpu %d to node %d\n", cpu, node);
145
146 if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
147 cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
148}
149
150#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
151static void unmap_cpu_from_node(unsigned long cpu)
152{
153 int node = numa_cpu_lookup_table[cpu];
154
155 dbg("removing cpu %lu from node %d\n", cpu, node);
156
157 if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
158 cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
159 } else {
160 printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
161 cpu, node);
162 }
163}
164#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
165
166int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
167{
168 int dist = 0;
169
170 int i, index;
171
172 for (i = 0; i < distance_ref_points_depth; i++) {
173 index = be32_to_cpu(distance_ref_points[i]);
174 if (cpu1_assoc[index] == cpu2_assoc[index])
175 break;
176 dist++;
177 }
178
179 return dist;
180}
181
182/* must hold reference to node during call */
183static const __be32 *of_get_associativity(struct device_node *dev)
184{
185 return of_get_property(dev, "ibm,associativity", NULL);
186}
187
188int __node_distance(int a, int b)
189{
190 int i;
191 int distance = LOCAL_DISTANCE;
192
193 if (!form1_affinity)
194 return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
195
196 for (i = 0; i < distance_ref_points_depth; i++) {
197 if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
198 break;
199
200 /* Double the distance for each NUMA level */
201 distance *= 2;
202 }
203
204 return distance;
205}
206EXPORT_SYMBOL(__node_distance);
207
208static void initialize_distance_lookup_table(int nid,
209 const __be32 *associativity)
210{
211 int i;
212
213 if (!form1_affinity)
214 return;
215
216 for (i = 0; i < distance_ref_points_depth; i++) {
217 const __be32 *entry;
218
219 entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
220 distance_lookup_table[nid][i] = of_read_number(entry, 1);
221 }
222}
223
224/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
225 * info is found.
226 */
227static int associativity_to_nid(const __be32 *associativity)
228{
229 int nid = NUMA_NO_NODE;
230
231 if (!numa_enabled)
232 goto out;
233
234 if (of_read_number(associativity, 1) >= min_common_depth)
235 nid = of_read_number(&associativity[min_common_depth], 1);
236
237 /* POWER4 LPAR uses 0xffff as invalid node */
238 if (nid == 0xffff || nid >= MAX_NUMNODES)
239 nid = NUMA_NO_NODE;
240
241 if (nid > 0 &&
242 of_read_number(associativity, 1) >= distance_ref_points_depth) {
243 /*
244 * Skip the length field and send start of associativity array
245 */
246 initialize_distance_lookup_table(nid, associativity + 1);
247 }
248
249out:
250 return nid;
251}
252
253/* Returns the nid associated with the given device tree node,
254 * or -1 if not found.
255 */
256static int of_node_to_nid_single(struct device_node *device)
257{
258 int nid = NUMA_NO_NODE;
259 const __be32 *tmp;
260
261 tmp = of_get_associativity(device);
262 if (tmp)
263 nid = associativity_to_nid(tmp);
264 return nid;
265}
266
267/* Walk the device tree upwards, looking for an associativity id */
268int of_node_to_nid(struct device_node *device)
269{
270 int nid = NUMA_NO_NODE;
271
272 of_node_get(device);
273 while (device) {
274 nid = of_node_to_nid_single(device);
275 if (nid != -1)
276 break;
277
278 device = of_get_next_parent(device);
279 }
280 of_node_put(device);
281
282 return nid;
283}
284EXPORT_SYMBOL(of_node_to_nid);
285
286static int __init find_min_common_depth(void)
287{
288 int depth;
289 struct device_node *root;
290
291 if (firmware_has_feature(FW_FEATURE_OPAL))
292 root = of_find_node_by_path("/ibm,opal");
293 else
294 root = of_find_node_by_path("/rtas");
295 if (!root)
296 root = of_find_node_by_path("/");
297
298 /*
299 * This property is a set of 32-bit integers, each representing
300 * an index into the ibm,associativity nodes.
301 *
302 * With form 0 affinity the first integer is for an SMP configuration
303 * (should be all 0's) and the second is for a normal NUMA
304 * configuration. We have only one level of NUMA.
305 *
306 * With form 1 affinity the first integer is the most significant
307 * NUMA boundary and the following are progressively less significant
308 * boundaries. There can be more than one level of NUMA.
309 */
310 distance_ref_points = of_get_property(root,
311 "ibm,associativity-reference-points",
312 &distance_ref_points_depth);
313
314 if (!distance_ref_points) {
315 dbg("NUMA: ibm,associativity-reference-points not found.\n");
316 goto err;
317 }
318
319 distance_ref_points_depth /= sizeof(int);
320
321 if (firmware_has_feature(FW_FEATURE_OPAL) ||
322 firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
323 dbg("Using form 1 affinity\n");
324 form1_affinity = 1;
325 }
326
327 if (form1_affinity) {
328 depth = of_read_number(distance_ref_points, 1);
329 } else {
330 if (distance_ref_points_depth < 2) {
331 printk(KERN_WARNING "NUMA: "
332 "short ibm,associativity-reference-points\n");
333 goto err;
334 }
335
336 depth = of_read_number(&distance_ref_points[1], 1);
337 }
338
339 /*
340 * Warn and cap if the hardware supports more than
341 * MAX_DISTANCE_REF_POINTS domains.
342 */
343 if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
344 printk(KERN_WARNING "NUMA: distance array capped at "
345 "%d entries\n", MAX_DISTANCE_REF_POINTS);
346 distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
347 }
348
349 of_node_put(root);
350 return depth;
351
352err:
353 of_node_put(root);
354 return -1;
355}
356
357static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
358{
359 struct device_node *memory = NULL;
360
361 memory = of_find_node_by_type(memory, "memory");
362 if (!memory)
363 panic("numa.c: No memory nodes found!");
364
365 *n_addr_cells = of_n_addr_cells(memory);
366 *n_size_cells = of_n_size_cells(memory);
367 of_node_put(memory);
368}
369
370static unsigned long read_n_cells(int n, const __be32 **buf)
371{
372 unsigned long result = 0;
373
374 while (n--) {
375 result = (result << 32) | of_read_number(*buf, 1);
376 (*buf)++;
377 }
378 return result;
379}
380
381struct assoc_arrays {
382 u32 n_arrays;
383 u32 array_sz;
384 const __be32 *arrays;
385};
386
387/*
388 * Retrieve and validate the list of associativity arrays for drconf
389 * memory from the ibm,associativity-lookup-arrays property of the
390 * device tree..
391 *
392 * The layout of the ibm,associativity-lookup-arrays property is a number N
393 * indicating the number of associativity arrays, followed by a number M
394 * indicating the size of each associativity array, followed by a list
395 * of N associativity arrays.
396 */
397static int of_get_assoc_arrays(struct assoc_arrays *aa)
398{
399 struct device_node *memory;
400 const __be32 *prop;
401 u32 len;
402
403 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
404 if (!memory)
405 return -1;
406
407 prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
408 if (!prop || len < 2 * sizeof(unsigned int)) {
409 of_node_put(memory);
410 return -1;
411 }
412
413 aa->n_arrays = of_read_number(prop++, 1);
414 aa->array_sz = of_read_number(prop++, 1);
415
416 of_node_put(memory);
417
418 /* Now that we know the number of arrays and size of each array,
419 * revalidate the size of the property read in.
420 */
421 if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
422 return -1;
423
424 aa->arrays = prop;
425 return 0;
426}
427
428/*
429 * This is like of_node_to_nid_single() for memory represented in the
430 * ibm,dynamic-reconfiguration-memory node.
431 */
432static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
433{
434 struct assoc_arrays aa = { .arrays = NULL };
435 int default_nid = NUMA_NO_NODE;
436 int nid = default_nid;
437 int rc, index;
438
439 if ((min_common_depth < 0) || !numa_enabled)
440 return default_nid;
441
442 rc = of_get_assoc_arrays(&aa);
443 if (rc)
444 return default_nid;
445
446 if (min_common_depth <= aa.array_sz &&
447 !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
448 index = lmb->aa_index * aa.array_sz + min_common_depth - 1;
449 nid = of_read_number(&aa.arrays[index], 1);
450
451 if (nid == 0xffff || nid >= MAX_NUMNODES)
452 nid = default_nid;
453
454 if (nid > 0) {
455 index = lmb->aa_index * aa.array_sz;
456 initialize_distance_lookup_table(nid,
457 &aa.arrays[index]);
458 }
459 }
460
461 return nid;
462}
463
464/*
465 * Figure out to which domain a cpu belongs and stick it there.
466 * Return the id of the domain used.
467 */
468static int numa_setup_cpu(unsigned long lcpu)
469{
470 int nid = NUMA_NO_NODE;
471 struct device_node *cpu;
472
473 /*
474 * If a valid cpu-to-node mapping is already available, use it
475 * directly instead of querying the firmware, since it represents
476 * the most recent mapping notified to us by the platform (eg: VPHN).
477 */
478 if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
479 map_cpu_to_node(lcpu, nid);
480 return nid;
481 }
482
483 cpu = of_get_cpu_node(lcpu, NULL);
484
485 if (!cpu) {
486 WARN_ON(1);
487 if (cpu_present(lcpu))
488 goto out_present;
489 else
490 goto out;
491 }
492
493 nid = of_node_to_nid_single(cpu);
494
495out_present:
496 if (nid < 0 || !node_possible(nid))
497 nid = first_online_node;
498
499 map_cpu_to_node(lcpu, nid);
500 of_node_put(cpu);
501out:
502 return nid;
503}
504
505static void verify_cpu_node_mapping(int cpu, int node)
506{
507 int base, sibling, i;
508
509 /* Verify that all the threads in the core belong to the same node */
510 base = cpu_first_thread_sibling(cpu);
511
512 for (i = 0; i < threads_per_core; i++) {
513 sibling = base + i;
514
515 if (sibling == cpu || cpu_is_offline(sibling))
516 continue;
517
518 if (cpu_to_node(sibling) != node) {
519 WARN(1, "CPU thread siblings %d and %d don't belong"
520 " to the same node!\n", cpu, sibling);
521 break;
522 }
523 }
524}
525
526/* Must run before sched domains notifier. */
527static int ppc_numa_cpu_prepare(unsigned int cpu)
528{
529 int nid;
530
531 nid = numa_setup_cpu(cpu);
532 verify_cpu_node_mapping(cpu, nid);
533 return 0;
534}
535
536static int ppc_numa_cpu_dead(unsigned int cpu)
537{
538#ifdef CONFIG_HOTPLUG_CPU
539 unmap_cpu_from_node(cpu);
540#endif
541 return 0;
542}
543
544/*
545 * Check and possibly modify a memory region to enforce the memory limit.
546 *
547 * Returns the size the region should have to enforce the memory limit.
548 * This will either be the original value of size, a truncated value,
549 * or zero. If the returned value of size is 0 the region should be
550 * discarded as it lies wholly above the memory limit.
551 */
552static unsigned long __init numa_enforce_memory_limit(unsigned long start,
553 unsigned long size)
554{
555 /*
556 * We use memblock_end_of_DRAM() in here instead of memory_limit because
557 * we've already adjusted it for the limit and it takes care of
558 * having memory holes below the limit. Also, in the case of
559 * iommu_is_off, memory_limit is not set but is implicitly enforced.
560 */
561
562 if (start + size <= memblock_end_of_DRAM())
563 return size;
564
565 if (start >= memblock_end_of_DRAM())
566 return 0;
567
568 return memblock_end_of_DRAM() - start;
569}
570
571/*
572 * Reads the counter for a given entry in
573 * linux,drconf-usable-memory property
574 */
575static inline int __init read_usm_ranges(const __be32 **usm)
576{
577 /*
578 * For each lmb in ibm,dynamic-memory a corresponding
579 * entry in linux,drconf-usable-memory property contains
580 * a counter followed by that many (base, size) duple.
581 * read the counter from linux,drconf-usable-memory
582 */
583 return read_n_cells(n_mem_size_cells, usm);
584}
585
586/*
587 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
588 * node. This assumes n_mem_{addr,size}_cells have been set.
589 */
590static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
591 const __be32 **usm)
592{
593 unsigned int ranges, is_kexec_kdump = 0;
594 unsigned long base, size, sz;
595 int nid;
596
597 /*
598 * Skip this block if the reserved bit is set in flags (0x80)
599 * or if the block is not assigned to this partition (0x8)
600 */
601 if ((lmb->flags & DRCONF_MEM_RESERVED)
602 || !(lmb->flags & DRCONF_MEM_ASSIGNED))
603 return;
604
605 if (*usm)
606 is_kexec_kdump = 1;
607
608 base = lmb->base_addr;
609 size = drmem_lmb_size();
610 ranges = 1;
611
612 if (is_kexec_kdump) {
613 ranges = read_usm_ranges(usm);
614 if (!ranges) /* there are no (base, size) pairs */
615 return;
616 }
617
618 do {
619 if (is_kexec_kdump) {
620 base = read_n_cells(n_mem_addr_cells, usm);
621 size = read_n_cells(n_mem_size_cells, usm);
622 }
623
624 nid = of_drconf_to_nid_single(lmb);
625 fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
626 &nid);
627 node_set_online(nid);
628 sz = numa_enforce_memory_limit(base, size);
629 if (sz)
630 memblock_set_node(base, sz, &memblock.memory, nid);
631 } while (--ranges);
632}
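/*
 * Illustrative flag values for the checks above: an LMB with
 * lmb->flags == 0x8 (DRCONF_MEM_ASSIGNED) is processed, while
 * flags == 0x80 (DRCONF_MEM_RESERVED) or flags == 0x0 (not assigned
 * to this partition) cause the early return.
 */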
633
634static int __init parse_numa_properties(void)
635{
636 struct device_node *memory;
637 int default_nid = 0;
638 unsigned long i;
639
640 if (numa_enabled == 0) {
641 printk(KERN_WARNING "NUMA disabled by user\n");
642 return -1;
643 }
644
645 primary_domain_index = find_min_common_depth();
646
647 if (primary_domain_index < 0) {
648 /*
649 * If we fail to parse primary_domain_index from the device
650 * tree, mark NUMA disabled and boot with NUMA disabled.
651 */
652 numa_enabled = false;
653 return primary_domain_index;
654 }
655
656 dbg("NUMA associativity depth for CPU/Memory: %d\n", primary_domain_index);
657
658 /*
659 * Even though we connect cpus to numa domains later in SMP
660 * init, we need to know the node ids now. This is because
661 * each node to be onlined must have NODE_DATA etc backing it.
662 */
663 for_each_present_cpu(i) {
664 struct device_node *cpu;
665 int nid;
666
667 cpu = of_get_cpu_node(i, NULL);
668 BUG_ON(!cpu);
669 nid = of_node_to_nid_single(cpu);
670 of_node_put(cpu);
671
672 /*
673 * Don't fall back to default_nid yet -- we will plug
674 * cpus into nodes once the memory scan has discovered
675 * the topology.
676 */
677 if (nid < 0)
678 continue;
679 node_set_online(nid);
680 }
681
682 get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
683
684 for_each_node_by_type(memory, "memory") {
685 unsigned long start;
686 unsigned long size;
687 int nid;
688 int ranges;
689 const __be32 *memcell_buf;
690 unsigned int len;
691
692 memcell_buf = of_get_property(memory,
693 "linux,usable-memory", &len);
694 if (!memcell_buf || len <= 0)
695 memcell_buf = of_get_property(memory, "reg", &len);
696 if (!memcell_buf || len <= 0)
697 continue;
698
699 /* number of (addr, size) ranges encoded in the cells */
700 ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
701new_range:
702 /* these are order-sensitive, and modify the buffer pointer */
703 start = read_n_cells(n_mem_addr_cells, &memcell_buf);
704 size = read_n_cells(n_mem_size_cells, &memcell_buf);
705
706 /*
707 * Assumption: either all memory nodes or none will
708 * have associativity properties. If none, then
709 * everything goes to default_nid.
710 */
711 nid = of_node_to_nid_single(memory);
712 if (nid < 0)
713 nid = default_nid;
714
715 fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
716 node_set_online(nid);
717
718 size = numa_enforce_memory_limit(start, size);
719 if (size)
720 memblock_set_node(start, size, &memblock.memory, nid);
721
722 if (--ranges)
723 goto new_range;
724 }
725
726 /*
727 * Now do the same thing for each MEMBLOCK listed in the
728 * ibm,dynamic-memory property in the
729 * ibm,dynamic-reconfiguration-memory node.
730 */
731 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
732 if (memory) {
733 walk_drmem_lmbs(memory, numa_setup_drmem_lmb);
734 of_node_put(memory);
735 }
736
737 return 0;
738}
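/*
 * Hypothetical device tree fragment of the kind scanned above, assuming
 * n_mem_addr_cells == 2 and n_mem_size_cells == 2 (property names are
 * real, the values are illustrative only):
 *
 *   memory@0 {
 *           device_type = "memory";
 *           reg = <0x0 0x0 0x0 0x80000000>;          2 GiB at address 0
 *           ibm,associativity = <0x4 0x0 0x0 0x0 0x1>;
 *   };
 */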
739
740static void __init setup_nonnuma(void)
741{
742 unsigned long top_of_ram = memblock_end_of_DRAM();
743 unsigned long total_ram = memblock_phys_mem_size();
744 unsigned long start_pfn, end_pfn;
745 unsigned int nid = 0;
746 struct memblock_region *reg;
747
748 printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
749 top_of_ram, total_ram);
750 printk(KERN_DEBUG "Memory hole size: %ldMB\n",
751 (top_of_ram - total_ram) >> 20);
752
753 for_each_memblock(memory, reg) {
754 start_pfn = memblock_region_memory_base_pfn(reg);
755 end_pfn = memblock_region_memory_end_pfn(reg);
756
757 fake_numa_create_new_node(end_pfn, &nid);
758 memblock_set_node(PFN_PHYS(start_pfn),
759 PFN_PHYS(end_pfn - start_pfn),
760 &memblock.memory, nid);
761 node_set_online(nid);
762 }
763}
764
765void __init dump_numa_cpu_topology(void)
766{
767 unsigned int node;
768 unsigned int cpu, count;
769
770 if (!numa_enabled)
771 return;
772
773 for_each_online_node(node) {
774 pr_info("Node %d CPUs:", node);
775
776 count = 0;
777 /*
778 * If we used a CPU iterator here we would miss printing
779 * the holes in the cpumap.
780 */
781 for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
782 if (cpumask_test_cpu(cpu,
783 node_to_cpumask_map[node])) {
784 if (count == 0)
785 pr_cont(" %u", cpu);
786 ++count;
787 } else {
788 if (count > 1)
789 pr_cont("-%u", cpu - 1);
790 count = 0;
791 }
792 }
793
794 if (count > 1)
795 pr_cont("-%u", nr_cpu_ids - 1);
796 pr_cont("\n");
797 }
798}
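/*
 * Sample output of the above (illustrative, with the "numa: " pr_fmt
 * prefix defined at the top of this file):
 *
 *   numa: Node 0 CPUs: 0-15
 *   numa: Node 1 CPUs: 16-31
 */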
799
800/* Initialize NODE_DATA for a node on the local memory */
801static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
802{
803 u64 spanned_pages = end_pfn - start_pfn;
804 const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
805 u64 nd_pa;
806 void *nd;
807 int tnid;
808
809 nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
810 if (!nd_pa)
811 panic("Cannot allocate %zu bytes for node %d data\n",
812 nd_size, nid);
813
814 nd = __va(nd_pa);
815
816 /* report and initialize */
817 pr_info(" NODE_DATA [mem %#010Lx-%#010Lx]\n",
818 nd_pa, nd_pa + nd_size - 1);
819 tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
820 if (tnid != nid)
821 pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid);
822
823 node_data[nid] = nd;
824 memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
825 NODE_DATA(nid)->node_id = nid;
826 NODE_DATA(nid)->node_start_pfn = start_pfn;
827 NODE_DATA(nid)->node_spanned_pages = spanned_pages;
828}
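/*
 * Example boot log lines emitted above (addresses are hypothetical):
 *
 *   numa:   NODE_DATA [mem 0x7fff60300-0x7fff63aff]
 *   numa:   NODE_DATA(1) on node 0
 *
 * The second line only appears when the allocation had to fall back to
 * a node other than the one being set up.
 */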
829
830static void __init find_possible_nodes(void)
831{
832 struct device_node *rtas;
833 u32 numnodes, i;
834
835 if (!numa_enabled)
836 return;
837
838 rtas = of_find_node_by_path("/rtas");
839 if (!rtas)
840 return;
841
842 if (of_property_read_u32_index(rtas,
843 "ibm,max-associativity-domains",
844 primary_domain_index, &numnodes))
845 goto out;
846
847 for (i = 0; i < numnodes; i++) {
848 if (!node_possible(i))
849 node_set(i, node_possible_map);
850 }
851
852out:
853 of_node_put(rtas);
854}
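/*
 * Example property consumed above (hypothetical values): with
 *
 *   ibm,max-associativity-domains = <4 2 4 8 32>;
 *
 * and primary_domain_index == 4, numnodes reads as 32 and nodes 0..31
 * are added to node_possible_map.
 */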
855
856void __init mem_topology_setup(void)
857{
858 int cpu;
859
860 if (parse_numa_properties())
861 setup_nonnuma();
862
863 /*
864 * Modify the set of possible NUMA nodes to reflect information
865 * available about the set of online nodes, and the set of nodes
866 * that we expect to make use of for this platform's affinity
867 * calculations.
868 */
869 nodes_and(node_possible_map, node_possible_map, node_online_map);
870
871 find_possible_nodes();
872
873 setup_node_to_cpumask_map();
874
875 reset_numa_cpu_lookup_table();
876
877 for_each_present_cpu(cpu)
878 numa_setup_cpu(cpu);
879}
880
881void __init initmem_init(void)
882{
883 int nid;
884
885 max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
886 max_pfn = max_low_pfn;
887
888 memblock_dump_all();
889
890 for_each_online_node(nid) {
891 unsigned long start_pfn, end_pfn;
892
893 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
894 setup_node_data(nid, start_pfn, end_pfn);
895 sparse_memory_present_with_active_regions(nid);
896 }
897
898 sparse_init();
899
900 /*
901 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
902 * even before we online them, so that we can use cpu_to_{node,mem}
903 * early in boot, cf. smp_prepare_cpus().
904 * _nocalls() + manual invocation is used because cpuhp is not yet
905 * initialized for the boot CPU.
906 */
907 cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
908 ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
909}
910
911static int __init early_numa(char *p)
912{
913 if (!p)
914 return 0;
915
916 if (strstr(p, "off"))
917 numa_enabled = 0;
918
919 if (strstr(p, "debug"))
920 numa_debug = 1;
921
922 p = strstr(p, "fake=");
923 if (p)
924 cmdline = p + strlen("fake=");
925
926 return 0;
927}
928early_param("numa", early_numa);
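/*
 * Example command lines handled above (illustrative):
 *
 *   numa=off            disable NUMA entirely
 *   numa=debug          enable extra NUMA debug output
 *   numa=fake=1G,3G     place fake node boundaries at 1 GiB and 3 GiB
 *                       (each value is parsed by memparse() in
 *                       fake_numa_create_new_node())
 */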
929
930/*
931 * The platform can inform us through one of several mechanisms
932 * (post-migration device tree updates, PRRN or VPHN) that the NUMA
933 * assignment of a resource has changed. This controls whether we act
934 * on that. Disabled by default.
935 */
936static bool topology_updates_enabled;
937
938static int __init early_topology_updates(char *p)
939{
940 if (!p)
941 return 0;
942
943 if (!strcmp(p, "on")) {
944 pr_warn("Caution: enabling topology updates\n");
945 topology_updates_enabled = true;
946 }
947
948 return 0;
949}
950early_param("topology_updates", early_topology_updates);
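/*
 * Example: booting with "topology_updates=on" enables the update
 * machinery below; any other value leaves it disabled.
 */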
951
952#ifdef CONFIG_MEMORY_HOTPLUG
953/*
954 * Find the node associated with a hot added memory section for
955 * memory represented in the device tree by the property
956 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
957 */
958static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
959{
960 struct drmem_lmb *lmb;
961 unsigned long lmb_size;
962 int nid = NUMA_NO_NODE;
963
964 lmb_size = drmem_lmb_size();
965
966 for_each_drmem_lmb(lmb) {
967 /* skip this block if it is reserved or not assigned to
968 * this partition */
969 if ((lmb->flags & DRCONF_MEM_RESERVED)
970 || !(lmb->flags & DRCONF_MEM_ASSIGNED))
971 continue;
972
973 if ((scn_addr < lmb->base_addr)
974 || (scn_addr >= (lmb->base_addr + lmb_size)))
975 continue;
976
977 nid = of_drconf_to_nid_single(lmb);
978 break;
979 }
980
981 return nid;
982}
983
984/*
985 * Find the node associated with a hot added memory section for memory
986 * represented in the device tree as a memory node (i.e. memory@XXXX),
987 * one for each memblock.
988 */
989static int hot_add_node_scn_to_nid(unsigned long scn_addr)
990{
991 struct device_node *memory;
992 int nid = NUMA_NO_NODE;
993
994 for_each_node_by_type(memory, "memory") {
995 unsigned long start, size;
996 int ranges;
997 const __be32 *memcell_buf;
998 unsigned int len;
999
1000 memcell_buf = of_get_property(memory, "reg", &len);
1001 if (!memcell_buf || len <= 0)
1002 continue;
1003
1004 /* number of (addr, size) ranges encoded in the cells */
1005 ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
1006
1007 while (ranges--) {
1008 start = read_n_cells(n_mem_addr_cells, &memcell_buf);
1009 size = read_n_cells(n_mem_size_cells, &memcell_buf);
1010
1011 if ((scn_addr < start) || (scn_addr >= (start + size)))
1012 continue;
1013
1014 nid = of_node_to_nid_single(memory);
1015 break;
1016 }
1017
1018 if (nid >= 0)
1019 break;
1020 }
1021
1022 of_node_put(memory);
1023
1024 return nid;
1025}
1026
1027/*
1028 * Find the node associated with a hot added memory section. Section
1029 * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that
1030 * sections are fully contained within a single MEMBLOCK.
1031 */
1032int hot_add_scn_to_nid(unsigned long scn_addr)
1033{
1034 struct device_node *memory = NULL;
1035 int nid;
1036
1037 if (!numa_enabled)
1038 return first_online_node;
1039
1040 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1041 if (memory) {
1042 nid = hot_add_drconf_scn_to_nid(scn_addr);
1043 of_node_put(memory);
1044 } else {
1045 nid = hot_add_node_scn_to_nid(scn_addr);
1046 }
1047
1048 if (nid < 0 || !node_possible(nid))
1049 nid = first_online_node;
1050
1051 return nid;
1052}
1053
1054static u64 hot_add_drconf_memory_max(void)
1055{
1056 struct device_node *memory = NULL;
1057 struct device_node *dn = NULL;
1058 const __be64 *lrdr = NULL;
1059
1060 dn = of_find_node_by_path("/rtas");
1061 if (dn) {
1062 lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
1063 of_node_put(dn);
1064 if (lrdr)
1065 return be64_to_cpup(lrdr);
1066 }
1067
1068 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1069 if (memory) {
1070 of_node_put(memory);
1071 return drmem_lmb_memory_max();
1072 }
1073 return 0;
1074}
1075
1076/*
1077 * memory_hotplug_max - return max address of memory that may be added
1078 *
1079 * This is currently only used on systems that support drconfig memory
1080 * hotplug.
1081 */
1082u64 memory_hotplug_max(void)
1083{
1084 return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
1085}
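/*
 * Example (hypothetical values): on a partition whose ibm,lrdr-capacity
 * property reports 0x1000000000 (64 GiB) while memblock_end_of_DRAM()
 * is 0x400000000 (16 GiB), memory_hotplug_max() returns 64 GiB as the
 * ceiling for hot added memory.
 */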
1086#endif /* CONFIG_MEMORY_HOTPLUG */
1087
1088/* Virtual Processor Home Node (VPHN) support */
1089#ifdef CONFIG_PPC_SPLPAR
1090struct topology_update_data {
1091 struct topology_update_data *next;
1092 unsigned int cpu;
1093 int old_nid;
1094 int new_nid;
1095};
1096
1097#define TOPOLOGY_DEF_TIMER_SECS 60
1098
1099static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
1100static cpumask_t cpu_associativity_changes_mask;
1101static int vphn_enabled;
1102static int prrn_enabled;
1103static void reset_topology_timer(void);
1104static int topology_timer_secs = 1;
1105static int topology_inited;
1106
1107/*
1108 * Change polling interval for associativity changes.
1109 */
1110int timed_topology_update(int nsecs)
1111{
1112 if (vphn_enabled) {
1113 if (nsecs > 0)
1114 topology_timer_secs = nsecs;
1115 else
1116 topology_timer_secs = TOPOLOGY_DEF_TIMER_SECS;
1117
1118 reset_topology_timer();
1119 }
1120
1121 return 0;
1122}
1123
1124/*
1125 * Snapshot the hypervisor's current associativity change counters into
1126 * the local vphn_cpu_change_counts array.
1127 */
1128static void setup_cpu_associativity_change_counters(void)
1129{
1130 int cpu;
1131
1132 /* The VPHN feature supports a maximum of 8 reference points */
1133 BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
1134
1135 for_each_possible_cpu(cpu) {
1136 int i;
1137 u8 *counts = vphn_cpu_change_counts[cpu];
1138 volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts;
1139
1140 for (i = 0; i < distance_ref_points_depth; i++)
1141 counts[i] = hypervisor_counts[i];
1142 }
1143}
1144
1145/*
1146 * The hypervisor maintains a set of 8 associativity change counters in
1147 * the VPA of each cpu that correspond to the associativity levels in the
1148 * ibm,associativity-reference-points property. When an associativity
1149 * level changes, the corresponding counter is incremented.
1150 *
1151 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
1152 * node associativity levels have changed.
1153 *
1154 * Returns the number of cpus with unhandled associativity changes.
1155 */
1156static int update_cpu_associativity_changes_mask(void)
1157{
1158 int cpu;
1159 cpumask_t *changes = &cpu_associativity_changes_mask;
1160
1161 for_each_possible_cpu(cpu) {
1162 int i, changed = 0;
1163 u8 *counts = vphn_cpu_change_counts[cpu];
1164 volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts;
1165
1166 for (i = 0; i < distance_ref_points_depth; i++) {
1167 if (hypervisor_counts[i] != counts[i]) {
1168 counts[i] = hypervisor_counts[i];
1169 changed = 1;
1170 }
1171 }
1172 if (changed) {
1173 cpumask_or(changes, changes, cpu_sibling_mask(cpu));
1174 cpu = cpu_last_thread_sibling(cpu);
1175 }
1176 }
1177
1178 return cpumask_weight(changes);
1179}
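/*
 * Illustrative counter comparison (hypothetical values) for one cpu,
 * with distance_ref_points_depth == 4:
 *
 *   cached counts:     { 3, 7, 1, 0 }
 *   hypervisor counts: { 3, 8, 1, 0 }
 *
 * Level 1 differs, so the cached value is refreshed and all thread
 * siblings of that cpu are flagged in cpu_associativity_changes_mask.
 */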
1180
1181/*
1182 * Retrieve the new associativity information for a virtual processor's
1183 * home node.
1184 */
1185static long vphn_get_associativity(unsigned long cpu,
1186 __be32 *associativity)
1187{
1188 long rc;
1189
1190 rc = hcall_vphn(get_hard_smp_processor_id(cpu),
1191 VPHN_FLAG_VCPU, associativity);
1192
1193 switch (rc) {
1194 case H_FUNCTION:
1195 printk_once(KERN_INFO
1196 "VPHN is not supported. Disabling polling...\n");
1197 stop_topology_update();
1198 break;
1199 case H_HARDWARE:
1200 printk(KERN_ERR
1201 "hcall_vphn() experienced a hardware fault preventing VPHN. Disabling polling...\n");
1203 stop_topology_update();
1204 break;
1205 case H_SUCCESS:
1206 dbg("VPHN hcall succeeded. Reset polling...\n");
1207 timed_topology_update(0);
1208 break;
1209 }
1210
1211 return rc;
1212}
1213
1214int find_and_online_cpu_nid(int cpu)
1215{
1216 __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1217 int new_nid;
1218
1219 /* Use associativity from first thread for all siblings */
1220 if (vphn_get_associativity(cpu, associativity))
1221 return cpu_to_node(cpu);
1222
1223 new_nid = associativity_to_nid(associativity);
1224 if (new_nid < 0 || !node_possible(new_nid))
1225 new_nid = first_online_node;
1226
1227 if (NODE_DATA(new_nid) == NULL) {
1228#ifdef CONFIG_MEMORY_HOTPLUG
1229 /*
1230 * Need to ensure that NODE_DATA is initialized for a node from
1231 * available memory (see memblock_alloc_try_nid). If unable to
1232 * init the node, then default to nearest node that has memory
1233 * installed. Skip onlining a node if the subsystems are not
1234 * yet initialized.
1235 */
1236 if (!topology_inited || try_online_node(new_nid))
1237 new_nid = first_online_node;
1238#else
1239 /*
1240 * Default to using the nearest node that has memory installed.
1241 * Otherwise, it would be necessary to patch the kernel MM code
1242 * to deal with more memoryless-node error conditions.
1243 */
1244 new_nid = first_online_node;
1245#endif
1246 }
1247
1248 pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__,
1249 cpu, new_nid);
1250 return new_nid;
1251}
1252
1253/*
1254 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
1255 * characteristics change. This function doesn't perform any locking and is
1256 * only safe to call from stop_machine().
1257 */
1258static int update_cpu_topology(void *data)
1259{
1260 struct topology_update_data *update;
1261 unsigned long cpu;
1262
1263 if (!data)
1264 return -EINVAL;
1265
1266 cpu = smp_processor_id();
1267
1268 for (update = data; update; update = update->next) {
1269 int new_nid = update->new_nid;
1270 if (cpu != update->cpu)
1271 continue;
1272
1273 unmap_cpu_from_node(cpu);
1274 map_cpu_to_node(cpu, new_nid);
1275 set_cpu_numa_node(cpu, new_nid);
1276 set_cpu_numa_mem(cpu, local_memory_node(new_nid));
1277 vdso_getcpu_init();
1278 }
1279
1280 return 0;
1281}
1282
1283static int update_lookup_table(void *data)
1284{
1285 struct topology_update_data *update;
1286
1287 if (!data)
1288 return -EINVAL;
1289
1290 /*
1291 * Upon topology update, the numa-cpu lookup table needs to be updated
1292 * for all threads in the core, including offline CPUs, to ensure that
1293 * future hotplug operations respect the cpu-to-node associativity
1294 * properly.
1295 */
1296 for (update = data; update; update = update->next) {
1297 int nid, base, j;
1298
1299 nid = update->new_nid;
1300 base = cpu_first_thread_sibling(update->cpu);
1301
1302 for (j = 0; j < threads_per_core; j++) {
1303 update_numa_cpu_lookup_table(base + j, nid);
1304 }
1305 }
1306
1307 return 0;
1308}
1309
1310/*
1311 * Update the node maps and sysfs entries for each cpu whose home node
1312 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
1313 *
1314 * cpus_locked says whether we already hold cpu_hotplug_lock.
1315 */
1316int numa_update_cpu_topology(bool cpus_locked)
1317{
1318 unsigned int cpu, sibling, changed = 0;
1319 struct topology_update_data *updates, *ud;
1320 cpumask_t updated_cpus;
1321 struct device *dev;
1322 int weight, new_nid, i = 0;
1323
1324 if (!prrn_enabled && !vphn_enabled && topology_inited)
1325 return 0;
1326
1327 weight = cpumask_weight(&cpu_associativity_changes_mask);
1328 if (!weight)
1329 return 0;
1330
1331 updates = kcalloc(weight, sizeof(*updates), GFP_KERNEL);
1332 if (!updates)
1333 return 0;
1334
1335 cpumask_clear(&updated_cpus);
1336
1337 for_each_cpu(cpu, &cpu_associativity_changes_mask) {
1338 /*
1339 * If siblings aren't flagged for changes, the updates list
1340 * will be too short. Skip this cpu now and flag its siblings
1341 * so the whole core is handled on the next update.
1342 */
1343 if (!cpumask_subset(cpu_sibling_mask(cpu),
1344 &cpu_associativity_changes_mask)) {
1345 pr_info("Sibling bits not set for associativity "
1346 "change, cpu%d\n", cpu);
1347 cpumask_or(&cpu_associativity_changes_mask,
1348 &cpu_associativity_changes_mask,
1349 cpu_sibling_mask(cpu));
1350 cpu = cpu_last_thread_sibling(cpu);
1351 continue;
1352 }
1353
1354 new_nid = find_and_online_cpu_nid(cpu);
1355
1356 if (new_nid == numa_cpu_lookup_table[cpu]) {
1357 cpumask_andnot(&cpu_associativity_changes_mask,
1358 &cpu_associativity_changes_mask,
1359 cpu_sibling_mask(cpu));
1360 dbg("Assoc chg gives same node %d for cpu%d\n",
1361 new_nid, cpu);
1362 cpu = cpu_last_thread_sibling(cpu);
1363 continue;
1364 }
1365
1366 for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
1367 ud = &updates[i++];
1368 ud->next = &updates[i];
1369 ud->cpu = sibling;
1370 ud->new_nid = new_nid;
1371 ud->old_nid = numa_cpu_lookup_table[sibling];
1372 cpumask_set_cpu(sibling, &updated_cpus);
1373 }
1374 cpu = cpu_last_thread_sibling(cpu);
1375 }
1376
1377 /*
1378 * Terminate the list: the last entry filled in above would
1379 * otherwise leave 'next' pointing past the end of 'updates'.
1380 */
1381 if (i)
1382 updates[i-1].next = NULL;
1383
1384 pr_debug("Topology update for the following CPUs:\n");
1385 if (cpumask_weight(&updated_cpus)) {
1386 for (ud = &updates[0]; ud; ud = ud->next) {
1387 pr_debug("cpu %d moving from node %d "
1388 "to %d\n", ud->cpu,
1389 ud->old_nid, ud->new_nid);
1390 }
1391 }
1392
1393 /*
1394 * In cases where we have nothing to update (because the updates list
1395 * is too short or because the new topology is the same as the old
1396 * one), skip invoking update_cpu_topology() via stop-machine(). This
1397 * is necessary (and not just a fast-path optimization) since
1398 * stop-machine can end up electing a random CPU to run
1399 * update_cpu_topology(), and thus trick us into setting up incorrect
1400 * cpu-node mappings (since 'updates' is kcalloc()'ed, i.e. zeroed).
1401 *
1402 * For the same reason, skip all of the updates that follow.
1403 */
1404 if (!cpumask_weight(&updated_cpus))
1405 goto out;
1406
1407 if (cpus_locked)
1408 stop_machine_cpuslocked(update_cpu_topology, &updates[0],
1409 &updated_cpus);
1410 else
1411 stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
1412
1413 /*
1414 * Update the numa-cpu lookup table with the new mappings, even for
1415 * offline CPUs. It is best to perform this update from the stop-
1416 * machine context.
1417 */
1418 if (cpus_locked)
1419 stop_machine_cpuslocked(update_lookup_table, &updates[0],
1420 cpumask_of(raw_smp_processor_id()));
1421 else
1422 stop_machine(update_lookup_table, &updates[0],
1423 cpumask_of(raw_smp_processor_id()));
1424
1425 for (ud = &updates[0]; ud; ud = ud->next) {
1426 unregister_cpu_under_node(ud->cpu, ud->old_nid);
1427 register_cpu_under_node(ud->cpu, ud->new_nid);
1428
1429 dev = get_cpu_device(ud->cpu);
1430 if (dev)
1431 kobject_uevent(&dev->kobj, KOBJ_CHANGE);
1432 cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
1433 changed = 1;
1434 }
1435
1436out:
1437 kfree(updates);
1438 return changed;
1439}
1440
1441int arch_update_cpu_topology(void)
1442{
1443 return numa_update_cpu_topology(true);
1444}
1445
1446static void topology_work_fn(struct work_struct *work)
1447{
1448 rebuild_sched_domains();
1449}
1450static DECLARE_WORK(topology_work, topology_work_fn);
1451
1452static void topology_schedule_update(void)
1453{
1454 schedule_work(&topology_work);
1455}
1456
1457static void topology_timer_fn(struct timer_list *unused)
1458{
1459 if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
1460 topology_schedule_update();
1461 else if (vphn_enabled) {
1462 if (update_cpu_associativity_changes_mask() > 0)
1463 topology_schedule_update();
1464 reset_topology_timer();
1465 }
1466}
1467static struct timer_list topology_timer;
1468
1469static void reset_topology_timer(void)
1470{
1471 if (vphn_enabled)
1472 mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
1473}
1474
1475#ifdef CONFIG_SMP
1476
1477static int dt_update_callback(struct notifier_block *nb,
1478 unsigned long action, void *data)
1479{
1480 struct of_reconfig_data *update = data;
1481 int rc = NOTIFY_DONE;
1482
1483 switch (action) {
1484 case OF_RECONFIG_UPDATE_PROPERTY:
1485 if (of_node_is_type(update->dn, "cpu") &&
1486 !of_prop_cmp(update->prop->name, "ibm,associativity")) {
1487 u32 core_id;
1488 of_property_read_u32(update->dn, "reg", &core_id);
1489 dlpar_cpu_readd(core_id);
1490 rc = NOTIFY_OK;
1491 }
1492 break;
1493 }
1494
1495 return rc;
1496}
1497
1498static struct notifier_block dt_update_nb = {
1499 .notifier_call = dt_update_callback,
1500};
1501
1502#endif
1503
1504/*
1505 * Start polling for associativity changes.
1506 */
1507int start_topology_update(void)
1508{
1509 int rc = 0;
1510
1511 if (!topology_updates_enabled)
1512 return 0;
1513
1514 if (firmware_has_feature(FW_FEATURE_PRRN)) {
1515 if (!prrn_enabled) {
1516 prrn_enabled = 1;
1517#ifdef CONFIG_SMP
1518 rc = of_reconfig_notifier_register(&dt_update_nb);
1519#endif
1520 }
1521 }
1522 if (firmware_has_feature(FW_FEATURE_VPHN) &&
1523 lppaca_shared_proc(get_lppaca())) {
1524 if (!vphn_enabled) {
1525 vphn_enabled = 1;
1526 setup_cpu_associativity_change_counters();
1527 timer_setup(&topology_timer, topology_timer_fn,
1528 TIMER_DEFERRABLE);
1529 reset_topology_timer();
1530 }
1531 }
1532
1533 pr_info("Starting topology update%s%s\n",
1534 (prrn_enabled ? " prrn_enabled" : ""),
1535 (vphn_enabled ? " vphn_enabled" : ""));
1536
1537 return rc;
1538}
1539
1540/*
1541 * Disable polling for VPHN associativity changes.
1542 */
1543int stop_topology_update(void)
1544{
1545 int rc = 0;
1546
1547 if (!topology_updates_enabled)
1548 return 0;
1549
1550 if (prrn_enabled) {
1551 prrn_enabled = 0;
1552#ifdef CONFIG_SMP
1553 rc = of_reconfig_notifier_unregister(&dt_update_nb);
1554#endif
1555 }
1556 if (vphn_enabled) {
1557 vphn_enabled = 0;
1558 rc = del_timer_sync(&topology_timer);
1559 }
1560
1561 pr_info("Stopping topology update\n");
1562
1563 return rc;
1564}
1565
1566int prrn_is_enabled(void)
1567{
1568 return prrn_enabled;
1569}
1570
1571void __init shared_proc_topology_init(void)
1572{
1573 if (lppaca_shared_proc(get_lppaca())) {
1574 bitmap_fill(cpumask_bits(&cpu_associativity_changes_mask),
1575 nr_cpumask_bits);
1576 numa_update_cpu_topology(false);
1577 }
1578}
1579
1580static int topology_read(struct seq_file *file, void *v)
1581{
1582 if (vphn_enabled || prrn_enabled)
1583 seq_puts(file, "on\n");
1584 else
1585 seq_puts(file, "off\n");
1586
1587 return 0;
1588}
1589
1590static int topology_open(struct inode *inode, struct file *file)
1591{
1592 return single_open(file, topology_read, NULL);
1593}
1594
1595static ssize_t topology_write(struct file *file, const char __user *buf,
1596 size_t count, loff_t *off)
1597{
1598 char kbuf[4]; /* "on" or "off" plus null. */
1599 int read_len;
1600
1601 read_len = count < 3 ? count : 3;
1602 if (copy_from_user(kbuf, buf, read_len))
1603 return -EFAULT;
1604
1605 kbuf[read_len] = '\0';
1606
1607 if (!strncmp(kbuf, "on", 2)) {
1608 topology_updates_enabled = true;
1609 start_topology_update();
1610 } else if (!strncmp(kbuf, "off", 3)) {
1611 stop_topology_update();
1612 topology_updates_enabled = false;
1613 } else
1614 return -EINVAL;
1615
1616 return count;
1617}
1618
1619static const struct file_operations topology_ops = {
1620 .read = seq_read,
1621 .write = topology_write,
1622 .open = topology_open,
1623 .release = single_release
1624};
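/*
 * Usage example for the proc interface registered below (illustrative
 * shell session):
 *
 *   # cat /proc/powerpc/topology_updates
 *   off
 *   # echo on > /proc/powerpc/topology_updates
 */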
1625
1626static int topology_update_init(void)
1627{
1628 start_topology_update();
1629
1630 if (vphn_enabled)
1631 topology_schedule_update();
1632
1633 if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
1634 return -ENOMEM;
1635
1636 topology_inited = 1;
1637 return 0;
1638}
1639device_initcall(topology_update_init);
1640#endif /* CONFIG_PPC_SPLPAR */