   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * pSeries NUMA support
   4 *
   5 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
   6 */
   7#define pr_fmt(fmt) "numa: " fmt
   8
   9#include <linux/threads.h>
  10#include <linux/memblock.h>
  11#include <linux/init.h>
  12#include <linux/mm.h>
  13#include <linux/mmzone.h>
  14#include <linux/export.h>
  15#include <linux/nodemask.h>
  16#include <linux/cpu.h>
  17#include <linux/notifier.h>
  18#include <linux/of.h>
  19#include <linux/of_address.h>
  20#include <linux/pfn.h>
  21#include <linux/cpuset.h>
  22#include <linux/node.h>
  23#include <linux/stop_machine.h>
  24#include <linux/proc_fs.h>
  25#include <linux/seq_file.h>
  26#include <linux/uaccess.h>
  27#include <linux/slab.h>
  28#include <asm/cputhreads.h>
  29#include <asm/sparsemem.h>
  30#include <asm/smp.h>
  31#include <asm/topology.h>
  32#include <asm/firmware.h>
  33#include <asm/paca.h>
  34#include <asm/hvcall.h>
  35#include <asm/setup.h>
  36#include <asm/vdso.h>
  37#include <asm/vphn.h>
  38#include <asm/drmem.h>
  39
  40static int numa_enabled = 1;
  41
  42static char *cmdline __initdata;
  43
  44int numa_cpu_lookup_table[NR_CPUS];
  45cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
  46struct pglist_data *node_data[MAX_NUMNODES];
  47
  48EXPORT_SYMBOL(numa_cpu_lookup_table);
  49EXPORT_SYMBOL(node_to_cpumask_map);
  50EXPORT_SYMBOL(node_data);
  51
  52static int primary_domain_index;
  53static int n_mem_addr_cells, n_mem_size_cells;
  54
  55#define FORM0_AFFINITY 0
  56#define FORM1_AFFINITY 1
  57#define FORM2_AFFINITY 2
  58static int affinity_form;
  59
  60#define MAX_DISTANCE_REF_POINTS 4
  61static int distance_ref_points_depth;
  62static const __be32 *distance_ref_points;
  63static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
  64static int numa_distance_table[MAX_NUMNODES][MAX_NUMNODES] = {
  65	[0 ... MAX_NUMNODES - 1] = { [0 ... MAX_NUMNODES - 1] = -1 }
  66};
  67static int numa_id_index_table[MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = NUMA_NO_NODE };
  68
  69/*
  70 * Allocate node_to_cpumask_map based on number of available nodes
  71 * Requires node_possible_map to be valid.
  72 *
  73 * Note: cpumask_of_node() is not valid until after this is done.
  74 */
  75static void __init setup_node_to_cpumask_map(void)
  76{
  77	unsigned int node;
  78
  79	/* setup nr_node_ids if not done yet */
  80	if (nr_node_ids == MAX_NUMNODES)
  81		setup_nr_node_ids();
  82
  83	/* allocate the map */
  84	for_each_node(node)
  85		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
  86
  87	/* cpumask_of_node() will now work */
  88	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
  89}
  90
  91static int __init fake_numa_create_new_node(unsigned long end_pfn,
  92						unsigned int *nid)
  93{
  94	unsigned long long mem;
  95	char *p = cmdline;
  96	static unsigned int fake_nid;
  97	static unsigned long long curr_boundary;
  98
  99	/*
 100	 * Modify the node id, iff we have started creating NUMA nodes.
 101	 * We want to continue from where we left off the last time.
 102	 */
 103	if (fake_nid)
 104		*nid = fake_nid;
 105	/*
 106	 * In case there are no more arguments to parse, the
 107	 * node_id should be the same as the last fake node id
 108	 * (we've handled this above).
 109	 */
 110	if (!p)
 111		return 0;
 112
 113	mem = memparse(p, &p);
 114	if (!mem)
 115		return 0;
 116
 117	if (mem < curr_boundary)
 118		return 0;
 119
 120	curr_boundary = mem;
 121
 122	if ((end_pfn << PAGE_SHIFT) > mem) {
 123		/*
 124		 * Skip commas and spaces
 125		 */
 126		while (*p == ',' || *p == ' ' || *p == '\t')
 127			p++;
 128
 129		cmdline = p;
 130		fake_nid++;
 131		*nid = fake_nid;
 132		pr_debug("created new fake_node with id %d\n", fake_nid);
 133		return 1;
 134	}
 135	return 0;
 136}
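/*
 * Illustrative note (not part of the original source): the function above
 * implements the "numa=fake=" command line option parsed by early_numa()
 * below.  Each value in the option is a byte boundary; e.g. booting with
 * "numa=fake=1G,4G" is expected to leave memory below 1GB in the node
 * computed so far, put [1GB, 4GB) into fake node 1, and everything above
 * 4GB into fake node 2.
 */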
 137
 138static void __init reset_numa_cpu_lookup_table(void)
 139{
 140	unsigned int cpu;
 141
 142	for_each_possible_cpu(cpu)
 143		numa_cpu_lookup_table[cpu] = -1;
 144}
 145
 146void map_cpu_to_node(int cpu, int node)
 147{
 148	update_numa_cpu_lookup_table(cpu, node);
 149
 150	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) {
 151		pr_debug("adding cpu %d to node %d\n", cpu, node);
 152		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
 153	}
 154}
 155
 156#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
 157void unmap_cpu_from_node(unsigned long cpu)
 158{
 159	int node = numa_cpu_lookup_table[cpu];
 160
 161	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
 162		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
 163		pr_debug("removing cpu %lu from node %d\n", cpu, node);
 164	} else {
 165		pr_warn("Warning: cpu %lu not found in node %d\n", cpu, node);
 166	}
 167}
 168#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
 169
 170static int __associativity_to_nid(const __be32 *associativity,
 171				  int max_array_sz)
 172{
 173	int nid;
 174	/*
 175	 * primary_domain_index is a 1-based array index.
 176	 */
 177	int index = primary_domain_index - 1;
 178
 179	if (!numa_enabled || index >= max_array_sz)
 180		return NUMA_NO_NODE;
 181
 182	nid = of_read_number(&associativity[index], 1);
 183
 184	/* POWER4 LPAR uses 0xffff as invalid node */
 185	if (nid == 0xffff || nid >= nr_node_ids)
 186		nid = NUMA_NO_NODE;
 187	return nid;
 188}
 189/*
 190 * Returns nid in the range [0..nr_node_ids-1], or -1 if no useful NUMA
 191 * info is found.
 192 */
 193static int associativity_to_nid(const __be32 *associativity)
 194{
 195	int array_sz = of_read_number(associativity, 1);
 196
 197	/* Skip the first element in the associativity array */
 198	return __associativity_to_nid((associativity + 1), array_sz);
 199}
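/*
 * Example with hypothetical property values: an ibm,associativity property
 * of { 4, 0x10, 0x20, 0x30, 0x2 } encodes a 4-entry array.  With
 * primary_domain_index == 4, associativity_to_nid() skips the length cell
 * and __associativity_to_nid() reads entry index 3 of the remaining array,
 * i.e. 0x2, so the device belongs to node 2.
 */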
 200
 201static int __cpu_form2_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
 202{
 203	int dist;
 204	int node1, node2;
 205
 206	node1 = associativity_to_nid(cpu1_assoc);
 207	node2 = associativity_to_nid(cpu2_assoc);
 208
 209	dist = numa_distance_table[node1][node2];
 210	if (dist <= LOCAL_DISTANCE)
 211		return 0;
 212	else if (dist <= REMOTE_DISTANCE)
 213		return 1;
 214	else
 215		return 2;
 216}
 217
 218static int __cpu_form1_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
 219{
 220	int dist = 0;
 221
 222	int i, index;
 223
 224	for (i = 0; i < distance_ref_points_depth; i++) {
 225		index = be32_to_cpu(distance_ref_points[i]);
 226		if (cpu1_assoc[index] == cpu2_assoc[index])
 227			break;
 228		dist++;
 229	}
 230
 231	return dist;
 232}
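/*
 * Example with hypothetical values: with distance_ref_points = { 4, 2 },
 * two cpus whose associativity arrays agree at index 4 get relative
 * distance 0; cpus that differ at index 4 but agree at index 2 get 1;
 * cpus that differ at both reference points get 2.
 */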
 233
 234int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
 235{
 236	/* We should not get called with FORM0 */
 237	VM_WARN_ON(affinity_form == FORM0_AFFINITY);
 238	if (affinity_form == FORM1_AFFINITY)
 239		return __cpu_form1_relative_distance(cpu1_assoc, cpu2_assoc);
 240	return __cpu_form2_relative_distance(cpu1_assoc, cpu2_assoc);
 241}
 242
 243/* must hold reference to node during call */
 244static const __be32 *of_get_associativity(struct device_node *dev)
 245{
 246	return of_get_property(dev, "ibm,associativity", NULL);
 247}
 248
 249int __node_distance(int a, int b)
 250{
 251	int i;
 252	int distance = LOCAL_DISTANCE;
 253
 254	if (affinity_form == FORM2_AFFINITY)
 255		return numa_distance_table[a][b];
 256	else if (affinity_form == FORM0_AFFINITY)
 257		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
 258
 259	for (i = 0; i < distance_ref_points_depth; i++) {
 260		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
 261			break;
 262
 263		/* Double the distance for each NUMA level */
 264		distance *= 2;
 265	}
 266
 267	return distance;
 268}
 269EXPORT_SYMBOL(__node_distance);
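/*
 * Worked example for the form 1 case above: with
 * distance_ref_points_depth == 2, nodes whose lookup-table entries match
 * at the first reference point report LOCAL_DISTANCE (10); nodes that
 * differ at the first but match at the second report 20; nodes that
 * differ at every level report 40.
 */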
 270
 271/* Returns the nid associated with the given device tree node,
 272 * or -1 if not found.
 273 */
 274static int of_node_to_nid_single(struct device_node *device)
 275{
 276	int nid = NUMA_NO_NODE;
 277	const __be32 *tmp;
 278
 279	tmp = of_get_associativity(device);
 280	if (tmp)
 281		nid = associativity_to_nid(tmp);
 282	return nid;
 283}
 284
 285/* Walk the device tree upwards, looking for an associativity id */
 286int of_node_to_nid(struct device_node *device)
 287{
 288	int nid = NUMA_NO_NODE;
 289
 290	of_node_get(device);
 291	while (device) {
 292		nid = of_node_to_nid_single(device);
 293		if (nid != -1)
 294			break;
 295
 296		device = of_get_next_parent(device);
 297	}
 298	of_node_put(device);
 299
 300	return nid;
 301}
 302EXPORT_SYMBOL(of_node_to_nid);
 303
 304static void __initialize_form1_numa_distance(const __be32 *associativity,
 305					     int max_array_sz)
 306{
 307	int i, nid;
 308
 309	if (affinity_form != FORM1_AFFINITY)
 310		return;
 311
 312	nid = __associativity_to_nid(associativity, max_array_sz);
 313	if (nid != NUMA_NO_NODE) {
 314		for (i = 0; i < distance_ref_points_depth; i++) {
 315			const __be32 *entry;
 316			int index = be32_to_cpu(distance_ref_points[i]) - 1;
 317
 318			/*
 319			 * broken hierarchy, return with broken distance table
 320			 */
 321			if (WARN(index >= max_array_sz, "Broken ibm,associativity property"))
 322				return;
 323
 324			entry = &associativity[index];
 325			distance_lookup_table[nid][i] = of_read_number(entry, 1);
 326		}
 327	}
 328}
 329
 330static void initialize_form1_numa_distance(const __be32 *associativity)
 331{
 332	int array_sz;
 333
 334	array_sz = of_read_number(associativity, 1);
 335	/* Skip the first element in the associativity array */
 336	__initialize_form1_numa_distance(associativity + 1, array_sz);
 337}
 338
 339/*
 340 * Used to update distance information w.r.t. a newly added node.
 341 */
 342void update_numa_distance(struct device_node *node)
 343{
 344	int nid;
 345
 346	if (affinity_form == FORM0_AFFINITY)
 347		return;
 348	else if (affinity_form == FORM1_AFFINITY) {
 349		const __be32 *associativity;
 350
 351		associativity = of_get_associativity(node);
 352		if (!associativity)
 353			return;
 354
 355		initialize_form1_numa_distance(associativity);
 356		return;
 357	}
 358
 359	/* FORM2 affinity  */
 360	nid = of_node_to_nid_single(node);
 361	if (nid == NUMA_NO_NODE)
 362		return;
 363
 364	/*
 365	 * With FORM2 we expect NUMA distance of all possible NUMA
 366	 * nodes to be provided during boot.
 367	 */
 368	WARN(numa_distance_table[nid][nid] == -1,
 369	     "NUMA distance details for node %d not provided\n", nid);
 370}
 371EXPORT_SYMBOL_GPL(update_numa_distance);
 372
 373/*
 374 * ibm,numa-lookup-index-table= {N, domainid1, domainid2, ..... domainidN}
 375 * ibm,numa-distance-table = { N, 1, 2, 4, 5, 1, 6, .... N elements}
 376 */
 377static void __init initialize_form2_numa_distance_lookup_table(void)
 378{
 379	int i, j;
 380	struct device_node *root;
 381	const __u8 *form2_distances;
 382	const __be32 *numa_lookup_index;
 383	int form2_distances_length;
 384	int max_numa_index, distance_index;
 385
 386	if (firmware_has_feature(FW_FEATURE_OPAL))
 387		root = of_find_node_by_path("/ibm,opal");
 388	else
 389		root = of_find_node_by_path("/rtas");
 390	if (!root)
 391		root = of_find_node_by_path("/");
 392
 393	numa_lookup_index = of_get_property(root, "ibm,numa-lookup-index-table", NULL);
 394	max_numa_index = of_read_number(&numa_lookup_index[0], 1);
 395
 396	/* first element of the array is the size and is encode-int */
 397	form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL);
 398	form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1);
 399	/* Skip the size which is encoded int */
 400	form2_distances += sizeof(__be32);
 401
 402	pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n",
 403		 form2_distances_length, max_numa_index);
 404
 405	for (i = 0; i < max_numa_index; i++)
 406		/* +1 skip the max_numa_index in the property */
 407		numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1);
 408
 409
 410	if (form2_distances_length != max_numa_index * max_numa_index) {
 411		WARN(1, "Wrong NUMA distance information\n");
 412		form2_distances = NULL; // don't use it
 413	}
 414	distance_index = 0;
 415	for (i = 0;  i < max_numa_index; i++) {
 416		for (j = 0; j < max_numa_index; j++) {
 417			int nodeA = numa_id_index_table[i];
 418			int nodeB = numa_id_index_table[j];
 419			int dist;
 420
 421			if (form2_distances)
 422				dist = form2_distances[distance_index++];
 423			else if (nodeA == nodeB)
 424				dist = LOCAL_DISTANCE;
 425			else
 426				dist = REMOTE_DISTANCE;
 427			numa_distance_table[nodeA][nodeB] = dist;
 428			pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist);
 429		}
 430	}
 431
 432	of_node_put(root);
 433}
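/*
 * Example with hypothetical device tree values: given
 * ibm,numa-lookup-index-table = { 2, 0, 4 } and
 * ibm,numa-distance-table = { 4, 10, 40, 40, 10 }, the function above
 * fills numa_distance_table[0][0] = 10, [0][4] = 40, [4][0] = 40 and
 * [4][4] = 10, i.e. a two-node system with local distance 10 and remote
 * distance 40 between NUMA ids 0 and 4.
 */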
 434
 435static int __init find_primary_domain_index(void)
 436{
 437	int index;
 438	struct device_node *root;
 439
 440	/*
 441	 * Check for which form of affinity.
 442	 */
 443	if (firmware_has_feature(FW_FEATURE_OPAL)) {
 444		affinity_form = FORM1_AFFINITY;
 445	} else if (firmware_has_feature(FW_FEATURE_FORM2_AFFINITY)) {
 446		pr_debug("Using form 2 affinity\n");
 447		affinity_form = FORM2_AFFINITY;
 448	} else if (firmware_has_feature(FW_FEATURE_FORM1_AFFINITY)) {
 449		pr_debug("Using form 1 affinity\n");
 450		affinity_form = FORM1_AFFINITY;
 451	} else
 452		affinity_form = FORM0_AFFINITY;
 453
 454	if (firmware_has_feature(FW_FEATURE_OPAL))
 455		root = of_find_node_by_path("/ibm,opal");
 456	else
 457		root = of_find_node_by_path("/rtas");
 458	if (!root)
 459		root = of_find_node_by_path("/");
 460
 461	/*
 462	 * This property is a set of 32-bit integers, each representing
 463	 * an index into the ibm,associativity nodes.
 464	 *
 465	 * With form 0 affinity the first integer is for an SMP configuration
 466	 * (should be all 0's) and the second is for a normal NUMA
 467	 * configuration. We have only one level of NUMA.
 468	 *
 469	 * With form 1 affinity the first integer is the most significant
 470	 * NUMA boundary and the following are progressively less significant
 471	 * boundaries. There can be more than one level of NUMA.
 472	 */
 473	distance_ref_points = of_get_property(root,
 474					"ibm,associativity-reference-points",
 475					&distance_ref_points_depth);
 476
 477	if (!distance_ref_points) {
 478		pr_debug("ibm,associativity-reference-points not found.\n");
 479		goto err;
 480	}
 481
 482	distance_ref_points_depth /= sizeof(int);
 483	if (affinity_form == FORM0_AFFINITY) {
 484		if (distance_ref_points_depth < 2) {
 485			pr_warn("short ibm,associativity-reference-points\n");
 486			goto err;
 487		}
 488
 489		index = of_read_number(&distance_ref_points[1], 1);
 490	} else {
 491		/*
 492		 * Both FORM1 and FORM2 affinity find the primary domain details
 493		 * at the same offset.
 494		 */
 495		index = of_read_number(distance_ref_points, 1);
 496	}
 497	/*
 498	 * Warn and cap if the hardware supports more than
 499	 * MAX_DISTANCE_REF_POINTS domains.
 500	 */
 501	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
 502		pr_warn("distance array capped at %d entries\n",
 503			MAX_DISTANCE_REF_POINTS);
 504		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
 505	}
 506
 507	of_node_put(root);
 508	return index;
 509
 510err:
 511	of_node_put(root);
 512	return -1;
 513}
 514
 515static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
 516{
 517	struct device_node *memory = NULL;
 518
 519	memory = of_find_node_by_type(memory, "memory");
 520	if (!memory)
 521		panic("numa.c: No memory nodes found!");
 522
 523	*n_addr_cells = of_n_addr_cells(memory);
 524	*n_size_cells = of_n_size_cells(memory);
 525	of_node_put(memory);
 526}
 527
 528static unsigned long read_n_cells(int n, const __be32 **buf)
 529{
 530	unsigned long result = 0;
 531
 532	while (n--) {
 533		result = (result << 32) | of_read_number(*buf, 1);
 534		(*buf)++;
 535	}
 536	return result;
 537}
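/*
 * Worked example: for n == 2 and a buffer holding the big-endian cells
 * { 0x00000001, 0x00000000 }, read_n_cells() concatenates them into the
 * 64-bit value 0x100000000 and advances *buf past both cells.
 */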
 538
 539struct assoc_arrays {
 540	u32	n_arrays;
 541	u32	array_sz;
 542	const __be32 *arrays;
 543};
 544
 545/*
 546 * Retrieve and validate the list of associativity arrays for drconf
 547 * memory from the ibm,associativity-lookup-arrays property of the
 548 * device tree.
 549 *
 550 * The layout of the ibm,associativity-lookup-arrays property is a number N
 551 * indicating the number of associativity arrays, followed by a number M
 552 * indicating the size of each associativity array, followed by a list
 553 * of N associativity arrays.
 554 */
 555static int of_get_assoc_arrays(struct assoc_arrays *aa)
 556{
 557	struct device_node *memory;
 558	const __be32 *prop;
 559	u32 len;
 560
 561	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
 562	if (!memory)
 563		return -1;
 564
 565	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
 566	if (!prop || len < 2 * sizeof(unsigned int)) {
 567		of_node_put(memory);
 568		return -1;
 569	}
 570
 571	aa->n_arrays = of_read_number(prop++, 1);
 572	aa->array_sz = of_read_number(prop++, 1);
 573
 574	of_node_put(memory);
 575
 576	/* Now that we know the number of arrays and size of each array,
 577	 * revalidate the size of the property read in.
 578	 */
 579	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
 580		return -1;
 581
 582	aa->arrays = prop;
 583	return 0;
 584}
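/*
 * Example with hypothetical property values: an
 * ibm,associativity-lookup-arrays property of
 * { 2, 4, a0, a1, a2, a3, b0, b1, b2, b3 } describes two associativity
 * arrays of four entries each.  An LMB with aa_index == 1 selects the
 * second array { b0, b1, b2, b3 }, from which the node id is read at
 * offset primary_domain_index - 1, as in __associativity_to_nid().
 */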
 585
 586static int __init get_nid_and_numa_distance(struct drmem_lmb *lmb)
 587{
 588	struct assoc_arrays aa = { .arrays = NULL };
 589	int default_nid = NUMA_NO_NODE;
 590	int nid = default_nid;
 591	int rc, index;
 592
 593	if ((primary_domain_index < 0) || !numa_enabled)
 594		return default_nid;
 595
 596	rc = of_get_assoc_arrays(&aa);
 597	if (rc)
 598		return default_nid;
 599
 600	if (primary_domain_index <= aa.array_sz &&
 601	    !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
 602		const __be32 *associativity;
 603
 604		index = lmb->aa_index * aa.array_sz;
 605		associativity = &aa.arrays[index];
 606		nid = __associativity_to_nid(associativity, aa.array_sz);
 607		if (nid > 0 && affinity_form == FORM1_AFFINITY) {
 608			/*
 609			 * lookup array associativity entries do not
 610			 * have the array length as the first element.
 611			 */
 612			__initialize_form1_numa_distance(associativity, aa.array_sz);
 613		}
 614	}
 615	return nid;
 616}
 617
 618/*
 619 * This is like of_node_to_nid_single() for memory represented in the
 620 * ibm,dynamic-reconfiguration-memory node.
 621 */
 622int of_drconf_to_nid_single(struct drmem_lmb *lmb)
 623{
 624	struct assoc_arrays aa = { .arrays = NULL };
 625	int default_nid = NUMA_NO_NODE;
 626	int nid = default_nid;
 627	int rc, index;
 628
 629	if ((primary_domain_index < 0) || !numa_enabled)
 630		return default_nid;
 631
 632	rc = of_get_assoc_arrays(&aa);
 633	if (rc)
 634		return default_nid;
 635
 636	if (primary_domain_index <= aa.array_sz &&
 637	    !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
 638		const __be32 *associativity;
 639
 640		index = lmb->aa_index * aa.array_sz;
 641		associativity = &aa.arrays[index];
 642		nid = __associativity_to_nid(associativity, aa.array_sz);
 643	}
 644	return nid;
 645}
 646
 647#ifdef CONFIG_PPC_SPLPAR
 648
 649static int __vphn_get_associativity(long lcpu, __be32 *associativity)
 650{
 651	long rc, hwid;
 652
 653	/*
 654	 * On a shared lpar, the device tree will not have node associativity.
 655	 * At this time the lppaca, or its __old_status field, may not be
 656	 * updated, hence the kernel cannot detect if it is on a shared lpar. So
 657	 * request an explicit associativity irrespective of whether the
 658	 * lpar is shared or dedicated. Use the device tree property as a
 659	 * fallback. cpu_to_phys_id is only valid between
 660	 * smp_setup_cpu_maps() and smp_setup_pacas().
 661	 */
 662	if (firmware_has_feature(FW_FEATURE_VPHN)) {
 663		if (cpu_to_phys_id)
 664			hwid = cpu_to_phys_id[lcpu];
 665		else
 666			hwid = get_hard_smp_processor_id(lcpu);
 667
 668		rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity);
 669		if (rc == H_SUCCESS)
 670			return 0;
 671	}
 672
 673	return -1;
 674}
 675
 676static int vphn_get_nid(long lcpu)
 677{
 678	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
 679
 680
 681	if (!__vphn_get_associativity(lcpu, associativity))
 682		return associativity_to_nid(associativity);
 683
 684	return NUMA_NO_NODE;
 685
 686}
 687#else
 688
 689static int __vphn_get_associativity(long lcpu, __be32 *associativity)
 690{
 691	return -1;
 692}
 693
 694static int vphn_get_nid(long unused)
 695{
 696	return NUMA_NO_NODE;
 697}
 698#endif  /* CONFIG_PPC_SPLPAR */
 699
 700/*
 701 * Figure out to which domain a cpu belongs and stick it there.
 702 * Return the id of the domain used.
 703 */
 704static int numa_setup_cpu(unsigned long lcpu)
 705{
 706	struct device_node *cpu;
 707	int fcpu = cpu_first_thread_sibling(lcpu);
 708	int nid = NUMA_NO_NODE;
 709
 710	if (!cpu_present(lcpu)) {
 711		set_cpu_numa_node(lcpu, first_online_node);
 712		return first_online_node;
 713	}
 714
 715	/*
 716	 * If a valid cpu-to-node mapping is already available, use it
 717	 * directly instead of querying the firmware, since it represents
 718	 * the most recent mapping notified to us by the platform (e.g. VPHN).
 719	 * The cpu_to_node binding remains the same for all threads in the
 720	 * core, so if a valid cpu-to-node mapping is already available for
 721	 * the first thread in the core, use it.
 722	 */
 723	nid = numa_cpu_lookup_table[fcpu];
 724	if (nid >= 0) {
 725		map_cpu_to_node(lcpu, nid);
 726		return nid;
 727	}
 728
 729	nid = vphn_get_nid(lcpu);
 730	if (nid != NUMA_NO_NODE)
 731		goto out_present;
 732
 733	cpu = of_get_cpu_node(lcpu, NULL);
 734
 735	if (!cpu) {
 736		WARN_ON(1);
 737		if (cpu_present(lcpu))
 738			goto out_present;
 739		else
 740			goto out;
 741	}
 742
 743	nid = of_node_to_nid_single(cpu);
 744	of_node_put(cpu);
 745
 746out_present:
 747	if (nid < 0 || !node_possible(nid))
 748		nid = first_online_node;
 749
 750	/*
 751	 * Update for the first thread of the core. All threads of a core
 752	 * have to be part of the same node. This not only avoids querying
 753	 * for every other thread in the core, but also avoids a case
 754	 * where a virtual node associativity change causes subsequent threads
 755	 * of a core to be associated with a different nid. However, if the first
 756	 * thread is already online, expect it to have a valid mapping.
 757	 */
 758	if (fcpu != lcpu) {
 759		WARN_ON(cpu_online(fcpu));
 760		map_cpu_to_node(fcpu, nid);
 761	}
 762
 763	map_cpu_to_node(lcpu, nid);
 764out:
 765	return nid;
 766}
 767
 768static void verify_cpu_node_mapping(int cpu, int node)
 769{
 770	int base, sibling, i;
 771
 772	/* Verify that all the threads in the core belong to the same node */
 773	base = cpu_first_thread_sibling(cpu);
 774
 775	for (i = 0; i < threads_per_core; i++) {
 776		sibling = base + i;
 777
 778		if (sibling == cpu || cpu_is_offline(sibling))
 779			continue;
 780
 781		if (cpu_to_node(sibling) != node) {
 782			WARN(1, "CPU thread siblings %d and %d don't belong"
 783				" to the same node!\n", cpu, sibling);
 784			break;
 785		}
 786	}
 787}
 788
 789/* Must run before sched domains notifier. */
 790static int ppc_numa_cpu_prepare(unsigned int cpu)
 791{
 792	int nid;
 793
 794	nid = numa_setup_cpu(cpu);
 795	verify_cpu_node_mapping(cpu, nid);
 796	return 0;
 797}
 798
 799static int ppc_numa_cpu_dead(unsigned int cpu)
 800{
 801	return 0;
 802}
 803
 804/*
 805 * Check and possibly modify a memory region to enforce the memory limit.
 806 *
 807 * Returns the size the region should have to enforce the memory limit.
 808 * This will either be the original value of size, a truncated value,
 809 * or zero. If the returned value of size is 0 the region should be
 810 * discarded as it lies wholly above the memory limit.
 811 */
 812static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 813						      unsigned long size)
 814{
 815	/*
 816	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
 817	 * we've already adjusted it for the limit and it takes care of
 818	 * having memory holes below the limit.  Also, in the case of
 819	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
 820	 */
 821
 822	if (start + size <= memblock_end_of_DRAM())
 823		return size;
 824
 825	if (start >= memblock_end_of_DRAM())
 826		return 0;
 827
 828	return memblock_end_of_DRAM() - start;
 829}
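/*
 * Worked example: if memblock_end_of_DRAM() is 0x40000000 (1GB), a
 * region starting at 0x30000000 with size 0x20000000 is truncated to
 * 0x10000000, and a region starting at or above 1GB is discarded
 * (the function returns 0).
 */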
 830
 831/*
 832 * Reads the counter for a given entry in
 833 * the linux,drconf-usable-memory property
 834 */
 835static inline int __init read_usm_ranges(const __be32 **usm)
 836{
 837	/*
 838	 * For each lmb in ibm,dynamic-memory a corresponding
 839	 * entry in linux,drconf-usable-memory property contains
 840	 * a counter followed by that many (base, size) duples.
 841	 * Read the counter from linux,drconf-usable-memory.
 842	 */
 843	return read_n_cells(n_mem_size_cells, usm);
 844}
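/*
 * Example for the kexec/kdump case (hypothetical values): a
 * linux,drconf-usable-memory entry of { 2, base0, size0, base1, size1 }
 * means the corresponding LMB contributes two usable ranges;
 * read_usm_ranges() returns the counter 2 and numa_setup_drmem_lmb()
 * below then reads each (base, size) duple with read_n_cells().
 */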
 845
 846/*
 847 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 848 * node.  This assumes n_mem_{addr,size}_cells have been set.
 849 */
 850static int __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
 851					const __be32 **usm,
 852					void *data)
 853{
 854	unsigned int ranges, is_kexec_kdump = 0;
 855	unsigned long base, size, sz;
 856	int nid;
 857
 858	/*
 859	 * Skip this block if the reserved bit is set in flags (0x80)
 860	 * or if the block is not assigned to this partition (0x8)
 861	 */
 862	if ((lmb->flags & DRCONF_MEM_RESERVED)
 863	    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
 864		return 0;
 865
 866	if (*usm)
 867		is_kexec_kdump = 1;
 868
 869	base = lmb->base_addr;
 870	size = drmem_lmb_size();
 871	ranges = 1;
 872
 873	if (is_kexec_kdump) {
 874		ranges = read_usm_ranges(usm);
 875		if (!ranges) /* there are no (base, size) duples */
 876			return 0;
 877	}
 878
 879	do {
 880		if (is_kexec_kdump) {
 881			base = read_n_cells(n_mem_addr_cells, usm);
 882			size = read_n_cells(n_mem_size_cells, usm);
 883		}
 884
 885		nid = get_nid_and_numa_distance(lmb);
 886		fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
 887					  &nid);
 888		node_set_online(nid);
 889		sz = numa_enforce_memory_limit(base, size);
 890		if (sz)
 891			memblock_set_node(base, sz, &memblock.memory, nid);
 892	} while (--ranges);
 893
 894	return 0;
 895}
 896
 897static int __init parse_numa_properties(void)
 898{
 899	struct device_node *memory;
 900	int default_nid = 0;
 901	unsigned long i;
 902	const __be32 *associativity;
 903
 904	if (numa_enabled == 0) {
 905		pr_warn("disabled by user\n");
 906		return -1;
 907	}
 908
 909	primary_domain_index = find_primary_domain_index();
 910
 911	if (primary_domain_index < 0) {
 912		/*
 913		 * If we fail to parse primary_domain_index from the device
 914		 * tree, mark NUMA as disabled and boot with NUMA disabled.
 915		 */
 916		numa_enabled = false;
 917		return primary_domain_index;
 918	}
 919
 920	pr_debug("associativity depth for CPU/Memory: %d\n", primary_domain_index);
 921
 922	/*
 923	 * If it is FORM2, initialize the distance table here.
 924	 */
 925	if (affinity_form == FORM2_AFFINITY)
 926		initialize_form2_numa_distance_lookup_table();
 927
 928	/*
 929	 * Even though we connect cpus to numa domains later in SMP
 930	 * init, we need to know the node ids now. This is because
 931	 * each node to be onlined must have NODE_DATA etc backing it.
 932	 */
 933	for_each_present_cpu(i) {
 934		__be32 vphn_assoc[VPHN_ASSOC_BUFSIZE];
 935		struct device_node *cpu;
 936		int nid = NUMA_NO_NODE;
 937
 938		memset(vphn_assoc, 0, VPHN_ASSOC_BUFSIZE * sizeof(__be32));
 939
 940		if (__vphn_get_associativity(i, vphn_assoc) == 0) {
 941			nid = associativity_to_nid(vphn_assoc);
 942			initialize_form1_numa_distance(vphn_assoc);
 943		} else {
 944
 945			/*
 946			 * Don't fall back to default_nid yet -- we will plug
 947			 * cpus into nodes once the memory scan has discovered
 948			 * the topology.
 949			 */
 950			cpu = of_get_cpu_node(i, NULL);
 951			BUG_ON(!cpu);
 952
 953			associativity = of_get_associativity(cpu);
 954			if (associativity) {
 955				nid = associativity_to_nid(associativity);
 956				initialize_form1_numa_distance(associativity);
 957			}
 958			of_node_put(cpu);
 959		}
 960
 961		/* node_set_online() is undefined behaviour if 'nid' is negative */
 962		if (likely(nid >= 0))
 963			node_set_online(nid);
 964	}
 965
 966	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
 967
 968	for_each_node_by_type(memory, "memory") {
 969		unsigned long start;
 970		unsigned long size;
 971		int nid;
 972		int ranges;
 973		const __be32 *memcell_buf;
 974		unsigned int len;
 975
 976		memcell_buf = of_get_property(memory,
 977			"linux,usable-memory", &len);
 978		if (!memcell_buf || len <= 0)
 979			memcell_buf = of_get_property(memory, "reg", &len);
 980		if (!memcell_buf || len <= 0)
 981			continue;
 982
 983		/* ranges in cell */
 984		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
 985new_range:
 986		/* these are order-sensitive, and modify the buffer pointer */
 987		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
 988		size = read_n_cells(n_mem_size_cells, &memcell_buf);
 989
 990		/*
 991		 * Assumption: either all memory nodes or none will
 992		 * have associativity properties.  If none, then
 993		 * everything goes to default_nid.
 994		 */
 995		associativity = of_get_associativity(memory);
 996		if (associativity) {
 997			nid = associativity_to_nid(associativity);
 998			initialize_form1_numa_distance(associativity);
 999		} else
1000			nid = default_nid;
1001
1002		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
1003		node_set_online(nid);
1004
1005		size = numa_enforce_memory_limit(start, size);
1006		if (size)
1007			memblock_set_node(start, size, &memblock.memory, nid);
1008
1009		if (--ranges)
1010			goto new_range;
1011	}
1012
1013	/*
1014	 * Now do the same thing for each MEMBLOCK listed in the
1015	 * ibm,dynamic-memory property in the
1016	 * ibm,dynamic-reconfiguration-memory node.
1017	 */
1018	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1019	if (memory) {
1020		walk_drmem_lmbs(memory, NULL, numa_setup_drmem_lmb);
1021		of_node_put(memory);
1022	}
1023
1024	return 0;
1025}
1026
1027static void __init setup_nonnuma(void)
1028{
1029	unsigned long top_of_ram = memblock_end_of_DRAM();
1030	unsigned long total_ram = memblock_phys_mem_size();
1031	unsigned long start_pfn, end_pfn;
1032	unsigned int nid = 0;
1033	int i;
1034
1035	pr_debug("Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram);
1036	pr_debug("Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20);
1037
1038	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
1039		fake_numa_create_new_node(end_pfn, &nid);
1040		memblock_set_node(PFN_PHYS(start_pfn),
1041				  PFN_PHYS(end_pfn - start_pfn),
1042				  &memblock.memory, nid);
1043		node_set_online(nid);
1044	}
1045}
1046
1047void __init dump_numa_cpu_topology(void)
1048{
1049	unsigned int node;
1050	unsigned int cpu, count;
1051
1052	if (!numa_enabled)
1053		return;
1054
1055	for_each_online_node(node) {
1056		pr_info("Node %d CPUs:", node);
1057
1058		count = 0;
1059		/*
1060		 * If we used a CPU iterator here we would miss printing
1061		 * the holes in the cpumap.
1062		 */
1063		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
1064			if (cpumask_test_cpu(cpu,
1065					node_to_cpumask_map[node])) {
1066				if (count == 0)
1067					pr_cont(" %u", cpu);
1068				++count;
1069			} else {
1070				if (count > 1)
1071					pr_cont("-%u", cpu - 1);
1072				count = 0;
1073			}
1074		}
1075
1076		if (count > 1)
1077			pr_cont("-%u", nr_cpu_ids - 1);
1078		pr_cont("\n");
1079	}
1080}
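/*
 * Example output (illustrative): for a node whose cpumask covers cpus
 * 0-7 and 16-23 the loop above prints
 *
 *   Node 0 CPUs: 0-7 16-23
 *
 * i.e. contiguous ranges are collapsed and holes in the cpumask show up
 * as separate ranges.
 */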
1081
1082/* Initialize NODE_DATA for a node on the local memory */
1083static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
1084{
1085	u64 spanned_pages = end_pfn - start_pfn;
1086	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
1087	u64 nd_pa;
1088	void *nd;
1089	int tnid;
1090
1091	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
1092	if (!nd_pa)
1093		panic("Cannot allocate %zu bytes for node %d data\n",
1094		      nd_size, nid);
1095
1096	nd = __va(nd_pa);
1097
1098	/* report and initialize */
1099	pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
1100		nd_pa, nd_pa + nd_size - 1);
1101	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
1102	if (tnid != nid)
1103		pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);
1104
1105	node_data[nid] = nd;
1106	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
1107	NODE_DATA(nid)->node_id = nid;
1108	NODE_DATA(nid)->node_start_pfn = start_pfn;
1109	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
1110}
1111
1112static void __init find_possible_nodes(void)
1113{
1114	struct device_node *rtas;
1115	const __be32 *domains = NULL;
1116	int prop_length, max_nodes;
1117	u32 i;
1118
1119	if (!numa_enabled)
1120		return;
1121
1122	rtas = of_find_node_by_path("/rtas");
1123	if (!rtas)
1124		return;
1125
1126	/*
1127	 * ibm,current-associativity-domains is a fairly recent property. If
1128	 * it doesn't exist, then fall back on ibm,max-associativity-domains.
1129	 * Current denotes what the platform can support, compared to max,
1130	 * which denotes what the hypervisor can support.
1131	 *
1132	 * If the LPAR is migratable, new nodes might be activated after an LPM,
1133	 * so we should consider the max number in that case.
1134	 */
1135	if (!of_get_property(of_root, "ibm,migratable-partition", NULL))
1136		domains = of_get_property(rtas,
1137					  "ibm,current-associativity-domains",
1138					  &prop_length);
1139	if (!domains) {
1140		domains = of_get_property(rtas, "ibm,max-associativity-domains",
1141					&prop_length);
1142		if (!domains)
1143			goto out;
1144	}
1145
1146	max_nodes = of_read_number(&domains[primary_domain_index], 1);
1147	pr_info("Partition configured for %d NUMA nodes.\n", max_nodes);
1148
1149	for (i = 0; i < max_nodes; i++) {
1150		if (!node_possible(i))
1151			node_set(i, node_possible_map);
1152	}
1153
1154	prop_length /= sizeof(int);
1155	if (prop_length > primary_domain_index + 2)
1156		coregroup_enabled = 1;
1157
1158out:
1159	of_node_put(rtas);
1160}
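/*
 * Example with hypothetical property values: with
 * primary_domain_index == 4 and
 * ibm,current-associativity-domains = { 4, 2, 4, 4, 8 }, max_nodes is
 * read from cell 4 of the property, so nodes 0..7 are marked possible.
 * coregroup_enabled is set only when the property length (in cells)
 * exceeds primary_domain_index + 2.
 */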
1161
1162void __init mem_topology_setup(void)
1163{
1164	int cpu;
1165
1166	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1167	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
1168
1169	/*
1170	 * Linux/mm assumes node 0 to be online at boot. However, this is
1171	 * not true on PowerPC, where node 0 is similar to any other node:
1172	 * it could be a cpuless, memoryless node. So force node 0 to be
1173	 * offline for now. This prevents a cpuless, memoryless node 0 from
1174	 * showing up unnecessarily as online. If a node has cpus or memory
1175	 * that need to be online, then the node will be marked online anyway.
1176	 */
1177	node_set_offline(0);
1178
1179	if (parse_numa_properties())
1180		setup_nonnuma();
1181
1182	/*
1183	 * Modify the set of possible NUMA nodes to reflect information
1184	 * available about the set of online nodes, and the set of nodes
1185	 * that we expect to make use of for this platform's affinity
1186	 * calculations.
1187	 */
1188	nodes_and(node_possible_map, node_possible_map, node_online_map);
1189
1190	find_possible_nodes();
1191
1192	setup_node_to_cpumask_map();
1193
1194	reset_numa_cpu_lookup_table();
1195
1196	for_each_possible_cpu(cpu) {
1197		/*
1198		 * Powerpc with CONFIG_NUMA always used to have a node 0,
1199		 * even if it was memoryless or cpuless. For all cpus that
1200		 * are possible but not present, cpu_to_node() would point
1201		 * to node 0. To remove a cpuless, memoryless dummy node,
1202		 * powerpc needs to make sure cpu_to_node() for all possible
1203		 * but not present cpus is set to a proper node.
1204		 */
1205		numa_setup_cpu(cpu);
1206	}
1207}
1208
1209void __init initmem_init(void)
1210{
1211	int nid;
1212
1213	memblock_dump_all();
1214
1215	for_each_online_node(nid) {
1216		unsigned long start_pfn, end_pfn;
1217
1218		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1219		setup_node_data(nid, start_pfn, end_pfn);
1220	}
1221
1222	sparse_init();
1223
1224	/*
1225	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
1226	 * even before we online them, so that we can use cpu_to_{node,mem}
1227	 * early in boot, cf. smp_prepare_cpus().
1228	 * _nocalls() + manual invocation is used because cpuhp is not yet
1229	 * initialized for the boot CPU.
1230	 */
1231	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
1232				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
1233}
1234
1235static int __init early_numa(char *p)
1236{
1237	if (!p)
1238		return 0;
1239
1240	if (strstr(p, "off"))
1241		numa_enabled = 0;
1242
1243	p = strstr(p, "fake=");
1244	if (p)
1245		cmdline = p + strlen("fake=");
1246
1247	return 0;
1248}
1249early_param("numa", early_numa);
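/*
 * Usage examples: "numa=off" disables NUMA entirely, while
 * "numa=fake=1G,4G" enables the fake node layout consumed by
 * fake_numa_create_new_node() above.  Since strstr() is used, both
 * options may appear in the same "numa=" string.
 */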
1250
1251#ifdef CONFIG_MEMORY_HOTPLUG
1252/*
1253 * Find the node associated with a hot added memory section for
1254 * memory represented in the device tree by the property
1255 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
1256 */
1257static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
1258{
1259	struct drmem_lmb *lmb;
1260	unsigned long lmb_size;
1261	int nid = NUMA_NO_NODE;
1262
1263	lmb_size = drmem_lmb_size();
1264
1265	for_each_drmem_lmb(lmb) {
1266		/* skip this block if it is reserved or not assigned to
1267		 * this partition */
1268		if ((lmb->flags & DRCONF_MEM_RESERVED)
1269		    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
1270			continue;
1271
1272		if ((scn_addr < lmb->base_addr)
1273		    || (scn_addr >= (lmb->base_addr + lmb_size)))
1274			continue;
1275
1276		nid = of_drconf_to_nid_single(lmb);
1277		break;
1278	}
1279
1280	return nid;
1281}
1282
1283/*
1284 * Find the node associated with a hot added memory section for memory
1285 * represented in the device tree as a node (i.e. memory@XXXX) for
1286 * each memblock.
1287 */
1288static int hot_add_node_scn_to_nid(unsigned long scn_addr)
1289{
1290	struct device_node *memory;
1291	int nid = NUMA_NO_NODE;
1292
1293	for_each_node_by_type(memory, "memory") {
1294		int i = 0;
1295
1296		while (1) {
1297			struct resource res;
1298
1299			if (of_address_to_resource(memory, i++, &res))
1300				break;
1301
1302			if ((scn_addr < res.start) || (scn_addr > res.end))
1303				continue;
1304
1305			nid = of_node_to_nid_single(memory);
1306			break;
1307		}
1308
1309		if (nid >= 0)
1310			break;
1311	}
1312
1313	of_node_put(memory);
1314
1315	return nid;
1316}
1317
1318/*
1319 * Find the node associated with a hot added memory section.  Section
1320 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
1321 * sections are fully contained within a single MEMBLOCK.
1322 */
1323int hot_add_scn_to_nid(unsigned long scn_addr)
1324{
1325	struct device_node *memory = NULL;
1326	int nid;
1327
1328	if (!numa_enabled)
1329		return first_online_node;
1330
1331	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1332	if (memory) {
1333		nid = hot_add_drconf_scn_to_nid(scn_addr);
1334		of_node_put(memory);
1335	} else {
1336		nid = hot_add_node_scn_to_nid(scn_addr);
1337	}
1338
1339	if (nid < 0 || !node_possible(nid))
1340		nid = first_online_node;
1341
1342	return nid;
1343}
1344
1345static u64 hot_add_drconf_memory_max(void)
1346{
1347	struct device_node *memory = NULL;
1348	struct device_node *dn = NULL;
1349	const __be64 *lrdr = NULL;
1350
1351	dn = of_find_node_by_path("/rtas");
1352	if (dn) {
1353		lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
1354		of_node_put(dn);
1355		if (lrdr)
1356			return be64_to_cpup(lrdr);
1357	}
1358
1359	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1360	if (memory) {
1361		of_node_put(memory);
1362		return drmem_lmb_memory_max();
1363	}
1364	return 0;
1365}
1366
1367/*
1368 * memory_hotplug_max - return max address of memory that may be added
1369 *
1370 * This is currently only used on systems that support drconfig memory
1371 * hotplug.
1372 */
1373u64 memory_hotplug_max(void)
1374{
1375	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
1376}
1377#endif /* CONFIG_MEMORY_HOTPLUG */
1378
1379/* Virtual Processor Home Node (VPHN) support */
1380#ifdef CONFIG_PPC_SPLPAR
1381static int topology_inited;
1382
1383/*
1384 * Retrieve the new associativity information for a virtual processor's
1385 * home node.
1386 */
1387static long vphn_get_associativity(unsigned long cpu,
1388					__be32 *associativity)
1389{
1390	long rc;
1391
1392	rc = hcall_vphn(get_hard_smp_processor_id(cpu),
1393				VPHN_FLAG_VCPU, associativity);
1394
1395	switch (rc) {
1396	case H_SUCCESS:
1397		pr_debug("VPHN hcall succeeded. Reset polling...\n");
1398		goto out;
1399
1400	case H_FUNCTION:
1401		pr_err_ratelimited("VPHN unsupported. Disabling polling...\n");
1402		break;
1403	case H_HARDWARE:
1404		pr_err_ratelimited("hcall_vphn() experienced a hardware fault "
1405			"preventing VPHN. Disabling polling...\n");
1406		break;
1407	case H_PARAMETER:
1408		pr_err_ratelimited("hcall_vphn() was passed an invalid parameter. "
1409			"Disabling polling...\n");
1410		break;
1411	default:
1412		pr_err_ratelimited("hcall_vphn() returned %ld. Disabling polling...\n"
1413			, rc);
1414		break;
1415	}
1416out:
1417	return rc;
1418}
1419
1420void find_and_update_cpu_nid(int cpu)
1421{
1422	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1423	int new_nid;
1424
1425	/* Use associativity from first thread for all siblings */
1426	if (vphn_get_associativity(cpu, associativity))
1427		return;
1428
1429	/* We do not have a previous associativity, so find it now. */
1430	new_nid = associativity_to_nid(associativity);
1431
1432	if (new_nid < 0 || !node_possible(new_nid))
1433		new_nid = first_online_node;
1434	else
1435		// Associate node <-> cpu, so cpu_up() calls
1436		// try_online_node() on the right node.
1437		set_cpu_numa_node(cpu, new_nid);
1438
1439	pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__, cpu, new_nid);
1440}
1441
1442int cpu_to_coregroup_id(int cpu)
1443{
1444	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1445	int index;
1446
1447	if (cpu < 0 || cpu > nr_cpu_ids)
1448		return -1;
1449
1450	if (!coregroup_enabled)
1451		goto out;
1452
1453	if (!firmware_has_feature(FW_FEATURE_VPHN))
1454		goto out;
1455
1456	if (vphn_get_associativity(cpu, associativity))
1457		goto out;
1458
1459	index = of_read_number(associativity, 1);
1460	if (index > primary_domain_index + 1)
1461		return of_read_number(&associativity[index - 1], 1);
1462
1463out:
1464	return cpu_to_core_id(cpu);
1465}
1466
1467static int topology_update_init(void)
1468{
1469	topology_inited = 1;
1470	return 0;
1471}
1472device_initcall(topology_update_init);
1473#endif /* CONFIG_PPC_SPLPAR */
v3.5.6
 
   1/*
   2 * pSeries NUMA support
   3 *
   4 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License
   8 * as published by the Free Software Foundation; either version
   9 * 2 of the License, or (at your option) any later version.
  10 */
 
 
  11#include <linux/threads.h>
  12#include <linux/bootmem.h>
  13#include <linux/init.h>
  14#include <linux/mm.h>
  15#include <linux/mmzone.h>
  16#include <linux/export.h>
  17#include <linux/nodemask.h>
  18#include <linux/cpu.h>
  19#include <linux/notifier.h>
  20#include <linux/memblock.h>
  21#include <linux/of.h>
 
  22#include <linux/pfn.h>
  23#include <linux/cpuset.h>
  24#include <linux/node.h>
 
 
 
 
 
 
  25#include <asm/sparsemem.h>
  26#include <asm/prom.h>
  27#include <asm/smp.h>
 
  28#include <asm/firmware.h>
  29#include <asm/paca.h>
  30#include <asm/hvcall.h>
  31#include <asm/setup.h>
 
 
 
  32
  33static int numa_enabled = 1;
  34
  35static char *cmdline __initdata;
  36
  37static int numa_debug;
  38#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
  39
  40int numa_cpu_lookup_table[NR_CPUS];
  41cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
  42struct pglist_data *node_data[MAX_NUMNODES];
  43
  44EXPORT_SYMBOL(numa_cpu_lookup_table);
  45EXPORT_SYMBOL(node_to_cpumask_map);
  46EXPORT_SYMBOL(node_data);
  47
  48static int min_common_depth;
  49static int n_mem_addr_cells, n_mem_size_cells;
  50static int form1_affinity;
 
 
 
 
  51
  52#define MAX_DISTANCE_REF_POINTS 4
  53static int distance_ref_points_depth;
  54static const unsigned int *distance_ref_points;
  55static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
 
 
 
 
  56
  57/*
  58 * Allocate node_to_cpumask_map based on number of available nodes
  59 * Requires node_possible_map to be valid.
  60 *
  61 * Note: cpumask_of_node() is not valid until after this is done.
  62 */
  63static void __init setup_node_to_cpumask_map(void)
  64{
  65	unsigned int node, num = 0;
  66
  67	/* setup nr_node_ids if not done yet */
  68	if (nr_node_ids == MAX_NUMNODES) {
  69		for_each_node_mask(node, node_possible_map)
  70			num = node;
  71		nr_node_ids = num + 1;
  72	}
  73
  74	/* allocate the map */
  75	for (node = 0; node < nr_node_ids; node++)
  76		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
  77
  78	/* cpumask_of_node() will now work */
  79	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
  80}
  81
  82static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
  83						unsigned int *nid)
  84{
  85	unsigned long long mem;
  86	char *p = cmdline;
  87	static unsigned int fake_nid;
  88	static unsigned long long curr_boundary;
  89
  90	/*
  91	 * Modify node id, iff we started creating NUMA nodes
  92	 * We want to continue from where we left of the last time
  93	 */
  94	if (fake_nid)
  95		*nid = fake_nid;
  96	/*
  97	 * In case there are no more arguments to parse, the
  98	 * node_id should be the same as the last fake node id
  99	 * (we've handled this above).
 100	 */
 101	if (!p)
 102		return 0;
 103
 104	mem = memparse(p, &p);
 105	if (!mem)
 106		return 0;
 107
 108	if (mem < curr_boundary)
 109		return 0;
 110
 111	curr_boundary = mem;
 112
 113	if ((end_pfn << PAGE_SHIFT) > mem) {
 114		/*
 115		 * Skip commas and spaces
 116		 */
 117		while (*p == ',' || *p == ' ' || *p == '\t')
 118			p++;
 119
 120		cmdline = p;
 121		fake_nid++;
 122		*nid = fake_nid;
 123		dbg("created new fake_node with id %d\n", fake_nid);
 124		return 1;
 125	}
 126	return 0;
 127}
 128
 129/*
 130 * get_node_active_region - Return active region containing pfn
 131 * Active range returned is empty if none found.
 132 * @pfn: The page to return the region for
 133 * @node_ar: Returned set to the active region containing @pfn
 134 */
 135static void __init get_node_active_region(unsigned long pfn,
 136					  struct node_active_region *node_ar)
 137{
 138	unsigned long start_pfn, end_pfn;
 139	int i, nid;
 140
 141	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
 142		if (pfn >= start_pfn && pfn < end_pfn) {
 143			node_ar->nid = nid;
 144			node_ar->start_pfn = start_pfn;
 145			node_ar->end_pfn = end_pfn;
 146			break;
 147		}
 148	}
 149}
 150
 151static void map_cpu_to_node(int cpu, int node)
 152{
 153	numa_cpu_lookup_table[cpu] = node;
 154
 155	dbg("adding cpu %d to node %d\n", cpu, node);
 156
 157	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
 
 158		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
 
 159}
 160
 161#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
 162static void unmap_cpu_from_node(unsigned long cpu)
 163{
 164	int node = numa_cpu_lookup_table[cpu];
 165
 166	dbg("removing cpu %lu from node %d\n", cpu, node);
 167
 168	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
 169		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
 
 170	} else {
 171		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
 172		       cpu, node);
 173	}
 174}
 175#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
 176
 177/* must hold reference to node during call */
 178static const int *of_get_associativity(struct device_node *dev)
 179{
 180	return of_get_property(dev, "ibm,associativity", NULL);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 181}
 182
 183/*
 184 * Returns the property linux,drconf-usable-memory if
 185 * it exists (the property exists only in kexec/kdump kernels,
 186 * added by kexec-tools)
 187 */
 188static const u32 *of_get_usable_memory(struct device_node *memory)
 
 
 
 
 
 
 
 
 189{
 190	const u32 *prop;
 191	u32 len;
 192	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
 193	if (!prop || len < sizeof(unsigned int))
 
 
 
 
 194		return 0;
 195	return prop;
 
 
 
 196}
 197
 198int __node_distance(int a, int b)
 199{
 200	int i;
 201	int distance = LOCAL_DISTANCE;
 202
 203	if (!form1_affinity)
 204		return distance;
 205
 206	for (i = 0; i < distance_ref_points_depth; i++) {
 207		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
 
 208			break;
 209
 210		/* Double the distance for each NUMA level */
 211		distance *= 2;
 212	}
 213
 214	return distance;
 215}
 216
 217static void initialize_distance_lookup_table(int nid,
 218		const unsigned int *associativity)
 219{
 220	int i;
 
 
 
 
 
 221
 222	if (!form1_affinity)
 223		return;
 224
 225	for (i = 0; i < distance_ref_points_depth; i++) {
 226		distance_lookup_table[nid][i] =
 227			associativity[distance_ref_points[i]];
 228	}
 229}
 230
 231/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 232 * info is found.
 233 */
 234static int associativity_to_nid(const unsigned int *associativity)
 235{
 236	int nid = -1;
 
 237
 238	if (min_common_depth == -1)
 239		goto out;
 
 
 240
 241	if (associativity[0] >= min_common_depth)
 242		nid = associativity[min_common_depth];
 
 243
 244	/* POWER4 LPAR uses 0xffff as invalid node */
 245	if (nid == 0xffff || nid >= MAX_NUMNODES)
 246		nid = -1;
 247
 248	if (nid > 0 && associativity[0] >= distance_ref_points_depth)
 249		initialize_distance_lookup_table(nid, associativity);
 250
 251out:
 252	return nid;
 253}
 
 254
 255/* Returns the nid associated with the given device tree node,
 256 * or -1 if not found.
 257 */
 258static int of_node_to_nid_single(struct device_node *device)
 259{
 260	int nid = -1;
 261	const unsigned int *tmp;
 262
 263	tmp = of_get_associativity(device);
 264	if (tmp)
 265		nid = associativity_to_nid(tmp);
 266	return nid;
 267}
 268
 269/* Walk the device tree upwards, looking for an associativity id */
 270int of_node_to_nid(struct device_node *device)
 271{
 272	struct device_node *tmp;
 273	int nid = -1;
 274
 275	of_node_get(device);
 276	while (device) {
 277		nid = of_node_to_nid_single(device);
 278		if (nid != -1)
 279			break;
 280
 281	        tmp = device;
 282		device = of_get_parent(tmp);
 283		of_node_put(tmp);
 284	}
 285	of_node_put(device);
 286
 287	return nid;
 288}
 289EXPORT_SYMBOL_GPL(of_node_to_nid);
 290
 291static int __init find_min_common_depth(void)
 
 292{
 293	int depth;
 294	struct device_node *chosen;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 295	struct device_node *root;
 296	const char *vec5;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 297
 298	if (firmware_has_feature(FW_FEATURE_OPAL))
 299		root = of_find_node_by_path("/ibm,opal");
 300	else
 301		root = of_find_node_by_path("/rtas");
 302	if (!root)
 303		root = of_find_node_by_path("/");
 304
 305	/*
 306	 * This property is a set of 32-bit integers, each representing
 307	 * an index into the ibm,associativity nodes.
 308	 *
 309	 * With form 0 affinity the first integer is for an SMP configuration
 310	 * (should be all 0's) and the second is for a normal NUMA
 311	 * configuration. We have only one level of NUMA.
 312	 *
 313	 * With form 1 affinity the first integer is the most significant
 314	 * NUMA boundary and the following are progressively less significant
 315	 * boundaries. There can be more than one level of NUMA.
 316	 */
 317	distance_ref_points = of_get_property(root,
 318					"ibm,associativity-reference-points",
 319					&distance_ref_points_depth);
 320
 321	if (!distance_ref_points) {
 322		dbg("NUMA: ibm,associativity-reference-points not found.\n");
 323		goto err;
 324	}
 325
 326	distance_ref_points_depth /= sizeof(int);
 327
 328#define VEC5_AFFINITY_BYTE	5
 329#define VEC5_AFFINITY		0x80
 330
 331	if (firmware_has_feature(FW_FEATURE_OPAL))
 332		form1_affinity = 1;
 333	else {
 334		chosen = of_find_node_by_path("/chosen");
 335		if (chosen) {
 336			vec5 = of_get_property(chosen,
 337					       "ibm,architecture-vec-5", NULL);
 338			if (vec5 && (vec5[VEC5_AFFINITY_BYTE] &
 339							VEC5_AFFINITY)) {
 340				dbg("Using form 1 affinity\n");
 341				form1_affinity = 1;
 342			}
 343		}
 344	}
 345
 346	if (form1_affinity) {
 347		depth = distance_ref_points[0];
 348	} else {
 349		if (distance_ref_points_depth < 2) {
 350			printk(KERN_WARNING "NUMA: "
 351				"short ibm,associativity-reference-points\n");
 352			goto err;
 353		}
 354
 355		depth = distance_ref_points[1];
 
 
 
 
 
 
 356	}
 357
 358	/*
 359	 * Warn and cap if the hardware supports more than
 360	 * MAX_DISTANCE_REF_POINTS domains.
 361	 */
 362	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
 363		printk(KERN_WARNING "NUMA: distance array capped at "
 364			"%d entries\n", MAX_DISTANCE_REF_POINTS);
 365		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
 366	}
 367
 368	of_node_put(root);
 369	return depth;
 370
 371err:
 372	of_node_put(root);
 373	return -1;
 374}
 375
 376static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
 377{
 378	struct device_node *memory = NULL;
 379
 380	memory = of_find_node_by_type(memory, "memory");
 381	if (!memory)
 382		panic("numa.c: No memory nodes found!");
 383
 384	*n_addr_cells = of_n_addr_cells(memory);
 385	*n_size_cells = of_n_size_cells(memory);
 386	of_node_put(memory);
 387}
 388
 389static unsigned long read_n_cells(int n, const unsigned int **buf)
 390{
 391	unsigned long result = 0;
 392
 393	while (n--) {
 394		result = (result << 32) | **buf;
 395		(*buf)++;
 396	}
 397	return result;
 398}
 399
 400struct of_drconf_cell {
 401	u64	base_addr;
 402	u32	drc_index;
 403	u32	reserved;
 404	u32	aa_index;
 405	u32	flags;
 406};
 407
 408#define DRCONF_MEM_ASSIGNED	0x00000008
 409#define DRCONF_MEM_AI_INVALID	0x00000040
 410#define DRCONF_MEM_RESERVED	0x00000080
 411
 412/*
 413 * Read the next memblock list entry from the ibm,dynamic-memory property
 414 * and return the information in the provided of_drconf_cell structure.
 415 */
 416static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
 417{
 418	const u32 *cp;
 419
 420	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);
 421
 422	cp = *cellp;
 423	drmem->drc_index = cp[0];
 424	drmem->reserved = cp[1];
 425	drmem->aa_index = cp[2];
 426	drmem->flags = cp[3];
 427
 428	*cellp = cp + 4;
 429}
 430
 431/*
 432 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 433 *
 434 * The layout of the ibm,dynamic-memory property is a number N of memblock
 435 * list entries followed by N memblock list entries.  Each memblock list entry
 436 * contains information as laid out in the of_drconf_cell struct above.
 437 */
 438static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
 439{
 440	const u32 *prop;
 441	u32 len, entries;
 442
 443	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
 444	if (!prop || len < sizeof(unsigned int))
 445		return 0;
 446
 447	entries = *prop++;
 448
 449	/* Now that we know the number of entries, revalidate the size
 450	 * of the property read in to ensure we have everything
 451	 */
 452	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
 453		return 0;
 454
 455	*dm = prop;
 456	return entries;
 457}
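/*
 * Size check example: with entries = 2 and n_mem_addr_cells = 2, each list
 * entry occupies 2 + 4 = 6 cells, so the property must be at least
 * (2 * 6 + 1) * sizeof(unsigned int) = 52 bytes; the +1 is the leading
 * count cell itself.
 */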
 458
 459/*
 460 * Retrieve and validate the ibm,lmb-size property for drconf memory
 461 * from the device tree.
 462 */
 463static u64 of_get_lmb_size(struct device_node *memory)
 464{
 465	const u32 *prop;
 466	u32 len;
 467
 468	prop = of_get_property(memory, "ibm,lmb-size", &len);
 469	if (!prop || len < sizeof(unsigned int))
 470		return 0;
 471
 472	return read_n_cells(n_mem_size_cells, &prop);
 473}
 474
 475struct assoc_arrays {
 476	u32	n_arrays;
 477	u32	array_sz;
 478	const u32 *arrays;
 479};
 480
 481/*
 482 * Retrieve and validate the list of associativity arrays for drconf
 483 * memory from the ibm,associativity-lookup-arrays property of the
 484 * device tree.
 485 *
 486 * The layout of the ibm,associativity-lookup-arrays property is a number N
 487 * indicating the number of associativity arrays, followed by a number M
 488 * indicating the size of each associativity array, followed by a list
 489 * of N associativity arrays.
 490 */
 491static int of_get_assoc_arrays(struct device_node *memory,
 492			       struct assoc_arrays *aa)
 493{
 494	const u32 *prop;
 495	u32 len;
 496
 497	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
 498	if (!prop || len < 2 * sizeof(unsigned int))
 499		return -1;
 500
 501	aa->n_arrays = *prop++;
 502	aa->array_sz = *prop++;
 503
 504	/* Now that we know the number of arrays and size of each array,
 505	 * revalidate the size of the property read in.
 506	 */
 507	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
 508		return -1;
 509
 510	aa->arrays = prop;
 511	return 0;
 512}
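/*
 * Layout example (hypothetical cells): { 2, 4, 0,0,0,1, 0,0,0,3 } describes
 * N = 2 arrays of M = 4 entries each.  The property must then be at least
 * (2 * 4 + 2) * sizeof(unsigned int) = 40 bytes, and aa->arrays points at
 * the first of the eight array cells.
 */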
 513
 514/*
 515 * This is like of_node_to_nid_single() for memory represented in the
 516 * ibm,dynamic-reconfiguration-memory node.
 517 */
 518static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
 519				   struct assoc_arrays *aa)
 520{
 521	int default_nid = 0;
 522	int nid = default_nid;
 523	int index;
 524
 525	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
 526	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
 527	    drmem->aa_index < aa->n_arrays) {
 528		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
 529		nid = aa->arrays[index];
 530
 531		if (nid == 0xffff || nid >= MAX_NUMNODES)
 532			nid = default_nid;
 533	}
 534
 535	return nid;
 536}
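/*
 * Index example (hypothetical values): with aa_index = 1, array_sz = 4 and
 * min_common_depth = 2, the node id is read from cell 1 * 4 + 2 - 1 = 5 of
 * aa->arrays, i.e. the second entry of the second associativity array.
 */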
 537
 538/*
 539 * Figure out to which domain a cpu belongs and stick it there.
 540 * Return the id of the domain used.
 541 */
 542static int __cpuinit numa_setup_cpu(unsigned long lcpu)
 543{
 544	int nid = 0;
 545	struct device_node *cpu = of_get_cpu_node(lcpu, NULL);
 546
 547	if (!cpu) {
 548		WARN_ON(1);
 549		goto out;
 550	}
 551
 552	nid = of_node_to_nid_single(cpu);
 553
 554	if (nid < 0 || !node_online(nid))
 555		nid = first_online_node;
 556out:
 557	map_cpu_to_node(lcpu, nid);
 558
 559	of_node_put(cpu);
 560
 561	return nid;
 562}
 563
 564static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
 565			     unsigned long action,
 566			     void *hcpu)
 567{
 568	unsigned long lcpu = (unsigned long)hcpu;
 569	int ret = NOTIFY_DONE;
 570
 571	switch (action) {
 572	case CPU_UP_PREPARE:
 573	case CPU_UP_PREPARE_FROZEN:
 574		numa_setup_cpu(lcpu);
 575		ret = NOTIFY_OK;
 576		break;
 577#ifdef CONFIG_HOTPLUG_CPU
 578	case CPU_DEAD:
 579	case CPU_DEAD_FROZEN:
 580	case CPU_UP_CANCELED:
 581	case CPU_UP_CANCELED_FROZEN:
 582		unmap_cpu_from_node(lcpu);
 583		ret = NOTIFY_OK;
 584		break;
 585#endif
 586	}
 587	return ret;
 588}
 589
 590/*
 591 * Check and possibly modify a memory region to enforce the memory limit.
 592 *
 593 * Returns the size the region should have to enforce the memory limit.
 594 * This will either be the original value of size, a truncated value,
 595 * or zero. If the returned value of size is 0 the region should be
 596 * discarded as it lies wholly above the memory limit.
 597 */
 598static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 599						      unsigned long size)
 600{
 601	/*
 602	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
 603	 * we've already adjusted it for the limit and it takes care of
 604	 * having memory holes below the limit.  Also, in the case of
 605	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
 606	 */
 607
 608	if (start + size <= memblock_end_of_DRAM())
 609		return size;
 610
 611	if (start >= memblock_end_of_DRAM())
 612		return 0;
 613
 614	return memblock_end_of_DRAM() - start;
 615}
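/*
 * Example: if memblock_end_of_DRAM() is 0x40000000, a region starting at
 * 0x30000000 with size 0x20000000 is truncated to 0x10000000, while a
 * region starting at or above 0x40000000 is discarded entirely (size 0).
 */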
 616
 617/*
 618 * Reads the counter for a given entry in
 619 * linux,drconf-usable-memory property
 620 */
 621static inline int __init read_usm_ranges(const u32 **usm)
 622{
 623	/*
 624	 * For each LMB in ibm,dynamic-memory, the corresponding
 625	 * entry in the linux,drconf-usable-memory property contains
 626	 * a counter followed by that many (base, size) tuples.
 627	 * Read the counter from linux,drconf-usable-memory.
 628	 */
 629	return read_n_cells(n_mem_size_cells, usm);
 630}
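/*
 * Usable-memory example (hypothetical): a counter of 2 followed by two
 * (base, size) tuples restricts the LMB to those two sub-ranges; a counter
 * of 0 makes the caller skip the LMB altogether.
 */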
 631
 632/*
 633 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 634 * node.  This assumes n_mem_{addr,size}_cells have been set.
 635 */
 636static void __init parse_drconf_memory(struct device_node *memory)
 637{
 638	const u32 *uninitialized_var(dm), *usm;
 639	unsigned int n, rc, ranges, is_kexec_kdump = 0;
 640	unsigned long lmb_size, base, size, sz;
 641	int nid;
 642	struct assoc_arrays aa = { .arrays = NULL };
 643
 644	n = of_get_drconf_memory(memory, &dm);
 645	if (!n)
 646		return;
 647
 648	lmb_size = of_get_lmb_size(memory);
 649	if (!lmb_size)
 650		return;
 651
 652	rc = of_get_assoc_arrays(memory, &aa);
 653	if (rc)
 654		return;
 655
 656	/* check if this is a kexec/kdump kernel */
 657	usm = of_get_usable_memory(memory);
 658	if (usm != NULL)
 659		is_kexec_kdump = 1;
 660
 661	for (; n != 0; --n) {
 662		struct of_drconf_cell drmem;
 663
 664		read_drconf_cell(&drmem, &dm);
 665
 666		/* skip this block if the reserved bit is set in flags (0x80)
 667		   or if the block is not assigned to this partition (0x8) */
 668		if ((drmem.flags & DRCONF_MEM_RESERVED)
 669		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
 670			continue;
 671
 672		base = drmem.base_addr;
 673		size = lmb_size;
 674		ranges = 1;
 675
 676		if (is_kexec_kdump) {
 677			ranges = read_usm_ranges(&usm);
 678			if (!ranges) /* there are no (base, size) tuples */
 679				continue;
 680		}
 681		do {
 682			if (is_kexec_kdump) {
 683				base = read_n_cells(n_mem_addr_cells, &usm);
 684				size = read_n_cells(n_mem_size_cells, &usm);
 685			}
 686			nid = of_drconf_to_nid_single(&drmem, &aa);
 687			fake_numa_create_new_node(
 688				((base + size) >> PAGE_SHIFT),
 689				&nid);
 690			node_set_online(nid);
 691			sz = numa_enforce_memory_limit(base, size);
 692			if (sz)
 693				memblock_set_node(base, sz, nid);
 694		} while (--ranges);
 695	}
 696}
 697
 698static int __init parse_numa_properties(void)
 699{
 700	struct device_node *memory;
 701	int default_nid = 0;
 702	unsigned long i;
 703
 704	if (numa_enabled == 0) {
 705		printk(KERN_WARNING "NUMA disabled by user\n");
 706		return -1;
 707	}
 708
 709	min_common_depth = find_min_common_depth();
 710
 711	if (min_common_depth < 0)
 712		return min_common_depth;
 713
 714	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
 715
 716	/*
 717	 * Even though we connect cpus to numa domains later in SMP
 718	 * init, we need to know the node ids now. This is because
 719	 * each node to be onlined must have NODE_DATA etc. backing it.
 720	 */
 721	for_each_present_cpu(i) {
 722		struct device_node *cpu;
 723		int nid;
 724
 725		cpu = of_get_cpu_node(i, NULL);
 726		BUG_ON(!cpu);
 727		nid = of_node_to_nid_single(cpu);
 728		of_node_put(cpu);
 729
 730		/*
 731		 * Don't fall back to default_nid yet -- we will plug
 732		 * cpus into nodes once the memory scan has discovered
 733		 * the topology.
 734		 */
 735		if (nid < 0)
 736			continue;
 737		node_set_online(nid);
 738	}
 739
 740	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
 741
 742	for_each_node_by_type(memory, "memory") {
 743		unsigned long start;
 744		unsigned long size;
 745		int nid;
 746		int ranges;
 747		const unsigned int *memcell_buf;
 748		unsigned int len;
 749
 750		memcell_buf = of_get_property(memory,
 751			"linux,usable-memory", &len);
 752		if (!memcell_buf || len <= 0)
 753			memcell_buf = of_get_property(memory, "reg", &len);
 754		if (!memcell_buf || len <= 0)
 755			continue;
 756
 757		/* number of (base, size) ranges in the property */
 758		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
 759new_range:
 760		/* these are order-sensitive, and modify the buffer pointer */
 761		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
 762		size = read_n_cells(n_mem_size_cells, &memcell_buf);
 763
 764		/*
 765		 * Assumption: either all memory nodes or none will
 766		 * have associativity properties.  If none, then
 767		 * everything goes to default_nid.
 768		 */
 769		nid = of_node_to_nid_single(memory);
 770		if (nid < 0)
 771			nid = default_nid;
 772
 773		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
 774		node_set_online(nid);
 775
 776		if (!(size = numa_enforce_memory_limit(start, size))) {
 777			if (--ranges)
 778				goto new_range;
 779			else
 780				continue;
 781		}
 782
 783		memblock_set_node(start, size, nid);
 784
 785		if (--ranges)
 786			goto new_range;
 787	}
 788
 789	/*
 790	 * Now do the same thing for each MEMBLOCK listed in the
 791	 * ibm,dynamic-memory property in the
 792	 * ibm,dynamic-reconfiguration-memory node.
 793	 */
 794	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
 795	if (memory)
 796		parse_drconf_memory(memory);
 797
 798	return 0;
 799}
 800
 801static void __init setup_nonnuma(void)
 802{
 803	unsigned long top_of_ram = memblock_end_of_DRAM();
 804	unsigned long total_ram = memblock_phys_mem_size();
 805	unsigned long start_pfn, end_pfn;
 806	unsigned int nid = 0;
 807	struct memblock_region *reg;
 808
 809	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
 810	       top_of_ram, total_ram);
 811	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 812	       (top_of_ram - total_ram) >> 20);
 813
 814	for_each_memblock(memory, reg) {
 815		start_pfn = memblock_region_memory_base_pfn(reg);
 816		end_pfn = memblock_region_memory_end_pfn(reg);
 817
 818		fake_numa_create_new_node(end_pfn, &nid);
 819		memblock_set_node(PFN_PHYS(start_pfn),
 820				  PFN_PHYS(end_pfn - start_pfn), nid);
 821		node_set_online(nid);
 822	}
 823}
 824
 825void __init dump_numa_cpu_topology(void)
 826{
 827	unsigned int node;
 828	unsigned int cpu, count;
 829
 830	if (min_common_depth == -1 || !numa_enabled)
 831		return;
 832
 833	for_each_online_node(node) {
 834		printk(KERN_DEBUG "Node %d CPUs:", node);
 835
 836		count = 0;
 837		/*
 838		 * If we used a CPU iterator here we would miss printing
 839		 * the holes in the cpumap.
 840		 */
 841		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
 842			if (cpumask_test_cpu(cpu,
 843					node_to_cpumask_map[node])) {
 844				if (count == 0)
 845					printk(" %u", cpu);
 846				++count;
 847			} else {
 848				if (count > 1)
 849					printk("-%u", cpu - 1);
 850				count = 0;
 851			}
 852		}
 853
 854		if (count > 1)
 855			printk("-%u", nr_cpu_ids - 1);
 856		printk("\n");
 857	}
 858}
 859
 860static void __init dump_numa_memory_topology(void)
 861{
 862	unsigned int node;
 863	unsigned int count;
 864
 865	if (min_common_depth == -1 || !numa_enabled)
 866		return;
 867
 868	for_each_online_node(node) {
 869		unsigned long i;
 870
 871		printk(KERN_DEBUG "Node %d Memory:", node);
 872
 873		count = 0;
 874
 875		for (i = 0; i < memblock_end_of_DRAM();
 876		     i += (1 << SECTION_SIZE_BITS)) {
 877			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
 878				if (count == 0)
 879					printk(" 0x%lx", i);
 880				++count;
 881			} else {
 882				if (count > 0)
 883					printk("-0x%lx", i);
 884				count = 0;
 885			}
 886		}
 887
 888		if (count > 0)
 889			printk("-0x%lx", i);
 890		printk("\n");
 891	}
 892}
 893
 894/*
 895 * Allocate some memory, using the memblock or bootmem allocator as
 896 * required. nid is the preferred node and end_pfn is the page frame
 897 * number of the highest address in the node.
 898 *
 899 * Returns the virtual address of the memory.
 900 */
 901static void __init *careful_zallocation(int nid, unsigned long size,
 902				       unsigned long align,
 903				       unsigned long end_pfn)
 904{
 905	void *ret;
 906	int new_nid;
 907	unsigned long ret_paddr;
 908
 909	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 910
 911	/* retry over all memory */
 912	if (!ret_paddr)
 913		ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());
 914
 915	if (!ret_paddr)
 916		panic("numa.c: cannot allocate %lu bytes for node %d",
 917		      size, nid);
 918
 919	ret = __va(ret_paddr);
 920
 921	/*
 922	 * We initialize the nodes in numeric order: 0, 1, 2...
 923	 * and hand over control from the MEMBLOCK allocator to the
 924	 * bootmem allocator.  If this function is called for
 925	 * node 5, then we know that all nodes <5 are using the
 926	 * bootmem allocator instead of the MEMBLOCK allocator.
 927	 *
 928	 * So, check the nid from which this allocation came
 929	 * and double check to see if we need to use bootmem
 930	 * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
 931	 * since it would be useless.
 932	 */
 933	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
 934	if (new_nid < nid) {
 935		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
 936				size, align, 0);
 937
 938		dbg("alloc_bootmem %p %lx\n", ret, size);
 939	}
 940
 941	memset(ret, 0, size);
 942	return ret;
 943}
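/*
 * Handoff example for the check above (hypothetical nids): if node 5 asks
 * for memory but the MEMBLOCK allocation lands in node 3's range, node 3
 * has already been handed over to bootmem, so the buffer is re-allocated
 * with __alloc_bootmem_node() from node 3 instead.
 */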
 944
 945static struct notifier_block __cpuinitdata ppc64_numa_nb = {
 946	.notifier_call = cpu_numa_callback,
 947	.priority = 1 /* Must run before sched domains notifier. */
 948};
 949
 950static void __init mark_reserved_regions_for_nid(int nid)
 951{
 952	struct pglist_data *node = NODE_DATA(nid);
 953	struct memblock_region *reg;
 954
 955	for_each_memblock(reserved, reg) {
 956		unsigned long physbase = reg->base;
 957		unsigned long size = reg->size;
 958		unsigned long start_pfn = physbase >> PAGE_SHIFT;
 959		unsigned long end_pfn = PFN_UP(physbase + size);
 960		struct node_active_region node_ar;
 961		unsigned long node_end_pfn = node->node_start_pfn +
 962					     node->node_spanned_pages;
 963
 964		/*
 965		 * Check to make sure that this memblock.reserved area is
 966		 * within the bounds of the node that we care about.
 967		 * Checking the nid of the start and end points is not
 968		 * sufficient because the reserved area could span the
 969		 * entire node.
 970		 */
 971		if (end_pfn <= node->node_start_pfn ||
 972		    start_pfn >= node_end_pfn)
 973			continue;
 974
 975		get_node_active_region(start_pfn, &node_ar);
 976		while (start_pfn < end_pfn &&
 977			node_ar.start_pfn < node_ar.end_pfn) {
 978			unsigned long reserve_size = size;
 979			/*
 980			 * if reserved region extends past active region
 981			 * then trim size to active region
 982			 */
 983			if (end_pfn > node_ar.end_pfn)
 984				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
 985					- physbase;
 986			/*
 987			 * Only worry about *this* node, others may not
 988			 * yet have valid NODE_DATA().
 989			 */
 990			if (node_ar.nid == nid) {
 991				dbg("reserve_bootmem %lx %lx nid=%d\n",
 992					physbase, reserve_size, node_ar.nid);
 993				reserve_bootmem_node(NODE_DATA(node_ar.nid),
 994						physbase, reserve_size,
 995						BOOTMEM_DEFAULT);
 996			}
 997			/*
 998			 * if reserved region is contained in the active region
 999			 * then done.
1000			 */
1001			if (end_pfn <= node_ar.end_pfn)
1002				break;
1003
1004			/*
1005			 * reserved region extends past the active region
1006			 *   get next active region that contains this
1007			 *   reserved region
1008			 */
1009			start_pfn = node_ar.end_pfn;
1010			physbase = start_pfn << PAGE_SHIFT;
1011			size = size - reserve_size;
1012			get_node_active_region(start_pfn, &node_ar);
1013		}
1014	}
1015}
1016
1017
1018void __init do_init_bootmem(void)
1019{
1020	int nid;
1021
1022	min_low_pfn = 0;
1023	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1024	max_pfn = max_low_pfn;
1025
1026	if (parse_numa_properties())
1027		setup_nonnuma();
1028	else
1029		dump_numa_memory_topology();
1030
1031	for_each_online_node(nid) {
1032		unsigned long start_pfn, end_pfn;
1033		void *bootmem_vaddr;
1034		unsigned long bootmap_pages;
1035
1036		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1037
1038		/*
1039		 * Allocate the node structure node local if possible
1040		 *
1041		 * Be careful moving this around, as it relies on all
1042		 * previous nodes' bootmem to be initialized and have
1043		 * all reserved areas marked.
1044		 */
1045		NODE_DATA(nid) = careful_zallocation(nid,
1046					sizeof(struct pglist_data),
1047					SMP_CACHE_BYTES, end_pfn);
1048
1049		dbg("node %d\n", nid);
1050		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
1051
1052		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
1053		NODE_DATA(nid)->node_start_pfn = start_pfn;
1054		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
1055
1056		if (NODE_DATA(nid)->node_spanned_pages == 0)
1057			continue;
1058
1059		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
1060		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
1061
1062		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
1063		bootmem_vaddr = careful_zallocation(nid,
1064					bootmap_pages << PAGE_SHIFT,
1065					PAGE_SIZE, end_pfn);
1066
1067		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);
1068
1069		init_bootmem_node(NODE_DATA(nid),
1070				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
1071				  start_pfn, end_pfn);
1072
1073		free_bootmem_with_active_regions(nid, end_pfn);
1074		/*
1075		 * Be very careful about moving this around.  Future
1076		 * calls to careful_zallocation() depend on this getting
1077		 * done correctly.
1078		 */
1079		mark_reserved_regions_for_nid(nid);
1080		sparse_memory_present_with_active_regions(nid);
1081	}
1082
1083	init_bootmem_done = 1;
1084
1085	/*
1086	 * Now bootmem is initialised we can create the node to cpumask
1087	 * lookup tables and setup the cpu callback to populate them.
1088	 */
1089	setup_node_to_cpumask_map();
1090
1091	register_cpu_notifier(&ppc64_numa_nb);
1092	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
1093			  (void *)(unsigned long)boot_cpuid);
1094}
1095
1096void __init paging_init(void)
1097{
1098	unsigned long max_zone_pfns[MAX_NR_ZONES];
1099	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
1100	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
1101	free_area_init_nodes(max_zone_pfns);
1102}
1103
1104static int __init early_numa(char *p)
1105{
1106	if (!p)
1107		return 0;
1108
1109	if (strstr(p, "off"))
1110		numa_enabled = 0;
1111
1112	if (strstr(p, "debug"))
1113		numa_debug = 1;
1114
1115	p = strstr(p, "fake=");
1116	if (p)
1117		cmdline = p + strlen("fake=");
1118
1119	return 0;
1120}
1121early_param("numa", early_numa);
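/*
 * Command line examples: "numa=off" disables NUMA, "numa=debug" enables the
 * dbg() output in this file, and a (hypothetical) "numa=fake=1G,4G" makes
 * fake_numa_create_new_node() start new fake nodes at the 1GB and 4GB
 * boundaries.  The options may be combined in a single numa= string.
 */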
1122
1123#ifdef CONFIG_MEMORY_HOTPLUG
1124/*
1125 * Find the node associated with a hot added memory section for
1126 * memory represented in the device tree by the property
1127 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
1128 */
1129static int hot_add_drconf_scn_to_nid(struct device_node *memory,
1130				     unsigned long scn_addr)
1131{
1132	const u32 *dm;
1133	unsigned int drconf_cell_cnt, rc;
1134	unsigned long lmb_size;
1135	struct assoc_arrays aa;
1136	int nid = -1;
1137
1138	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
1139	if (!drconf_cell_cnt)
1140		return -1;
1141
1142	lmb_size = of_get_lmb_size(memory);
1143	if (!lmb_size)
1144		return -1;
1145
1146	rc = of_get_assoc_arrays(memory, &aa);
1147	if (rc)
1148		return -1;
1149
1150	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
1151		struct of_drconf_cell drmem;
1152
1153		read_drconf_cell(&drmem, &dm);
1154
1155		/* skip this block if it is reserved or not assigned to
1156		 * this partition */
1157		if ((drmem.flags & DRCONF_MEM_RESERVED)
1158		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
1159			continue;
1160
1161		if ((scn_addr < drmem.base_addr)
1162		    || (scn_addr >= (drmem.base_addr + lmb_size)))
1163			continue;
1164
1165		nid = of_drconf_to_nid_single(&drmem, &aa);
1166		break;
1167	}
1168
1169	return nid;
1170}
1171
1172/*
1173 * Find the node associated with a hot added memory section for memory
1174 * represented in the device tree as a node (i.e. memory@XXXX) for
1175 * each memblock.
1176 */
1177int hot_add_node_scn_to_nid(unsigned long scn_addr)
1178{
1179	struct device_node *memory;
1180	int nid = -1;
1181
1182	for_each_node_by_type(memory, "memory") {
1183		unsigned long start, size;
1184		int ranges;
1185		const unsigned int *memcell_buf;
1186		unsigned int len;
1187
1188		memcell_buf = of_get_property(memory, "reg", &len);
1189		if (!memcell_buf || len <= 0)
1190			continue;
1191
1192		/* number of (base, size) ranges in the property */
1193		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
1194
1195		while (ranges--) {
1196			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
1197			size = read_n_cells(n_mem_size_cells, &memcell_buf);
1198
1199			if ((scn_addr < start) || (scn_addr >= (start + size)))
1200				continue;
1201
1202			nid = of_node_to_nid_single(memory);
1203			break;
1204		}
1205
1206		if (nid >= 0)
1207			break;
1208	}
1209
1210	of_node_put(memory);
1211
1212	return nid;
1213}
1214
1215/*
1216 * Find the node associated with a hot added memory section.  Section
1217 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
1218 * sections are fully contained within a single MEMBLOCK.
1219 */
1220int hot_add_scn_to_nid(unsigned long scn_addr)
1221{
1222	struct device_node *memory = NULL;
1223	int nid, found = 0;
1224
1225	if (!numa_enabled || (min_common_depth < 0))
1226		return first_online_node;
1227
1228	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1229	if (memory) {
1230		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
1231		of_node_put(memory);
1232	} else {
1233		nid = hot_add_node_scn_to_nid(scn_addr);
1234	}
1235
1236	if (nid < 0 || !node_online(nid))
1237		nid = first_online_node;
1238
1239	if (NODE_DATA(nid)->node_spanned_pages)
1240		return nid;
1241
1242	for_each_online_node(nid) {
1243		if (NODE_DATA(nid)->node_spanned_pages) {
1244			found = 1;
1245			break;
1246		}
1247	}
1248
1249	BUG_ON(!found);
1250	return nid;
1251}
1252
1253static u64 hot_add_drconf_memory_max(void)
1254{
1255	struct device_node *memory = NULL;
1256	unsigned int drconf_cell_cnt = 0;
1257	u64 lmb_size = 0;
1258	const u32 *dm = NULL;
1259
1260	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1261	if (memory) {
1262		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
1263		lmb_size = of_get_lmb_size(memory);
1264		of_node_put(memory);
1265	}
1266	return lmb_size * drconf_cell_cnt;
1267}
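/*
 * Example (hypothetical): with a 256MB LMB size and 64 entries in
 * ibm,dynamic-memory, the maximum hot-addable address is 64 * 256MB = 16GB.
 */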
1268
1269/*
1270 * memory_hotplug_max - return max address of memory that may be added
1271 *
1272 * This is currently only used on systems that support drconfig memory
1273 * hotplug.
1274 */
1275u64 memory_hotplug_max(void)
1276{
1277	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
1278}
1279#endif /* CONFIG_MEMORY_HOTPLUG */
1280
1281/* Virtual Processor Home Node (VPHN) support */
1282#ifdef CONFIG_PPC_SPLPAR
1283static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
1284static cpumask_t cpu_associativity_changes_mask;
1285static int vphn_enabled;
1286static void set_topology_timer(void);
1287
1288/*
1289 * Record the current values of the hypervisor's associativity change
1290 * counters so that later changes can be detected.
1291 */
1292static void setup_cpu_associativity_change_counters(void)
1293{
1294	int cpu;
1295
1296	/* The VPHN feature supports a maximum of 8 reference points */
1297	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
1298
1299	for_each_possible_cpu(cpu) {
1300		int i;
1301		u8 *counts = vphn_cpu_change_counts[cpu];
1302		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
1303
1304		for (i = 0; i < distance_ref_points_depth; i++)
1305			counts[i] = hypervisor_counts[i];
1306	}
1307}
1308
1309/*
1310 * The hypervisor maintains a set of 8 associativity change counters in
1311 * the VPA of each cpu that correspond to the associativity levels in the
1312 * ibm,associativity-reference-points property. When an associativity
1313 * level changes, the corresponding counter is incremented.
1314 *
1315 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
1316 * node associativity levels have changed.
1317 *
1318 * Returns the number of cpus with unhandled associativity changes.
1319 */
1320static int update_cpu_associativity_changes_mask(void)
1321{
1322	int cpu, nr_cpus = 0;
1323	cpumask_t *changes = &cpu_associativity_changes_mask;
1324
1325	cpumask_clear(changes);
1326
1327	for_each_possible_cpu(cpu) {
1328		int i, changed = 0;
1329		u8 *counts = vphn_cpu_change_counts[cpu];
1330		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
1331
1332		for (i = 0; i < distance_ref_points_depth; i++) {
1333			if (hypervisor_counts[i] != counts[i]) {
1334				counts[i] = hypervisor_counts[i];
1335				changed = 1;
1336			}
1337		}
1338		if (changed) {
1339			cpumask_set_cpu(cpu, changes);
1340			nr_cpus++;
1341		}
1342	}
1343
1344	return nr_cpus;
1345}
1346
1347/*
1348 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
1349 * the complete property we have to add the length in the first cell.
1350 */
1351#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
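/* That is 6 * 8 / 4 + 1 = 13 u32 cells in total. */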
1352
1353/*
1354 * Convert the associativity domain numbers returned from the hypervisor
1355 * to the sequence they would appear in the ibm,associativity property.
1356 */
1357static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
1358{
1359	int i, nr_assoc_doms = 0;
1360	const u16 *field = (const u16*) packed;
1361
1362#define VPHN_FIELD_UNUSED	(0xffff)
1363#define VPHN_FIELD_MSB		(0x8000)
1364#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)
1365
1366	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
1367		if (*field == VPHN_FIELD_UNUSED) {
1368			/* All significant fields processed, and remaining
1369			 * fields contain the reserved value of all 1's.
1370			 * Just store them.
1371			 */
1372			unpacked[i] = *((u32*)field);
1373			field += 2;
1374		} else if (*field & VPHN_FIELD_MSB) {
1375			/* Data is in the lower 15 bits of this field */
1376			unpacked[i] = *field & VPHN_FIELD_MASK;
1377			field++;
1378			nr_assoc_doms++;
1379		} else {
1380			/* Data is in the lower 15 bits of this field
1381			 * concatenated with the next 16 bit field
1382			 */
1383			unpacked[i] = *((u32*)field);
1384			field += 2;
1385			nr_assoc_doms++;
1386		}
1387	}
1388
1389	/* The first cell contains the length of the property */
1390	unpacked[0] = nr_assoc_doms;
1391
1392	return nr_assoc_doms;
1393}
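/*
 * Unpacking example (hypothetical values): a 16-bit field of 0x8005 has the
 * MSB set, so it unpacks to the single domain number 5.  The field pair
 * 0x0001 0x0002 has the MSB clear, so both fields concatenate into the
 * 32-bit domain number 0x00010002.  A field of 0xffff marks the remaining
 * fields as unused.
 */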
1394
1395/*
1396 * Retrieve the new associativity information for a virtual processor's
1397 * home node.
1398 */
1399static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
1400{
1401	long rc;
1402	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
1403	u64 flags = 1;
1404	int hwcpu = get_hard_smp_processor_id(cpu);
1405
1406	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
1407	vphn_unpack_associativity(retbuf, associativity);
1408
1409	return rc;
1410}
1411
1412static long vphn_get_associativity(unsigned long cpu,
1413					unsigned int *associativity)
1414{
1415	long rc;
1416
1417	rc = hcall_vphn(cpu, associativity);
1418
1419	switch (rc) {
1420	case H_FUNCTION:
1421		printk(KERN_INFO
1422			"VPHN is not supported. Disabling polling...\n");
1423		stop_topology_update();
1424		break;
1425	case H_HARDWARE:
1426		printk(KERN_ERR
1427			"hcall_vphn() experienced a hardware fault "
1428			"preventing VPHN. Disabling polling...\n");
1429		stop_topology_update();
1430	}
1431
1432	return rc;
1433}
1434
1435/*
1436 * Update the node maps and sysfs entries for each cpu whose home node
1437 * has changed.
1438 */
1439int arch_update_cpu_topology(void)
1440{
1441	int cpu, nid, old_nid;
1442	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
1443	struct device *dev;
1444
1445	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
1446		vphn_get_associativity(cpu, associativity);
1447		nid = associativity_to_nid(associativity);
1448
1449		if (nid < 0 || !node_online(nid))
1450			nid = first_online_node;
1451
1452		old_nid = numa_cpu_lookup_table[cpu];
1453
1454		/* Disable hotplug while we update the cpu
1455		 * masks and sysfs.
1456		 */
1457		get_online_cpus();
1458		unregister_cpu_under_node(cpu, old_nid);
1459		unmap_cpu_from_node(cpu);
1460		map_cpu_to_node(cpu, nid);
1461		register_cpu_under_node(cpu, nid);
1462		put_online_cpus();
1463
1464		dev = get_cpu_device(cpu);
1465		if (dev)
1466			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
1467	}
1468
1469	return 1;
1470}
1471
1472static void topology_work_fn(struct work_struct *work)
1473{
1474	rebuild_sched_domains();
1475}
1476static DECLARE_WORK(topology_work, topology_work_fn);
1477
1478void topology_schedule_update(void)
1479{
1480	schedule_work(&topology_work);
1481}
1482
1483static void topology_timer_fn(unsigned long ignored)
1484{
1485	if (!vphn_enabled)
1486		return;
1487	if (update_cpu_associativity_changes_mask() > 0)
1488		topology_schedule_update();
1489	set_topology_timer();
1490}
1491static struct timer_list topology_timer =
1492	TIMER_INITIALIZER(topology_timer_fn, 0, 0);
1493
1494static void set_topology_timer(void)
1495{
1496	topology_timer.data = 0;
1497	topology_timer.expires = jiffies + 60 * HZ;
1498	add_timer(&topology_timer);
1499}
1500
1501/*
1502 * Start polling for VPHN associativity changes.
1503 */
1504int start_topology_update(void)
1505{
1506	int rc = 0;
1507
1508	/* Disabled until races with load balancing are fixed */
1509	if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
1510	    get_lppaca()->shared_proc) {
1511		vphn_enabled = 1;
1512		setup_cpu_associativity_change_counters();
1513		init_timer_deferrable(&topology_timer);
1514		set_topology_timer();
1515		rc = 1;
1516	}
1517
1518	return rc;
1519}
1520__initcall(start_topology_update);
1521
1522/*
1523 * Disable polling for VPHN associativity changes.
1524 */
1525int stop_topology_update(void)
1526{
1527	vphn_enabled = 0;
1528	return del_timer_sync(&topology_timer);
1529}
1530#endif /* CONFIG_PPC_SPLPAR */