// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>
#include <asm/drmem.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int primary_domain_index;
static int n_mem_addr_cells, n_mem_size_cells;

#define FORM0_AFFINITY 0
#define FORM1_AFFINITY 1
#define FORM2_AFFINITY 2
static int affinity_form;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
static int numa_distance_table[MAX_NUMNODES][MAX_NUMNODES] = {
	[0 ... MAX_NUMNODES - 1] = { [0 ... MAX_NUMNODES - 1] = -1 }
};
static int numa_id_index_table[MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = NUMA_NO_NODE };

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for_each_node(node)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}

static int __init fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id iff we have started creating NUMA nodes.
	 * We want to continue from where we left off last time.
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		pr_debug("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}

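/*
 * Worked example (illustrative, not part of the original source): booting
 * with "numa=fake=512M,1G" leaves cmdline pointing at "512M,1G".  The
 * first memory range whose end crosses 512M creates fake node 1 and the
 * first one crossing 1G creates fake node 2, carving memory up roughly as
 * [0, 512M) -> node 0, [512M, 1G) -> node 1, [1G, top) -> node 2.
 */
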
static void __init reset_numa_cpu_lookup_table(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		numa_cpu_lookup_table[cpu] = -1;
}

void map_cpu_to_node(int cpu, int node)
{
	update_numa_cpu_lookup_table(cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) {
		pr_debug("adding cpu %d to node %d\n", cpu, node);
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
	}
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
		pr_debug("removing cpu %lu from node %d\n", cpu, node);
	} else {
		pr_warn("Warning: cpu %lu not found in node %d\n", cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

static int __associativity_to_nid(const __be32 *associativity,
				  int max_array_sz)
{
	int nid;
	/*
	 * primary_domain_index is a 1-based array index.
	 */
	int index = primary_domain_index - 1;

	if (!numa_enabled || index >= max_array_sz)
		return NUMA_NO_NODE;

	nid = of_read_number(&associativity[index], 1);

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= nr_node_ids)
		nid = NUMA_NO_NODE;
	return nid;
}

/*
 * Returns nid in the range [0..nr_node_ids - 1], or -1 if no useful
 * NUMA info is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
	int array_sz = of_read_number(associativity, 1);

	/* Skip the first element in the associativity array */
	return __associativity_to_nid((associativity + 1), array_sz);
}

static int __cpu_form2_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	int dist;
	int node1, node2;

	node1 = associativity_to_nid(cpu1_assoc);
	node2 = associativity_to_nid(cpu2_assoc);

	dist = numa_distance_table[node1][node2];
	if (dist <= LOCAL_DISTANCE)
		return 0;
	else if (dist <= REMOTE_DISTANCE)
		return 1;
	else
		return 2;
}

static int __cpu_form1_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	int dist = 0;
	int i, index;

	for (i = 0; i < distance_ref_points_depth; i++) {
		index = be32_to_cpu(distance_ref_points[i]);
		if (cpu1_assoc[index] == cpu2_assoc[index])
			break;
		dist++;
	}

	return dist;
}

int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	/* We should not get called with FORM0 */
	VM_WARN_ON(affinity_form == FORM0_AFFINITY);
	if (affinity_form == FORM1_AFFINITY)
		return __cpu_form1_relative_distance(cpu1_assoc, cpu2_assoc);
	return __cpu_form2_relative_distance(cpu1_assoc, cpu2_assoc);
}

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (affinity_form == FORM2_AFFINITY)
		return numa_distance_table[a][b];
	else if (affinity_form == FORM0_AFFINITY)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
EXPORT_SYMBOL(__node_distance);
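
/*
 * Worked example (illustrative, not from the original source): with a
 * form 1 distance_ref_points_depth of 2, two nodes whose
 * distance_lookup_table entries differ at both reference points get
 * LOCAL_DISTANCE doubled once per mismatched level: 10 * 2 * 2 = 40.
 */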

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = NUMA_NO_NODE;
	const __be32 *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	int nid = NUMA_NO_NODE;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		device = of_get_next_parent(device);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL(of_node_to_nid);

static void __initialize_form1_numa_distance(const __be32 *associativity,
					     int max_array_sz)
{
	int i, nid;

	if (affinity_form != FORM1_AFFINITY)
		return;

	nid = __associativity_to_nid(associativity, max_array_sz);
	if (nid != NUMA_NO_NODE) {
		for (i = 0; i < distance_ref_points_depth; i++) {
			const __be32 *entry;
			int index = be32_to_cpu(distance_ref_points[i]) - 1;

			/*
			 * broken hierarchy, return with broken distance table
			 */
			if (WARN(index >= max_array_sz, "Broken ibm,associativity property"))
				return;

			entry = &associativity[index];
			distance_lookup_table[nid][i] = of_read_number(entry, 1);
		}
	}
}

static void initialize_form1_numa_distance(const __be32 *associativity)
{
	int array_sz;

	array_sz = of_read_number(associativity, 1);
	/* Skip the first element in the associativity array */
	__initialize_form1_numa_distance(associativity + 1, array_sz);
}

/*
 * Used to update distance information w.r.t. a newly added node.
 */
void update_numa_distance(struct device_node *node)
{
	int nid;

	if (affinity_form == FORM0_AFFINITY)
		return;
	else if (affinity_form == FORM1_AFFINITY) {
		const __be32 *associativity;

		associativity = of_get_associativity(node);
		if (!associativity)
			return;

		initialize_form1_numa_distance(associativity);
		return;
	}

	/* FORM2 affinity */
	nid = of_node_to_nid_single(node);
	if (nid == NUMA_NO_NODE)
		return;

	/*
	 * With FORM2 we expect NUMA distance of all possible NUMA
	 * nodes to be provided during boot.
	 */
	WARN(numa_distance_table[nid][nid] == -1,
	     "NUMA distance details for node %d not provided\n", nid);
}

/*
 * ibm,numa-lookup-index-table = {N, domainid1, domainid2, ..., domainidN}
 * ibm,numa-distance-table = {N, 1, 2, 4, 5, 1, 6, ... N elements}
 */
static void __init initialize_form2_numa_distance_lookup_table(void)
{
	int i, j;
	struct device_node *root;
	const __u8 *form2_distances;
	const __be32 *numa_lookup_index;
	int form2_distances_length;
	int max_numa_index, distance_index;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	numa_lookup_index = of_get_property(root, "ibm,numa-lookup-index-table", NULL);
	max_numa_index = of_read_number(&numa_lookup_index[0], 1);

	/* The first element of the array is the size and is an encode-int */
	form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL);
	form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1);
	/* Skip the size, which is an encoded int */
	form2_distances += sizeof(__be32);

	pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n",
		 form2_distances_length, max_numa_index);

	for (i = 0; i < max_numa_index; i++)
		/* +1 skip the max_numa_index in the property */
		numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1);

	if (form2_distances_length != max_numa_index * max_numa_index) {
		WARN(1, "Wrong NUMA distance information\n");
		form2_distances = NULL; // don't use it
	}
	distance_index = 0;
	for (i = 0; i < max_numa_index; i++) {
		for (j = 0; j < max_numa_index; j++) {
			int nodeA = numa_id_index_table[i];
			int nodeB = numa_id_index_table[j];
			int dist;

			if (form2_distances)
				dist = form2_distances[distance_index++];
			else if (nodeA == nodeB)
				dist = LOCAL_DISTANCE;
			else
				dist = REMOTE_DISTANCE;
			numa_distance_table[nodeA][nodeB] = dist;
			pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist);
		}
	}

	of_node_put(root);
}
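
/*
 * Worked example (illustrative, not from the original source): given a
 * lookup-index-table listing the two node ids 0 and 4, and a
 * distance-table whose four distance bytes are 10, 40, 40, 10, the loop
 * above fills the row-major matrix numa_distance_table[0][0] = 10,
 * [0][4] = 40, [4][0] = 40 and [4][4] = 10.
 */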

static int __init find_primary_domain_index(void)
{
	int index;
	struct device_node *root;

	/*
	 * Check for which form of affinity.
	 */
	if (firmware_has_feature(FW_FEATURE_OPAL)) {
		affinity_form = FORM1_AFFINITY;
	} else if (firmware_has_feature(FW_FEATURE_FORM2_AFFINITY)) {
		pr_debug("Using form 2 affinity\n");
		affinity_form = FORM2_AFFINITY;
	} else if (firmware_has_feature(FW_FEATURE_FORM1_AFFINITY)) {
		pr_debug("Using form 1 affinity\n");
		affinity_form = FORM1_AFFINITY;
	} else
		affinity_form = FORM0_AFFINITY;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		pr_debug("ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);
	if (affinity_form == FORM0_AFFINITY) {
		if (distance_ref_points_depth < 2) {
			pr_warn("short ibm,associativity-reference-points\n");
			goto err;
		}

		index = of_read_number(&distance_ref_points[1], 1);
	} else {
		/*
		 * Both FORM1 and FORM2 affinity find the primary domain details
		 * at the same offset.
		 */
		index = of_read_number(distance_ref_points, 1);
	}
	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		pr_warn("distance array capped at %d entries\n",
			MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return index;

err:
	of_node_put(root);
	return -1;
}
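
/*
 * Worked example (illustrative, not from the original source): a form 1
 * ibm,associativity-reference-points = <0x4 0x2> gives
 * distance_ref_points_depth = 2 and returns index 4, i.e. the 4th
 * (1-based) cell of each ibm,associativity array is the primary (node
 * id) domain and the 2nd marks a less significant NUMA boundary.
 */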

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long read_n_cells(int n, const __be32 **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | of_read_number(*buf, 1);
		(*buf)++;
	}
	return result;
}

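/*
 * Worked example (illustrative, not from the original source): with
 * n == 2 and big-endian cells {0x1, 0x0}, read_n_cells() returns
 * (1UL << 32) | 0 = 0x100000000 and leaves *buf pointing past the
 * two cells that were consumed.
 */
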
struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct assoc_arrays *aa)
{
	struct device_node *memory;
	const __be32 *prop;
	u32 len;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return -1;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int)) {
		of_node_put(memory);
		return -1;
	}

	aa->n_arrays = of_read_number(prop++, 1);
	aa->array_sz = of_read_number(prop++, 1);

	of_node_put(memory);

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}
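
/*
 * Illustrative layout (not from the original source): a property of
 * <2 3 0 4 1 0 5 2> describes N = 2 arrays of M = 3 cells each, so
 * aa_index 0 selects {0, 4, 1} and aa_index 1 selects {0, 5, 2}.
 * Unlike ibm,associativity, these arrays carry no leading length cell.
 */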

static int __init get_nid_and_numa_distance(struct drmem_lmb *lmb)
{
	struct assoc_arrays aa = { .arrays = NULL };
	int default_nid = NUMA_NO_NODE;
	int nid = default_nid;
	int rc, index;

	if ((primary_domain_index < 0) || !numa_enabled)
		return default_nid;

	rc = of_get_assoc_arrays(&aa);
	if (rc)
		return default_nid;

	if (primary_domain_index <= aa.array_sz &&
	    !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
		const __be32 *associativity;

		index = lmb->aa_index * aa.array_sz;
		associativity = &aa.arrays[index];
		nid = __associativity_to_nid(associativity, aa.array_sz);
		if (nid > 0 && affinity_form == FORM1_AFFINITY) {
			/*
			 * lookup array associativity entries do not carry
			 * the array length as the first element.
			 */
			__initialize_form1_numa_distance(associativity, aa.array_sz);
		}
	}
	return nid;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
	struct assoc_arrays aa = { .arrays = NULL };
	int default_nid = NUMA_NO_NODE;
	int nid = default_nid;
	int rc, index;

	if ((primary_domain_index < 0) || !numa_enabled)
		return default_nid;

	rc = of_get_assoc_arrays(&aa);
	if (rc)
		return default_nid;

	if (primary_domain_index <= aa.array_sz &&
	    !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
		const __be32 *associativity;

		index = lmb->aa_index * aa.array_sz;
		associativity = &aa.arrays[index];
		nid = __associativity_to_nid(associativity, aa.array_sz);
	}
	return nid;
}

#ifdef CONFIG_PPC_SPLPAR

static int __vphn_get_associativity(long lcpu, __be32 *associativity)
{
	long rc, hwid;

	/*
	 * On a shared LPAR, the device tree will not have node
	 * associativity.  At this time the lppaca, or its __old_status
	 * field, may not be updated, so the kernel cannot detect whether
	 * it is on a shared LPAR.  Hence request an explicit associativity
	 * irrespective of whether the LPAR is shared or dedicated, and use
	 * the device tree property as a fallback.  cpu_to_phys_id is only
	 * valid between smp_setup_cpu_maps() and smp_setup_pacas().
	 */
	if (firmware_has_feature(FW_FEATURE_VPHN)) {
		if (cpu_to_phys_id)
			hwid = cpu_to_phys_id[lcpu];
		else
			hwid = get_hard_smp_processor_id(lcpu);

		rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity);
		if (rc == H_SUCCESS)
			return 0;
	}

	return -1;
}

static int vphn_get_nid(long lcpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};

	if (!__vphn_get_associativity(lcpu, associativity))
		return associativity_to_nid(associativity);

	return NUMA_NO_NODE;
}
#else

static int __vphn_get_associativity(long lcpu, __be32 *associativity)
{
	return -1;
}

static int vphn_get_nid(long unused)
{
	return NUMA_NO_NODE;
}
#endif /* CONFIG_PPC_SPLPAR */

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	struct device_node *cpu;
	int fcpu = cpu_first_thread_sibling(lcpu);
	int nid = NUMA_NO_NODE;

	if (!cpu_present(lcpu)) {
		set_cpu_numa_node(lcpu, first_online_node);
		return first_online_node;
	}

	/*
	 * If a valid cpu-to-node mapping is already available, use it
	 * directly instead of querying the firmware, since it represents
	 * the most recent mapping notified to us by the platform (eg: VPHN).
	 * Since the cpu_to_node binding remains the same for all threads in
	 * the core, if a valid cpu-to-node mapping is already available for
	 * the first thread in the core, use it.
	 */
	nid = numa_cpu_lookup_table[fcpu];
	if (nid >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	nid = vphn_get_nid(lcpu);
	if (nid != NUMA_NO_NODE)
		goto out_present;

	cpu = of_get_cpu_node(lcpu, NULL);
	if (!cpu) {
		WARN_ON(1);
		if (cpu_present(lcpu))
			goto out_present;
		else
			goto out;
	}

	nid = of_node_to_nid_single(cpu);
	of_node_put(cpu);

out_present:
	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	/*
	 * Update for the first thread of the core. All threads of a core
	 * have to be part of the same node. This not only avoids querying
	 * for every other thread in the core, but also avoids a case where
	 * a virtual node associativity change causes subsequent threads of
	 * a core to be associated with different nids. However if the first
	 * thread is already online, expect it to have a valid mapping.
	 */
	if (fcpu != lcpu) {
		WARN_ON(cpu_online(fcpu));
		map_cpu_to_node(fcpu, nid);
	}

	map_cpu_to_node(lcpu, nid);
out:
	return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
	int base, sibling, i;

	/* Verify that all the threads in the core belong to the same node */
	base = cpu_first_thread_sibling(cpu);

	for (i = 0; i < threads_per_core; i++) {
		sibling = base + i;

		if (sibling == cpu || cpu_is_offline(sibling))
			continue;

		if (cpu_to_node(sibling) != node) {
			WARN(1, "CPU thread siblings %d and %d don't belong to the same node!\n",
			     cpu, sibling);
			break;
		}
	}
}

/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
	int nid;

	nid = numa_setup_cpu(cpu);
	verify_cpu_node_mapping(cpu, nid);
	return 0;
}

static int ppc_numa_cpu_dead(unsigned int cpu)
{
	return 0;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.  Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */
	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}

/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in the linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) tuples.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}
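
/*
 * Worked example (illustrative, not from the original source): if
 * memblock_end_of_DRAM() is 4G, numa_enforce_memory_limit() leaves a
 * region [1G, 2G) untouched, truncates [3G, 5G) to a size of 1G, and
 * returns 0 for [4G, 6G) so that region is discarded.
 */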

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static int __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
					const __be32 **usm,
					void *data)
{
	unsigned int ranges, is_kexec_kdump = 0;
	unsigned long base, size, sz;
	int nid;

	/*
	 * Skip this block if the reserved bit is set in flags (0x80)
	 * or if the block is not assigned to this partition (0x8)
	 */
	if ((lmb->flags & DRCONF_MEM_RESERVED)
	    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
		return 0;

	if (*usm)
		is_kexec_kdump = 1;

	base = lmb->base_addr;
	size = drmem_lmb_size();
	ranges = 1;

	if (is_kexec_kdump) {
		ranges = read_usm_ranges(usm);
		if (!ranges) /* there are no (base, size) tuples */
			return 0;
	}

	do {
		if (is_kexec_kdump) {
			base = read_n_cells(n_mem_addr_cells, usm);
			size = read_n_cells(n_mem_size_cells, usm);
		}

		nid = get_nid_and_numa_distance(lmb);
		fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
					  &nid);
		node_set_online(nid);
		sz = numa_enforce_memory_limit(base, size);
		if (sz)
			memblock_set_node(base, sz, &memblock.memory, nid);
	} while (--ranges);

	return 0;
}

static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;
	const __be32 *associativity;

	if (numa_enabled == 0) {
		pr_warn("disabled by user\n");
		return -1;
	}

	primary_domain_index = find_primary_domain_index();

	if (primary_domain_index < 0) {
		/*
		 * If we fail to parse primary_domain_index from the device
		 * tree, mark NUMA as disabled and boot with NUMA disabled.
		 */
		numa_enabled = false;
		return primary_domain_index;
	}

	pr_debug("associativity depth for CPU/Memory: %d\n", primary_domain_index);

	/*
	 * If it is FORM2, initialize the distance table here.
	 */
	if (affinity_form == FORM2_AFFINITY)
		initialize_form2_numa_distance_lookup_table();

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		__be32 vphn_assoc[VPHN_ASSOC_BUFSIZE];
		struct device_node *cpu;
		int nid = NUMA_NO_NODE;

		memset(vphn_assoc, 0, VPHN_ASSOC_BUFSIZE * sizeof(__be32));

		if (__vphn_get_associativity(i, vphn_assoc) == 0) {
			nid = associativity_to_nid(vphn_assoc);
			initialize_form1_numa_distance(vphn_assoc);
		} else {
			/*
			 * Don't fall back to default_nid yet -- we will plug
			 * cpus into nodes once the memory scan has discovered
			 * the topology.
			 */
			cpu = of_get_cpu_node(i, NULL);
			BUG_ON(!cpu);

			associativity = of_get_associativity(cpu);
			if (associativity) {
				nid = associativity_to_nid(associativity);
				initialize_form1_numa_distance(associativity);
			}
			of_node_put(cpu);
		}

		/* node_set_online() is UB if 'nid' is negative */
		if (likely(nid >= 0))
			node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		associativity = of_get_associativity(memory);
		if (associativity) {
			nid = associativity_to_nid(associativity);
			initialize_form1_numa_distance(associativity);
		} else
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		size = numa_enforce_memory_limit(start, size);
		if (size)
			memblock_set_node(start, size, &memblock.memory, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		walk_drmem_lmbs(memory, NULL, numa_setup_drmem_lmb);
		of_node_put(memory);
	}

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	int i;

	pr_debug("Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram);
	pr_debug("Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20);

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, nid);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (!numa_enabled)
		return;

	for_each_online_node(node) {
		pr_info("Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
				if (count == 0)
					pr_cont(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					pr_cont("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			pr_cont("-%u", nr_cpu_ids - 1);
		pr_cont("\n");
	}
}

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
	u64 spanned_pages = end_pfn - start_pfn;
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	void *nd;
	int tnid;

	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa)
		panic("Cannot allocate %zu bytes for node %d data\n",
		      nd_size, nid);

	nd = __va(nd_pa);

	/* report and initialize */
	pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
		nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}

static void __init find_possible_nodes(void)
{
	struct device_node *rtas;
	const __be32 *domains = NULL;
	int prop_length, max_nodes;
	u32 i;

	if (!numa_enabled)
		return;

	rtas = of_find_node_by_path("/rtas");
	if (!rtas)
		return;

	/*
	 * ibm,current-associativity-domains is a fairly recent property. If
	 * it doesn't exist, then fall back on ibm,max-associativity-domains.
	 * Current denotes what the platform can support compared to max
	 * which denotes what the Hypervisor can support.
	 *
	 * If the LPAR is migratable, new nodes might be activated after an
	 * LPM, so we should consider the max number in that case.
	 */
	if (!of_get_property(of_root, "ibm,migratable-partition", NULL))
		domains = of_get_property(rtas,
					  "ibm,current-associativity-domains",
					  &prop_length);
	if (!domains) {
		domains = of_get_property(rtas, "ibm,max-associativity-domains",
					&prop_length);
		if (!domains)
			goto out;
	}

	max_nodes = of_read_number(&domains[primary_domain_index], 1);
	pr_info("Partition configured for %d NUMA nodes.\n", max_nodes);

	for (i = 0; i < max_nodes; i++) {
		if (!node_possible(i))
			node_set(i, node_possible_map);
	}

	prop_length /= sizeof(int);
	if (prop_length > primary_domain_index + 2)
		coregroup_enabled = 1;

out:
	of_node_put(rtas);
}

void __init mem_topology_setup(void)
{
	int cpu;

	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;

	/*
	 * Linux/mm assumes node 0 to be online at boot. However this is not
	 * true on PowerPC, where node 0 is similar to any other node: it
	 * could be a cpuless, memoryless node. So force node 0 to be offline
	 * for now. This will prevent a cpuless, memoryless node 0 showing up
	 * unnecessarily as online. If a node has cpus or memory that need
	 * to be online, then the node will be marked online anyway.
	 */
	node_set_offline(0);

	if (parse_numa_properties())
		setup_nonnuma();

	/*
	 * Modify the set of possible NUMA nodes to reflect information
	 * available about the set of online nodes, and the set of nodes
	 * that we expect to make use of for this platform's affinity
	 * calculations.
	 */
	nodes_and(node_possible_map, node_possible_map, node_online_map);

	find_possible_nodes();

	setup_node_to_cpumask_map();

	reset_numa_cpu_lookup_table();

	for_each_possible_cpu(cpu) {
		/*
		 * Powerpc with CONFIG_NUMA always used to have a node 0,
		 * even if it was memoryless or cpuless. For all cpus that
		 * are possible but not present, cpu_to_node() would point
		 * to node 0. To remove a cpuless, memoryless dummy node,
		 * powerpc needs to make sure all possible but not present
		 * cpu_to_node are set to a proper node.
		 */
		numa_setup_cpu(cpu);
	}
}

void __init initmem_init(void)
{
	int nid;

	memblock_dump_all();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		setup_node_data(nid, start_pfn, end_pfn);
	}

	sparse_init();

	/*
	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
	 * even before we online them, so that we can use cpu_to_{node,mem}
	 * early in boot, cf. smp_prepare_cpus().
	 * _nocalls() + manual invocation is used because cpuhp is not yet
	 * initialized for the boot CPU.
	 */
	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
{
	struct drmem_lmb *lmb;
	unsigned long lmb_size;
	int nid = NUMA_NO_NODE;

	lmb_size = drmem_lmb_size();

	for_each_drmem_lmb(lmb) {
		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((lmb->flags & DRCONF_MEM_RESERVED)
		    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < lmb->base_addr)
		    || (scn_addr >= (lmb->base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(lmb);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = NUMA_NO_NODE;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}

/*
 * Find the node associated with a hot added memory section.  A section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid;

	if (!numa_enabled)
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	struct device_node *dn = NULL;
	const __be64 *lrdr = NULL;

	dn = of_find_node_by_path("/rtas");
	if (dn) {
		lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
		of_node_put(dn);
		if (lrdr)
			return be64_to_cpup(lrdr);
	}

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		of_node_put(memory);
		return drmem_lmb_memory_max();
	}
	return 0;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
static int topology_inited;

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long vphn_get_associativity(unsigned long cpu,
					__be32 *associativity)
{
	long rc;

	rc = hcall_vphn(get_hard_smp_processor_id(cpu),
				VPHN_FLAG_VCPU, associativity);

	switch (rc) {
	case H_SUCCESS:
		pr_debug("VPHN hcall succeeded. Reset polling...\n");
		goto out;

	case H_FUNCTION:
		pr_err_ratelimited("VPHN unsupported. Disabling polling...\n");
		break;
	case H_HARDWARE:
		pr_err_ratelimited("hcall_vphn() experienced a hardware fault preventing VPHN. Disabling polling...\n");
		break;
	case H_PARAMETER:
		pr_err_ratelimited("hcall_vphn() was passed an invalid parameter. Disabling polling...\n");
		break;
	default:
		pr_err_ratelimited("hcall_vphn() returned %ld. Disabling polling...\n",
			rc);
		break;
	}
out:
	return rc;
}

void find_and_update_cpu_nid(int cpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	int new_nid;

	/* Use associativity from first thread for all siblings */
	if (vphn_get_associativity(cpu, associativity))
		return;

	/* We do not have a previous associativity, so find it now. */
	new_nid = associativity_to_nid(associativity);

	if (new_nid < 0 || !node_possible(new_nid))
		new_nid = first_online_node;
	else
		// Associate node <-> cpu, so cpu_up() calls
		// try_online_node() on the right node.
		set_cpu_numa_node(cpu, new_nid);

	pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__, cpu, new_nid);
}

int cpu_to_coregroup_id(int cpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	int index;

	if (cpu < 0 || cpu > nr_cpu_ids)
		return -1;

	if (!coregroup_enabled)
		goto out;

	if (!firmware_has_feature(FW_FEATURE_VPHN))
		goto out;

	if (vphn_get_associativity(cpu, associativity))
		goto out;

	index = of_read_number(associativity, 1);
	if (index > primary_domain_index + 1)
		return of_read_number(&associativity[index - 1], 1);

out:
	return cpu_to_core_id(cpu);
}
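
/*
 * Worked example (illustrative, not from the original source): with
 * primary_domain_index == 4 and an associativity array whose leading
 * length cell is 6, cpu_to_coregroup_id() returns associativity[5],
 * the domain one level below the node id; in every other case it
 * falls back to cpu_to_core_id().
 */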

static int topology_update_init(void)
{
	topology_inited = 1;
	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */
v4.6
 
   1/*
   2 * pSeries NUMA support
   3 *
   4 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License
   8 * as published by the Free Software Foundation; either version
   9 * 2 of the License, or (at your option) any later version.
  10 */
  11#define pr_fmt(fmt) "numa: " fmt
  12
  13#include <linux/threads.h>
  14#include <linux/bootmem.h>
  15#include <linux/init.h>
  16#include <linux/mm.h>
  17#include <linux/mmzone.h>
  18#include <linux/export.h>
  19#include <linux/nodemask.h>
  20#include <linux/cpu.h>
  21#include <linux/notifier.h>
  22#include <linux/memblock.h>
  23#include <linux/of.h>
  24#include <linux/pfn.h>
  25#include <linux/cpuset.h>
  26#include <linux/node.h>
  27#include <linux/stop_machine.h>
  28#include <linux/proc_fs.h>
  29#include <linux/seq_file.h>
  30#include <linux/uaccess.h>
  31#include <linux/slab.h>
  32#include <asm/cputhreads.h>
  33#include <asm/sparsemem.h>
  34#include <asm/prom.h>
  35#include <asm/smp.h>
  36#include <asm/cputhreads.h>
  37#include <asm/topology.h>
  38#include <asm/firmware.h>
  39#include <asm/paca.h>
  40#include <asm/hvcall.h>
  41#include <asm/setup.h>
  42#include <asm/vdso.h>
 
  43
  44static int numa_enabled = 1;
  45
  46static char *cmdline __initdata;
  47
  48static int numa_debug;
  49#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
  50
  51int numa_cpu_lookup_table[NR_CPUS];
  52cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
  53struct pglist_data *node_data[MAX_NUMNODES];
  54
  55EXPORT_SYMBOL(numa_cpu_lookup_table);
  56EXPORT_SYMBOL(node_to_cpumask_map);
  57EXPORT_SYMBOL(node_data);
  58
  59static int min_common_depth;
  60static int n_mem_addr_cells, n_mem_size_cells;
  61static int form1_affinity;
 
 
 
 
  62
  63#define MAX_DISTANCE_REF_POINTS 4
  64static int distance_ref_points_depth;
  65static const __be32 *distance_ref_points;
  66static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
 
 
 
 
  67
  68/*
  69 * Allocate node_to_cpumask_map based on number of available nodes
  70 * Requires node_possible_map to be valid.
  71 *
  72 * Note: cpumask_of_node() is not valid until after this is done.
  73 */
  74static void __init setup_node_to_cpumask_map(void)
  75{
  76	unsigned int node;
  77
  78	/* setup nr_node_ids if not done yet */
  79	if (nr_node_ids == MAX_NUMNODES)
  80		setup_nr_node_ids();
  81
  82	/* allocate the map */
  83	for_each_node(node)
  84		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
  85
  86	/* cpumask_of_node() will now work */
  87	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
  88}
  89
  90static int __init fake_numa_create_new_node(unsigned long end_pfn,
  91						unsigned int *nid)
  92{
  93	unsigned long long mem;
  94	char *p = cmdline;
  95	static unsigned int fake_nid;
  96	static unsigned long long curr_boundary;
  97
  98	/*
  99	 * Modify node id, iff we started creating NUMA nodes
 100	 * We want to continue from where we left of the last time
 101	 */
 102	if (fake_nid)
 103		*nid = fake_nid;
 104	/*
 105	 * In case there are no more arguments to parse, the
 106	 * node_id should be the same as the last fake node id
 107	 * (we've handled this above).
 108	 */
 109	if (!p)
 110		return 0;
 111
 112	mem = memparse(p, &p);
 113	if (!mem)
 114		return 0;
 115
 116	if (mem < curr_boundary)
 117		return 0;
 118
 119	curr_boundary = mem;
 120
 121	if ((end_pfn << PAGE_SHIFT) > mem) {
 122		/*
 123		 * Skip commas and spaces
 124		 */
 125		while (*p == ',' || *p == ' ' || *p == '\t')
 126			p++;
 127
 128		cmdline = p;
 129		fake_nid++;
 130		*nid = fake_nid;
 131		dbg("created new fake_node with id %d\n", fake_nid);
 132		return 1;
 133	}
 134	return 0;
 135}
 136
 137static void reset_numa_cpu_lookup_table(void)
 138{
 139	unsigned int cpu;
 140
 141	for_each_possible_cpu(cpu)
 142		numa_cpu_lookup_table[cpu] = -1;
 143}
 144
 145static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
 146{
 147	numa_cpu_lookup_table[cpu] = node;
 148}
 149
 150static void map_cpu_to_node(int cpu, int node)
 151{
 152	update_numa_cpu_lookup_table(cpu, node);
 153
 154	dbg("adding cpu %d to node %d\n", cpu, node);
 155
 156	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
 157		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
 
 158}
 159
 160#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
 161static void unmap_cpu_from_node(unsigned long cpu)
 162{
 163	int node = numa_cpu_lookup_table[cpu];
 164
 165	dbg("removing cpu %lu from node %d\n", cpu, node);
 166
 167	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
 168		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
 
 169	} else {
 170		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
 171		       cpu, node);
 172	}
 173}
 174#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
 175
 176/* must hold reference to node during call */
 177static const __be32 *of_get_associativity(struct device_node *dev)
 178{
 179	return of_get_property(dev, "ibm,associativity", NULL);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 180}
 181
 182/*
 183 * Returns the property linux,drconf-usable-memory if
 184 * it exists (the property exists only in kexec/kdump kernels,
 185 * added by kexec-tools)
 186 */
 187static const __be32 *of_get_usable_memory(struct device_node *memory)
 
 
 
 
 
 
 
 
 188{
 189	const __be32 *prop;
 190	u32 len;
 191	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
 192	if (!prop || len < sizeof(unsigned int))
 193		return NULL;
 194	return prop;
 
 
 
 
 
 
 
 195}
 196
 197int __node_distance(int a, int b)
 198{
 199	int i;
 200	int distance = LOCAL_DISTANCE;
 201
 202	if (!form1_affinity)
 203		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
 204
 205	for (i = 0; i < distance_ref_points_depth; i++) {
 206		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
 
 207			break;
 208
 209		/* Double the distance for each NUMA level */
 210		distance *= 2;
 211	}
 212
 213	return distance;
 214}
 215EXPORT_SYMBOL(__node_distance);
 216
 217static void initialize_distance_lookup_table(int nid,
 218		const __be32 *associativity)
 219{
 220	int i;
 
 
 
 
 
 221
 222	if (!form1_affinity)
 223		return;
 224
 225	for (i = 0; i < distance_ref_points_depth; i++) {
 226		const __be32 *entry;
 227
 228		entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
 229		distance_lookup_table[nid][i] = of_read_number(entry, 1);
 230	}
 231}
 232
 233/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 234 * info is found.
 235 */
 236static int associativity_to_nid(const __be32 *associativity)
 237{
 238	int nid = -1;
 
 239
 240	if (min_common_depth == -1)
 241		goto out;
 
 
 242
 243	if (of_read_number(associativity, 1) >= min_common_depth)
 244		nid = of_read_number(&associativity[min_common_depth], 1);
 
 245
 246	/* POWER4 LPAR uses 0xffff as invalid node */
 247	if (nid == 0xffff || nid >= MAX_NUMNODES)
 248		nid = -1;
 249
 250	if (nid > 0 &&
 251		of_read_number(associativity, 1) >= distance_ref_points_depth) {
 252		/*
 253		 * Skip the length field and send start of associativity array
 254		 */
 255		initialize_distance_lookup_table(nid, associativity + 1);
 256	}
 257
 258out:
 259	return nid;
 260}
 
 261
 262/* Returns the nid associated with the given device tree node,
 263 * or -1 if not found.
 264 */
 265static int of_node_to_nid_single(struct device_node *device)
 266{
 267	int nid = -1;
 268	const __be32 *tmp;
 269
 270	tmp = of_get_associativity(device);
 271	if (tmp)
 272		nid = associativity_to_nid(tmp);
 273	return nid;
 274}
 275
 276/* Walk the device tree upwards, looking for an associativity id */
 277int of_node_to_nid(struct device_node *device)
 278{
 279	int nid = -1;
 280
 281	of_node_get(device);
 282	while (device) {
 283		nid = of_node_to_nid_single(device);
 284		if (nid != -1)
 285			break;
 286
 287		device = of_get_next_parent(device);
 288	}
 289	of_node_put(device);
 290
 291	return nid;
 292}
 293EXPORT_SYMBOL_GPL(of_node_to_nid);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 294
 295static int __init find_min_common_depth(void)
 296{
 297	int depth;
 298	struct device_node *root;
 299
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 300	if (firmware_has_feature(FW_FEATURE_OPAL))
 301		root = of_find_node_by_path("/ibm,opal");
 302	else
 303		root = of_find_node_by_path("/rtas");
 304	if (!root)
 305		root = of_find_node_by_path("/");
 306
 307	/*
 308	 * This property is a set of 32-bit integers, each representing
 309	 * an index into the ibm,associativity nodes.
 310	 *
 311	 * With form 0 affinity the first integer is for an SMP configuration
 312	 * (should be all 0's) and the second is for a normal NUMA
 313	 * configuration. We have only one level of NUMA.
 314	 *
 315	 * With form 1 affinity the first integer is the most significant
 316	 * NUMA boundary and the following are progressively less significant
 317	 * boundaries. There can be more than one level of NUMA.
 318	 */
 319	distance_ref_points = of_get_property(root,
 320					"ibm,associativity-reference-points",
 321					&distance_ref_points_depth);
 322
 323	if (!distance_ref_points) {
 324		dbg("NUMA: ibm,associativity-reference-points not found.\n");
 325		goto err;
 326	}
 327
 328	distance_ref_points_depth /= sizeof(int);
 329
 330	if (firmware_has_feature(FW_FEATURE_OPAL) ||
 331	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
 332		dbg("Using form 1 affinity\n");
 333		form1_affinity = 1;
 334	}
 335
 336	if (form1_affinity) {
 337		depth = of_read_number(distance_ref_points, 1);
 338	} else {
 339		if (distance_ref_points_depth < 2) {
 340			printk(KERN_WARNING "NUMA: "
 341				"short ibm,associativity-reference-points\n");
 342			goto err;
 343		}
 344
 345		depth = of_read_number(&distance_ref_points[1], 1);
 
 
 
 
 
 
 346	}
 347
 348	/*
 349	 * Warn and cap if the hardware supports more than
 350	 * MAX_DISTANCE_REF_POINTS domains.
 351	 */
 352	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
 353		printk(KERN_WARNING "NUMA: distance array capped at "
 354			"%d entries\n", MAX_DISTANCE_REF_POINTS);
 355		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
 356	}
 357
 358	of_node_put(root);
 359	return depth;
 360
 361err:
 362	of_node_put(root);
 363	return -1;
 364}
 365
 366static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
 367{
 368	struct device_node *memory = NULL;
 369
 370	memory = of_find_node_by_type(memory, "memory");
 371	if (!memory)
 372		panic("numa.c: No memory nodes found!");
 373
 374	*n_addr_cells = of_n_addr_cells(memory);
 375	*n_size_cells = of_n_size_cells(memory);
 376	of_node_put(memory);
 377}
 378
 379static unsigned long read_n_cells(int n, const __be32 **buf)
 380{
 381	unsigned long result = 0;
 382
 383	while (n--) {
 384		result = (result << 32) | of_read_number(*buf, 1);
 385		(*buf)++;
 386	}
 387	return result;
 388}
 389
 390/*
 391 * Read the next memblock list entry from the ibm,dynamic-memory property
 392 * and return the information in the provided of_drconf_cell structure.
 393 */
 394static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
 395{
 396	const __be32 *cp;
 397
 398	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);
 399
 400	cp = *cellp;
 401	drmem->drc_index = of_read_number(cp, 1);
 402	drmem->reserved = of_read_number(&cp[1], 1);
 403	drmem->aa_index = of_read_number(&cp[2], 1);
 404	drmem->flags = of_read_number(&cp[3], 1);
 405
 406	*cellp = cp + 4;
 407}
 408
 409/*
 410 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 411 *
 412 * The layout of the ibm,dynamic-memory property is a number N of memblock
 413 * list entries followed by N memblock list entries.  Each memblock list entry
 414 * contains information as laid out in the of_drconf_cell struct above.
 415 */
 416static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
 417{
 418	const __be32 *prop;
 419	u32 len, entries;
 420
 421	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
 422	if (!prop || len < sizeof(unsigned int))
 423		return 0;
 424
 425	entries = of_read_number(prop++, 1);
 426
 427	/* Now that we know the number of entries, revalidate the size
 428	 * of the property read in to ensure we have everything
 429	 */
 430	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
 431		return 0;
 432
 433	*dm = prop;
 434	return entries;
 435}
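/*
 * Worked example (hypothetical sizes): with n_mem_addr_cells = 2 and
 * entries = 3, the check above requires at least (3 * (2 + 4) + 1) = 19
 * cells, i.e. 76 bytes: one count cell plus three 6-cell entries.
 */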
 436
 437/*
 438 * Retrieve and validate the ibm,lmb-size property for drconf memory
 439 * from the device tree.
 440 */
 441static u64 of_get_lmb_size(struct device_node *memory)
 442{
 443	const __be32 *prop;
 444	u32 len;
 445
 446	prop = of_get_property(memory, "ibm,lmb-size", &len);
 447	if (!prop || len < sizeof(unsigned int))
 448		return 0;
 449
 450	return read_n_cells(n_mem_size_cells, &prop);
 451}
 452
 453struct assoc_arrays {
 454	u32	n_arrays;
 455	u32	array_sz;
 456	const __be32 *arrays;
 457};
 458
 459/*
 460 * Retrieve and validate the list of associativity arrays for drconf
 461 * memory from the ibm,associativity-lookup-arrays property of the
 462 * device tree.
 463 *
 464 * The layout of the ibm,associativity-lookup-arrays property is a number N
 465 * indicating the number of associativity arrays, followed by a number M
 466 * indicating the size of each associativity array, followed by a list
 467 * of N associativity arrays.
 468 */
 469static int of_get_assoc_arrays(struct device_node *memory,
 470			       struct assoc_arrays *aa)
 471{
 472	const __be32 *prop;
 473	u32 len;
 474
 475	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
 476	if (!prop || len < 2 * sizeof(unsigned int))
 477		return -1;
 478
 479	aa->n_arrays = of_read_number(prop++, 1);
 480	aa->array_sz = of_read_number(prop++, 1);
 481
 482	/* Now that we know the number of arrays and size of each array,
 483	 * revalidate the size of the property read in.
 484	 */
 485	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
 486		return -1;
 487
 488	aa->arrays = prop;
 489	return 0;
 490}
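/*
 * Worked example (hypothetical sizes): for N = 2 arrays of M = 5 cells
 * each, the property must span at least (2 * 5 + 2) = 12 cells (48 bytes):
 * two header cells followed by the flattened arrays.
 */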
 491
 492/*
 493 * This is like of_node_to_nid_single() for memory represented in the
 494 * ibm,dynamic-reconfiguration-memory node.
 495 */
 496static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
 497				   struct assoc_arrays *aa)
 498{
 499	int default_nid = 0;
 500	int nid = default_nid;
 501	int index;
 502
 503	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
 504	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
 505	    drmem->aa_index < aa->n_arrays) {
 506		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
 507		nid = of_read_number(&aa->arrays[index], 1);
 508
 509		if (nid == 0xffff || nid >= MAX_NUMNODES)
 510			nid = default_nid;
 511
 512		if (nid > 0) {
 513			index = drmem->aa_index * aa->array_sz;
 514			initialize_distance_lookup_table(nid,
 515							&aa->arrays[index]);
 516		}
 517	}
 518
 519	return nid;
 520}
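/*
 * Indexing sketch (hypothetical values): with array_sz = 4, aa_index = 2
 * and min_common_depth = 2, the node id above is read from flattened
 * element 2 * 4 + (2 - 1) = 9, i.e. the second level of the third
 * associativity array.
 */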
 521
 522/*
 523 * Figure out to which domain a cpu belongs and stick it there.
 524 * Return the id of the domain used.
 525 */
 526static int numa_setup_cpu(unsigned long lcpu)
 527{
 528	int nid = -1;
 529	struct device_node *cpu;
 530
 531	/*
 532	 * If a valid cpu-to-node mapping is already available, use it
 533	 * directly instead of querying the firmware, since it represents
 534	 * the most recent mapping notified to us by the platform (e.g. VPHN).
 535	 */
 536	if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
 537		map_cpu_to_node(lcpu, nid);
 538		return nid;
 539	}
 540
 541	cpu = of_get_cpu_node(lcpu, NULL);
 542
 543	if (!cpu) {
 544		WARN_ON(1);
 545		if (cpu_present(lcpu))
 546			goto out_present;
 547		else
 548			goto out;
 549	}
 550
 551	nid = of_node_to_nid_single(cpu);
 552
 553out_present:
 554	if (nid < 0 || !node_online(nid))
 555		nid = first_online_node;
 556
 557	map_cpu_to_node(lcpu, nid);
 558	of_node_put(cpu);
 559out:
 560	return nid;
 561}
 562
 563static void verify_cpu_node_mapping(int cpu, int node)
 564{
 565	int base, sibling, i;
 566
 567	/* Verify that all the threads in the core belong to the same node */
 568	base = cpu_first_thread_sibling(cpu);
 569
 570	for (i = 0; i < threads_per_core; i++) {
 571		sibling = base + i;
 572
 573		if (sibling == cpu || cpu_is_offline(sibling))
 574			continue;
 575
 576		if (cpu_to_node(sibling) != node) {
 577			WARN(1, "CPU thread siblings %d and %d don't belong"
 578				" to the same node!\n", cpu, sibling);
 579			break;
 580		}
 581	}
 582}
 583
 584static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
 585			     void *hcpu)
 586{
 587	unsigned long lcpu = (unsigned long)hcpu;
 588	int ret = NOTIFY_DONE, nid;
 589
 590	switch (action) {
 591	case CPU_UP_PREPARE:
 592	case CPU_UP_PREPARE_FROZEN:
 593		nid = numa_setup_cpu(lcpu);
 594		verify_cpu_node_mapping((int)lcpu, nid);
 595		ret = NOTIFY_OK;
 596		break;
 597#ifdef CONFIG_HOTPLUG_CPU
 598	case CPU_DEAD:
 599	case CPU_DEAD_FROZEN:
 600	case CPU_UP_CANCELED:
 601	case CPU_UP_CANCELED_FROZEN:
 602		unmap_cpu_from_node(lcpu);
 603		ret = NOTIFY_OK;
 604		break;
 605#endif
 606	}
 607	return ret;
 608}
 609
 610/*
 611 * Check and possibly modify a memory region to enforce the memory limit.
 612 *
 613 * Returns the size the region should have to enforce the memory limit.
 614 * This will either be the original value of size, a truncated value,
 615 * or zero. If the returned value of size is 0 the region should be
 616 * discarded as it lies wholly above the memory limit.
 617 */
 618static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 619						      unsigned long size)
 620{
 621	/*
 622	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
 623	 * we've already adjusted it for the limit and it takes care of
 624	 * having memory holes below the limit.  Also, in the case of
 625	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
 626	 */
 627
 628	if (start + size <= memblock_end_of_DRAM())
 629		return size;
 630
 631	if (start >= memblock_end_of_DRAM())
 632		return 0;
 633
 634	return memblock_end_of_DRAM() - start;
 635}
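/*
 * Worked example (hypothetical limit): if memblock_end_of_DRAM() is 4GB,
 * a region starting at 3GB with size 2GB is truncated to 1GB, and a
 * region starting at or above 4GB is discarded entirely (size 0).
 */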
 636
 637/*
 638 * Reads the counter for a given entry in the
 639 * linux,drconf-usable-memory property.
 640 */
 641static inline int __init read_usm_ranges(const __be32 **usm)
 642{
 643	/*
 644	 * For each lmb in ibm,dynamic-memory, a corresponding
 645	 * entry in the linux,drconf-usable-memory property contains
 646	 * a counter followed by that many (base, size) pairs.
 647	 * Read the counter from linux,drconf-usable-memory.
 648	 */
 649	return read_n_cells(n_mem_size_cells, usm);
 650}
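/*
 * Illustrative layout (hypothetical cells): a usable-memory entry of
 * <2 base0 size0 base1 size1> declares two (base, size) pairs, which the
 * loop in parse_drconf_memory() below consumes via read_n_cells().
 */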
 651
 652/*
 653 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 654 * node.  This assumes n_mem_{addr,size}_cells have been set.
 655 */
 656static void __init parse_drconf_memory(struct device_node *memory)
 657{
 658	const __be32 *uninitialized_var(dm), *usm;
 659	unsigned int n, rc, ranges, is_kexec_kdump = 0;
 660	unsigned long lmb_size, base, size, sz;
 661	int nid;
 662	struct assoc_arrays aa = { .arrays = NULL };
 663
 664	n = of_get_drconf_memory(memory, &dm);
 665	if (!n)
 666		return;
 667
 668	lmb_size = of_get_lmb_size(memory);
 669	if (!lmb_size)
 670		return;
 671
 672	rc = of_get_assoc_arrays(memory, &aa);
 673	if (rc)
 674		return;
 675
 676	/* check if this is a kexec/kdump kernel */
 677	usm = of_get_usable_memory(memory);
 678	if (usm != NULL)
 679		is_kexec_kdump = 1;
 680
 681	for (; n != 0; --n) {
 682		struct of_drconf_cell drmem;
 683
 684		read_drconf_cell(&drmem, &dm);
 685
 686		/* Skip this block if the reserved bit is set in flags (0x80)
 687		 * or if the block is not assigned to this partition (0x8). */
 688		if ((drmem.flags & DRCONF_MEM_RESERVED)
 689		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
 690			continue;
 691
 692		base = drmem.base_addr;
 693		size = lmb_size;
 694		ranges = 1;
 695
 696		if (is_kexec_kdump) {
 697			ranges = read_usm_ranges(&usm);
 698			if (!ranges) /* there are no (base, size) pairs */
 699				continue;
 700		}
 701		do {
 702			if (is_kexec_kdump) {
 703				base = read_n_cells(n_mem_addr_cells, &usm);
 704				size = read_n_cells(n_mem_size_cells, &usm);
 705			}
 706			nid = of_drconf_to_nid_single(&drmem, &aa);
 707			fake_numa_create_new_node(
 708				((base + size) >> PAGE_SHIFT),
 709					   &nid);
 710			node_set_online(nid);
 711			sz = numa_enforce_memory_limit(base, size);
 712			if (sz)
 713				memblock_set_node(base, sz,
 714						  &memblock.memory, nid);
 715		} while (--ranges);
 716	}
 717}
 718
 719static int __init parse_numa_properties(void)
 720{
 721	struct device_node *memory;
 722	int default_nid = 0;
 723	unsigned long i;
 724
 725	if (numa_enabled == 0) {
 726		printk(KERN_WARNING "NUMA disabled by user\n");
 727		return -1;
 728	}
 729
 730	min_common_depth = find_min_common_depth();
 731
 732	if (min_common_depth < 0)
 733		return min_common_depth;
 734
 735	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
 736
 737	/*
 738	 * Even though we connect cpus to numa domains later in SMP
 739	 * init, we need to know the node ids now. This is because
 740	 * each node to be onlined must have NODE_DATA etc backing it.
 741	 */
 742	for_each_present_cpu(i) {
 743		struct device_node *cpu;
 744		int nid;
 745
 746		cpu = of_get_cpu_node(i, NULL);
 747		BUG_ON(!cpu);
 748		nid = of_node_to_nid_single(cpu);
 749		of_node_put(cpu);
 750
 751		/*
 752		 * Don't fall back to default_nid yet -- we will plug
 753		 * cpus into nodes once the memory scan has discovered
 754		 * the topology.
 755		 */
 756		if (nid < 0)
 757			continue;
 758		node_set_online(nid);
 759	}
 760
 761	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
 762
 763	for_each_node_by_type(memory, "memory") {
 764		unsigned long start;
 765		unsigned long size;
 766		int nid;
 767		int ranges;
 768		const __be32 *memcell_buf;
 769		unsigned int len;
 770
 771		memcell_buf = of_get_property(memory,
 772			"linux,usable-memory", &len);
 773		if (!memcell_buf || len <= 0)
 774			memcell_buf = of_get_property(memory, "reg", &len);
 775		if (!memcell_buf || len <= 0)
 776			continue;
 777
 778		/* ranges in cell */
 779		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
 780new_range:
 781		/* these are order-sensitive, and modify the buffer pointer */
 782		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
 783		size = read_n_cells(n_mem_size_cells, &memcell_buf);
 784
 785		/*
 786		 * Assumption: either all memory nodes or none will
 787		 * have associativity properties.  If none, then
 788		 * everything goes to default_nid.
 789		 */
 790		nid = of_node_to_nid_single(memory);
 791		if (nid < 0)
 792			nid = default_nid;
 793
 794		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
 795		node_set_online(nid);
 796
 797		if (!(size = numa_enforce_memory_limit(start, size))) {
 798			if (--ranges)
 799				goto new_range;
 800			else
 801				continue;
 802		}
 803
 804		memblock_set_node(start, size, &memblock.memory, nid);
 805
 806		if (--ranges)
 807			goto new_range;
 808	}
 809
 810	/*
 811	 * Now do the same thing for each MEMBLOCK listed in the
 812	 * ibm,dynamic-memory property in the
 813	 * ibm,dynamic-reconfiguration-memory node.
 814	 */
 815	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
 816	if (memory)
 817		parse_drconf_memory(memory);
 818
 819	return 0;
 820}
 821
 822static void __init setup_nonnuma(void)
 823{
 824	unsigned long top_of_ram = memblock_end_of_DRAM();
 825	unsigned long total_ram = memblock_phys_mem_size();
 826	unsigned long start_pfn, end_pfn;
 827	unsigned int nid = 0;
 828	struct memblock_region *reg;
 829
 830	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
 831	       top_of_ram, total_ram);
 832	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 833	       (top_of_ram - total_ram) >> 20);
 834
 835	for_each_memblock(memory, reg) {
 836		start_pfn = memblock_region_memory_base_pfn(reg);
 837		end_pfn = memblock_region_memory_end_pfn(reg);
 838
 839		fake_numa_create_new_node(end_pfn, &nid);
 840		memblock_set_node(PFN_PHYS(start_pfn),
 841				  PFN_PHYS(end_pfn - start_pfn),
 842				  &memblock.memory, nid);
 843		node_set_online(nid);
 844	}
 845}
 846
 847void __init dump_numa_cpu_topology(void)
 848{
 849	unsigned int node;
 850	unsigned int cpu, count;
 851
 852	if (min_common_depth == -1 || !numa_enabled)
 853		return;
 854
 855	for_each_online_node(node) {
 856		printk(KERN_DEBUG "Node %d CPUs:", node);
 857
 858		count = 0;
 859		/*
 860		 * If we used a CPU iterator here we would miss printing
 861		 * the holes in the cpumap.
 862		 */
 863		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
 864			if (cpumask_test_cpu(cpu,
 865					node_to_cpumask_map[node])) {
 866				if (count == 0)
 867					printk(" %u", cpu);
 868				++count;
 869			} else {
 870				if (count > 1)
 871					printk("-%u", cpu - 1);
 872				count = 0;
 873			}
 874		}
 875
 876		if (count > 1)
 877			printk("-%u", nr_cpu_ids - 1);
 878		printk("\n");
 879	}
 880}
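/*
 * Example output (hypothetical topology): a machine with 16 threads split
 * evenly across two nodes would print
 *   Node 0 CPUs: 0-7
 *   Node 1 CPUs: 8-15
 */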
 881
 882static void __init dump_numa_memory_topology(void)
 883{
 884	unsigned int node;
 885	unsigned int count;
 886
 887	if (min_common_depth == -1 || !numa_enabled)
 888		return;
 889
 890	for_each_online_node(node) {
 891		unsigned long i;
 892
 893		printk(KERN_DEBUG "Node %d Memory:", node);
 894
 895		count = 0;
 896
 897		for (i = 0; i < memblock_end_of_DRAM();
 898		     i += (1 << SECTION_SIZE_BITS)) {
 899			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
 900				if (count == 0)
 901					printk(" 0x%lx", i);
 902				++count;
 903			} else {
 904				if (count > 0)
 905					printk("-0x%lx", i);
 906				count = 0;
 907			}
 908		}
 909
 910		if (count > 0)
 911			printk("-0x%lx", i);
 912		printk("\n");
 913	}
 914}
 915
 916static struct notifier_block ppc64_numa_nb = {
 917	.notifier_call = cpu_numa_callback,
 918	.priority = 1 /* Must run before sched domains notifier. */
 919};
 920
 921 /* Initialize NODE_DATA for a node, allocating from node-local memory where possible */
 922static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
 923{
 924	u64 spanned_pages = end_pfn - start_pfn;
 925	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
 926	u64 nd_pa;
 927	void *nd;
 928	int tnid;
 929
 930	if (spanned_pages)
 931		pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
 932			nid, start_pfn << PAGE_SHIFT,
 933			(end_pfn << PAGE_SHIFT) - 1);
 934	else
 935		pr_info("Initmem setup node %d\n", nid);
 936
 937	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
 938	nd = __va(nd_pa);
 939
 940	/* report and initialize */
 941	pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
 942		nd_pa, nd_pa + nd_size - 1);
 943	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
 944	if (tnid != nid)
 945		pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);
 946
 947	node_data[nid] = nd;
 948	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
 949	NODE_DATA(nid)->node_id = nid;
 950	NODE_DATA(nid)->node_start_pfn = start_pfn;
 951	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
 952}
 953
 954void __init initmem_init(void)
 955{
 956	int nid, cpu;
 957
 958	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
 959	max_pfn = max_low_pfn;
 960
 961	if (parse_numa_properties())
 962		setup_nonnuma();
 963	else
 964		dump_numa_memory_topology();
 965
 966	memblock_dump_all();
 967
 968	/*
 969	 * Reduce the possible NUMA nodes to the online NUMA nodes,
 970	 * since we do not support node hotplug. This ensures that we
 971	 * lower the maximum NUMA node ID to what is actually present.
 972	 */
 973	nodes_and(node_possible_map, node_possible_map, node_online_map);
 974
 975	for_each_online_node(nid) {
 976		unsigned long start_pfn, end_pfn;
 977
 978		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 979		setup_node_data(nid, start_pfn, end_pfn);
 980		sparse_memory_present_with_active_regions(nid);
 981	}
 982
 983	sparse_init();
 984
 985	setup_node_to_cpumask_map();
 986
 987	reset_numa_cpu_lookup_table();
 988	register_cpu_notifier(&ppc64_numa_nb);
 989	/*
 990	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
 991	 * even before we online them, so that we can use cpu_to_{node,mem}
 992	 * early in boot, cf. smp_prepare_cpus().
 993	 */
 994	for_each_present_cpu(cpu) {
 995		numa_setup_cpu((unsigned long)cpu);
 996	}
 997}
 998
 999static int __init early_numa(char *p)
1000{
1001	if (!p)
1002		return 0;
1003
1004	if (strstr(p, "off"))
1005		numa_enabled = 0;
1006
1007	if (strstr(p, "debug"))
1008		numa_debug = 1;
1009
1010	p = strstr(p, "fake=");
1011	if (p)
1012		cmdline = p + strlen("fake=");
1013
1014	return 0;
1015}
1016early_param("numa", early_numa);
1017
1018static bool topology_updates_enabled = true;
1019
1020static int __init early_topology_updates(char *p)
1021{
1022	if (!p)
1023		return 0;
1024
1025	if (!strcmp(p, "off")) {
1026		pr_info("Disabling topology updates\n");
1027		topology_updates_enabled = false;
1028	}
1029
1030	return 0;
1031}
1032early_param("topology_updates", early_topology_updates);
1033
1034#ifdef CONFIG_MEMORY_HOTPLUG
1035/*
1036 * Find the node associated with a hot-added memory section for
1037 * memory represented in the device tree by the property
1038 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
1039 */
1040static int hot_add_drconf_scn_to_nid(struct device_node *memory,
1041				     unsigned long scn_addr)
1042{
1043	const __be32 *dm;
1044	unsigned int drconf_cell_cnt, rc;
1045	unsigned long lmb_size;
1046	struct assoc_arrays aa;
1047	int nid = -1;
1048
1049	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
1050	if (!drconf_cell_cnt)
1051		return -1;
1052
1053	lmb_size = of_get_lmb_size(memory);
1054	if (!lmb_size)
1055		return -1;
1056
1057	rc = of_get_assoc_arrays(memory, &aa);
1058	if (rc)
1059		return -1;
1060
1061	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
1062		struct of_drconf_cell drmem;
1063
1064		read_drconf_cell(&drmem, &dm);
1065
1066		/* Skip this block if it is reserved or not assigned to
1067		 * this partition. */
1068		if ((drmem.flags & DRCONF_MEM_RESERVED)
1069		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
1070			continue;
1071
1072		if ((scn_addr < drmem.base_addr)
1073		    || (scn_addr >= (drmem.base_addr + lmb_size)))
1074			continue;
1075
1076		nid = of_drconf_to_nid_single(&drmem, &aa);
1077		break;
1078	}
1079
1080	return nid;
1081}
1082
1083/*
1084 * Find the node associated with a hot added memory section for memory
1085 * represented in the device tree as a node (i.e. memory@XXXX) for
1086 * each memblock.
1087 */
1088static int hot_add_node_scn_to_nid(unsigned long scn_addr)
1089{
1090	struct device_node *memory;
1091	int nid = -1;
1092
1093	for_each_node_by_type(memory, "memory") {
1094		unsigned long start, size;
1095		int ranges;
1096		const __be32 *memcell_buf;
1097		unsigned int len;
1098
1099		memcell_buf = of_get_property(memory, "reg", &len);
1100		if (!memcell_buf || len <= 0)
1101			continue;
1102
1103		/* ranges in cell */
1104		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
1105
1106		while (ranges--) {
1107			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
1108			size = read_n_cells(n_mem_size_cells, &memcell_buf);
1109
1110			if ((scn_addr < start) || (scn_addr >= (start + size)))
1111				continue;
1112
1113			nid = of_node_to_nid_single(memory);
1114			break;
1115		}
1116
1117		if (nid >= 0)
1118			break;
1119	}
1120
1121	of_node_put(memory);
1122
1123	return nid;
1124}
1125
1126/*
1127 * Find the node associated with a hot-added memory section.  A section
1128 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
1129 * sections are fully contained within a single MEMBLOCK.
1130 */
1131int hot_add_scn_to_nid(unsigned long scn_addr)
1132{
1133	struct device_node *memory = NULL;
1134	int nid, found = 0;
1135
1136	if (!numa_enabled || (min_common_depth < 0))
1137		return first_online_node;
1138
1139	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1140	if (memory) {
1141		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
1142		of_node_put(memory);
1143	} else {
1144		nid = hot_add_node_scn_to_nid(scn_addr);
1145	}
1146
1147	if (nid < 0 || !node_online(nid))
1148		nid = first_online_node;
1149
1150	if (NODE_DATA(nid)->node_spanned_pages)
1151		return nid;
1152
1153	for_each_online_node(nid) {
1154		if (NODE_DATA(nid)->node_spanned_pages) {
1155			found = 1;
1156			break;
1157		}
1158	}
1159
1160	BUG_ON(!found);
1161	return nid;
1162}
1163
1164static u64 hot_add_drconf_memory_max(void)
1165{
1166	struct device_node *memory = NULL;
1167	unsigned int drconf_cell_cnt = 0;
1168	u64 lmb_size = 0;
1169	const __be32 *dm = NULL;
1170
1171	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1172	if (memory) {
1173		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
1174		lmb_size = of_get_lmb_size(memory);
1175		of_node_put(memory);
1176	}
1177	return lmb_size * drconf_cell_cnt;
1178}
1179
1180/*
1181 * memory_hotplug_max - return max address of memory that may be added
1182 *
1183 * This is currently only used on systems that support drconfig memory
1184 * hotplug.
1185 */
1186u64 memory_hotplug_max(void)
1187{
1188	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
1189}
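/*
 * Worked example (hypothetical platform): with a 256MB LMB and 1024
 * drconf entries, hot_add_drconf_memory_max() yields 256GB, so
 * memory_hotplug_max() returns 256GB unless populated RAM already
 * extends beyond that address.
 */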
1190#endif /* CONFIG_MEMORY_HOTPLUG */
1191
1192/* Virtual Processor Home Node (VPHN) support */
1193#ifdef CONFIG_PPC_SPLPAR
1194
1195#include "vphn.h"
1196
1197struct topology_update_data {
1198	struct topology_update_data *next;
1199	unsigned int cpu;
1200	int old_nid;
1201	int new_nid;
1202};
1203
1204static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
1205static cpumask_t cpu_associativity_changes_mask;
1206static int vphn_enabled;
1207static int prrn_enabled;
1208static void reset_topology_timer(void);
1209
1210/*
1211 * Take a snapshot of the hypervisor's current associativity change
1212 * counters so that later changes can be detected.
1213 */
1214static void setup_cpu_associativity_change_counters(void)
1215{
1216	int cpu;
1217
1218	/* The VPHN feature supports a maximum of 8 reference points */
1219	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
1220
1221	for_each_possible_cpu(cpu) {
1222		int i;
1223		u8 *counts = vphn_cpu_change_counts[cpu];
1224		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
1225
1226		for (i = 0; i < distance_ref_points_depth; i++)
1227			counts[i] = hypervisor_counts[i];
1228	}
1229}
1230
1231/*
1232 * The hypervisor maintains a set of 8 associativity change counters in
1233 * the VPA of each cpu that correspond to the associativity levels in the
1234 * ibm,associativity-reference-points property. When an associativity
1235 * level changes, the corresponding counter is incremented.
1236 *
1237 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
1238 * node associativity levels have changed.
1239 *
1240 * Returns the number of cpus with unhandled associativity changes.
1241 */
1242static int update_cpu_associativity_changes_mask(void)
1243{
1244	int cpu;
1245	cpumask_t *changes = &cpu_associativity_changes_mask;
1246
1247	for_each_possible_cpu(cpu) {
1248		int i, changed = 0;
1249		u8 *counts = vphn_cpu_change_counts[cpu];
1250		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
1251
1252		for (i = 0; i < distance_ref_points_depth; i++) {
1253			if (hypervisor_counts[i] != counts[i]) {
1254				counts[i] = hypervisor_counts[i];
1255				changed = 1;
1256			}
1257		}
1258		if (changed) {
1259			cpumask_or(changes, changes, cpu_sibling_mask(cpu));
1260			cpu = cpu_last_thread_sibling(cpu);
1261		}
1262	}
1263
1264	return cpumask_weight(changes);
1265}
1266
1267/*
1268 * Retrieve the new associativity information for a virtual processor's
1269 * home node.
1270 */
1271static long hcall_vphn(unsigned long cpu, __be32 *associativity)
1272{
1273	long rc;
1274	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
1275	u64 flags = 1;
1276	int hwcpu = get_hard_smp_processor_id(cpu);
1277
1278	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
1279	vphn_unpack_associativity(retbuf, associativity);
1280
1281	return rc;
1282}
1283
1284static long vphn_get_associativity(unsigned long cpu,
1285					__be32 *associativity)
1286{
1287	long rc;
1288
1289	rc = hcall_vphn(cpu, associativity);
1290
1291	switch (rc) {
1292	case H_FUNCTION:
1293		printk(KERN_INFO
1294			"VPHN is not supported. Disabling polling...\n");
1295		stop_topology_update();
1296		break;
1297	case H_HARDWARE:
1298		printk(KERN_ERR
1299			"hcall_vphn() experienced a hardware fault "
1300			"preventing VPHN. Disabling polling...\n");
1301		stop_topology_update();
1302	}
1303
1304	return rc;
1305}
1306
1307/*
1308 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
1309 * characteristics change. This function doesn't perform any locking and is
1310 * only safe to call from stop_machine().
1311 */
1312static int update_cpu_topology(void *data)
1313{
1314	struct topology_update_data *update;
1315	unsigned long cpu;
1316
1317	if (!data)
1318		return -EINVAL;
1319
1320	cpu = smp_processor_id();
1321
1322	for (update = data; update; update = update->next) {
1323		int new_nid = update->new_nid;
1324		if (cpu != update->cpu)
1325			continue;
1326
1327		unmap_cpu_from_node(cpu);
1328		map_cpu_to_node(cpu, new_nid);
1329		set_cpu_numa_node(cpu, new_nid);
1330		set_cpu_numa_mem(cpu, local_memory_node(new_nid));
1331		vdso_getcpu_init();
1332	}
1333
1334	return 0;
1335}
1336
1337static int update_lookup_table(void *data)
1338{
1339	struct topology_update_data *update;
1340
1341	if (!data)
1342		return -EINVAL;
1343
1344	/*
1345	 * Upon topology update, the numa-cpu lookup table needs to be updated
1346	 * for all threads in the core, including offline CPUs, to ensure that
1347	 * future hotplug operations respect the cpu-to-node associativity
1348	 * properly.
1349	 */
1350	for (update = data; update; update = update->next) {
1351		int nid, base, j;
1352
1353		nid = update->new_nid;
1354		base = cpu_first_thread_sibling(update->cpu);
1355
1356		for (j = 0; j < threads_per_core; j++) {
1357			update_numa_cpu_lookup_table(base + j, nid);
1358		}
1359	}
1360
1361	return 0;
1362}
1363
1364/*
1365 * Update the node maps and sysfs entries for each cpu whose home node
1366 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
1367 */
1368int arch_update_cpu_topology(void)
1369{
1370	unsigned int cpu, sibling, changed = 0;
1371	struct topology_update_data *updates, *ud;
1372	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1373	cpumask_t updated_cpus;
1374	struct device *dev;
1375	int weight, new_nid, i = 0;
1376
1377	if (!prrn_enabled && !vphn_enabled)
1378		return 0;
1379
1380	weight = cpumask_weight(&cpu_associativity_changes_mask);
1381	if (!weight)
1382		return 0;
1383
1384	updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
1385	if (!updates)
1386		return 0;
1387
1388	cpumask_clear(&updated_cpus);
1389
1390	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
1391		/*
1392		 * If siblings aren't flagged for changes, updates list
1393		 * will be too short. Skip this update and flag the siblings
1394		 * for the next one.
1395		 */
1396		if (!cpumask_subset(cpu_sibling_mask(cpu),
1397					&cpu_associativity_changes_mask)) {
1398			pr_info("Sibling bits not set for associativity "
1399					"change, cpu%d\n", cpu);
1400			cpumask_or(&cpu_associativity_changes_mask,
1401					&cpu_associativity_changes_mask,
1402					cpu_sibling_mask(cpu));
1403			cpu = cpu_last_thread_sibling(cpu);
1404			continue;
1405		}
1406
1407		/* Use associativity from first thread for all siblings */
1408		vphn_get_associativity(cpu, associativity);
1409		new_nid = associativity_to_nid(associativity);
1410		if (new_nid < 0 || !node_online(new_nid))
1411			new_nid = first_online_node;
1412
1413		if (new_nid == numa_cpu_lookup_table[cpu]) {
1414			cpumask_andnot(&cpu_associativity_changes_mask,
1415					&cpu_associativity_changes_mask,
1416					cpu_sibling_mask(cpu));
1417			cpu = cpu_last_thread_sibling(cpu);
1418			continue;
1419		}
1420
1421		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
1422			ud = &updates[i++];
1423			ud->cpu = sibling;
1424			ud->new_nid = new_nid;
1425			ud->old_nid = numa_cpu_lookup_table[sibling];
1426			cpumask_set_cpu(sibling, &updated_cpus);
1427			if (i < weight)
1428				ud->next = &updates[i];
1429		}
1430		cpu = cpu_last_thread_sibling(cpu);
1431	}
1432
1433	pr_debug("Topology update for the following CPUs:\n");
1434	if (cpumask_weight(&updated_cpus)) {
1435		for (ud = &updates[0]; ud; ud = ud->next) {
1436			pr_debug("cpu %d moving from node %d "
1437					  "to %d\n", ud->cpu,
1438					  ud->old_nid, ud->new_nid);
1439		}
1440	}
1441
1442	/*
1443	 * In cases where we have nothing to update (because the updates list
1444	 * is too short or because the new topology is same as the old one),
1445	 * skip invoking update_cpu_topology() via stop-machine(). This is
1446	 * necessary (and not just a fast-path optimization) since stop-machine
1447	 * can end up electing a random CPU to run update_cpu_topology(), and
1448	 * thus trick us into setting up incorrect cpu-node mappings (since
1449	 * 'updates' is kzalloc()'ed).
1450	 *
1451	 * For the same reason, we also skip the follow-up updates below.
1452	 */
1453	if (!cpumask_weight(&updated_cpus))
1454		goto out;
1455
1456	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
1457
1458	/*
1459	 * Update the numa-cpu lookup table with the new mappings, even for
1460	 * offline CPUs. It is best to perform this update from the stop-
1461	 * machine context.
1462	 */
1463	stop_machine(update_lookup_table, &updates[0],
1464					cpumask_of(raw_smp_processor_id()));
1465
1466	for (ud = &updates[0]; ud; ud = ud->next) {
1467		unregister_cpu_under_node(ud->cpu, ud->old_nid);
1468		register_cpu_under_node(ud->cpu, ud->new_nid);
1469
1470		dev = get_cpu_device(ud->cpu);
1471		if (dev)
1472			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
1473		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
1474		changed = 1;
1475	}
1476
1477out:
1478	kfree(updates);
1479	return changed;
1480}
1481
1482static void topology_work_fn(struct work_struct *work)
1483{
1484	rebuild_sched_domains();
1485}
1486static DECLARE_WORK(topology_work, topology_work_fn);
1487
1488static void topology_schedule_update(void)
1489{
1490	schedule_work(&topology_work);
1491}
1492
1493static void topology_timer_fn(unsigned long ignored)
1494{
1495	if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
1496		topology_schedule_update();
1497	else if (vphn_enabled) {
1498		if (update_cpu_associativity_changes_mask() > 0)
1499			topology_schedule_update();
1500		reset_topology_timer();
1501	}
1502}
1503static struct timer_list topology_timer =
1504	TIMER_INITIALIZER(topology_timer_fn, 0, 0);
1505
1506static void reset_topology_timer(void)
1507{
1508	topology_timer.data = 0;
1509	topology_timer.expires = jiffies + 60 * HZ;
1510	mod_timer(&topology_timer, topology_timer.expires);
1511}
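/*
 * Note on cadence: the timer above re-arms 60 * HZ jiffies ahead, so the
 * VPHN change counters are checked roughly once a minute while polling
 * is enabled.
 */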
1512
1513#ifdef CONFIG_SMP
1514
1515static void stage_topology_update(int core_id)
1516{
1517	cpumask_or(&cpu_associativity_changes_mask,
1518		&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
1519	reset_topology_timer();
1520}
1521
1522static int dt_update_callback(struct notifier_block *nb,
1523				unsigned long action, void *data)
1524{
1525	struct of_reconfig_data *update = data;
1526	int rc = NOTIFY_DONE;
1527
1528	switch (action) {
1529	case OF_RECONFIG_UPDATE_PROPERTY:
1530		if (!of_prop_cmp(update->dn->type, "cpu") &&
1531		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
1532			u32 core_id;
1533			of_property_read_u32(update->dn, "reg", &core_id);
1534			stage_topology_update(core_id);
1535			rc = NOTIFY_OK;
1536		}
1537		break;
1538	}
1539
1540	return rc;
1541}
1542
1543static struct notifier_block dt_update_nb = {
1544	.notifier_call = dt_update_callback,
1545};
1546
1547#endif
1548
1549/*
1550 * Start polling for associativity changes.
1551 */
1552int start_topology_update(void)
1553{
1554	int rc = 0;
1555
1556	if (firmware_has_feature(FW_FEATURE_PRRN)) {
1557		if (!prrn_enabled) {
1558			prrn_enabled = 1;
1559			vphn_enabled = 0;
1560#ifdef CONFIG_SMP
1561			rc = of_reconfig_notifier_register(&dt_update_nb);
1562#endif
1563		}
1564	} else if (firmware_has_feature(FW_FEATURE_VPHN) &&
1565		   lppaca_shared_proc(get_lppaca())) {
1566		if (!vphn_enabled) {
1567			prrn_enabled = 0;
1568			vphn_enabled = 1;
1569			setup_cpu_associativity_change_counters();
1570			init_timer_deferrable(&topology_timer);
1571			reset_topology_timer();
1572		}
1573	}
1574
1575	return rc;
1576}
1577
1578/*
1579 * Disable polling for VPHN associativity changes.
1580 */
1581int stop_topology_update(void)
1582{
1583	int rc = 0;
1584
1585	if (prrn_enabled) {
1586		prrn_enabled = 0;
1587#ifdef CONFIG_SMP
1588		rc = of_reconfig_notifier_unregister(&dt_update_nb);
1589#endif
1590	} else if (vphn_enabled) {
1591		vphn_enabled = 0;
1592		rc = del_timer_sync(&topology_timer);
1593	}
1594
1595	return rc;
1596}
1597
1598int prrn_is_enabled(void)
1599{
1600	return prrn_enabled;
1601}
1602
1603static int topology_read(struct seq_file *file, void *v)
1604{
1605	if (vphn_enabled || prrn_enabled)
1606		seq_puts(file, "on\n");
1607	else
1608		seq_puts(file, "off\n");
1609
1610	return 0;
1611}
1612
1613static int topology_open(struct inode *inode, struct file *file)
1614{
1615	return single_open(file, topology_read, NULL);
1616}
1617
1618static ssize_t topology_write(struct file *file, const char __user *buf,
1619			      size_t count, loff_t *off)
1620{
1621	char kbuf[4]; /* "on" or "off" plus null. */
1622	int read_len;
1623
1624	read_len = count < 3 ? count : 3;
1625	if (copy_from_user(kbuf, buf, read_len))
1626		return -EINVAL;
1627
1628	kbuf[read_len] = '\0';
1629
1630	if (!strncmp(kbuf, "on", 2))
1631		start_topology_update();
1632	else if (!strncmp(kbuf, "off", 3))
1633		stop_topology_update();
1634	else
1635		return -EINVAL;
1636
1637	return count;
1638}
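/*
 * Usage sketch: with CONFIG_PPC_SPLPAR enabled, polling can be toggled
 * from userspace through the proc file registered below, e.g.
 *   echo on  > /proc/powerpc/topology_updates
 *   echo off > /proc/powerpc/topology_updates
 */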
1639
1640static const struct file_operations topology_ops = {
1641	.read = seq_read,
1642	.write = topology_write,
1643	.open = topology_open,
1644	.release = single_release
1645};
1646
1647static int topology_update_init(void)
1648{
1649	/* Do not poll for changes if disabled at boot */
1650	if (topology_updates_enabled)
1651		start_topology_update();
1652
1653	if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
1654		return -ENOMEM;
1655
1656	return 0;
1657}
1658device_initcall(topology_update_init);
1659#endif /* CONFIG_PPC_SPLPAR */