// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>
#include <asm/drmem.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for_each_node(node)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %u nodes\n", nr_node_ids);
}

static int __init fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify the node id iff we have started creating NUMA nodes;
	 * we want to continue from where we left off last time.
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}
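
/*
 * Illustrative, non-built sketch (excluded via #if 0): how the
 * comma-separated "numa=fake=" boundaries above carve memory into
 * fake nodes. The values and the simplified strtoull() parsing are
 * hypothetical; the kernel uses memparse(), which also accepts
 * K/M/G suffixes.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char cmdline[] = "0x40000000,0x80000000";	/* 1G and 2G boundaries */
	char *p = cmdline;
	unsigned int fake_nid = 0;

	while (*p) {
		unsigned long long boundary = strtoull(p, &p, 0);

		printf("fake node %u ends at 0x%llx\n", fake_nid++, boundary);
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;
	}
	return 0;
}
#endif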

static void reset_numa_cpu_lookup_table(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		numa_cpu_lookup_table[cpu] = -1;
}

static void map_cpu_to_node(int cpu, int node)
{
	update_numa_cpu_lookup_table(cpu, node);

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	int dist = 0;
	int i, index;

	for (i = 0; i < distance_ref_points_depth; i++) {
		index = be32_to_cpu(distance_ref_points[i]);
		if (cpu1_assoc[index] == cpu2_assoc[index])
			break;
		dist++;
	}

	return dist;
}

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
EXPORT_SYMBOL(__node_distance);
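
/*
 * Illustrative, non-built sketch (excluded via #if 0): the doubling
 * loop in __node_distance() yields LOCAL_DISTANCE * 2^n when the two
 * nodes' reference-point columns differ at the first n levels. The
 * column values below are hypothetical.
 */
#if 0
#include <stdio.h>

#define SKETCH_LOCAL_DISTANCE	10

static int distance_sketch(const int *a, const int *b, int depth)
{
	int i, distance = SKETCH_LOCAL_DISTANCE;

	for (i = 0; i < depth; i++) {
		if (a[i] == b[i])
			break;
		distance *= 2;		/* one doubling per differing level */
	}
	return distance;
}

int main(void)
{
	int n0[] = { 1, 3, 5, 7 };
	int n1[] = { 2, 3, 5, 7 };	/* differs at level 0 only */
	int n2[] = { 2, 4, 6, 8 };	/* differs at every level */

	printf("%d\n", distance_sketch(n0, n0, 4));	/* 10 */
	printf("%d\n", distance_sketch(n0, n1, 4));	/* 20 */
	printf("%d\n", distance_sketch(n0, n2, 4));	/* 160 */
	return 0;
}
#endif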

static void initialize_distance_lookup_table(int nid,
		const __be32 *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		const __be32 *entry;

		entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
		distance_lookup_table[nid][i] = of_read_number(entry, 1);
	}
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
	int nid = NUMA_NO_NODE;

	if (!numa_enabled)
		goto out;

	if (of_read_number(associativity, 1) >= min_common_depth)
		nid = of_read_number(&associativity[min_common_depth], 1);

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = NUMA_NO_NODE;

	if (nid > 0 &&
		of_read_number(associativity, 1) >= distance_ref_points_depth) {
		/*
		 * Skip the length field and pass the start of the
		 * associativity array.
		 */
		initialize_distance_lookup_table(nid, associativity + 1);
	}

out:
	return nid;
}
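
/*
 * Illustrative, non-built sketch (excluded via #if 0) of the lookup
 * above on host-order cells: cell 0 of "ibm,associativity" is the
 * entry count and the nid is the domain id at index min_common_depth.
 * The property values and depth below are hypothetical.
 */
#if 0
#include <stdio.h>

int main(void)
{
	/* count = 4, then domain ids for four levels */
	unsigned int assoc[] = { 4, 0, 0, 0, 2 };
	int min_common_depth = 4;
	int nid = -1;

	if (assoc[0] >= (unsigned int)min_common_depth)
		nid = assoc[min_common_depth];

	printf("nid = %d\n", nid);	/* nid = 2 */
	return 0;
}
#endif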

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = NUMA_NO_NODE;
	const __be32 *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	int nid = NUMA_NO_NODE;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		device = of_get_next_parent(device);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *root;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

	if (firmware_has_feature(FW_FEATURE_OPAL) ||
	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
		dbg("Using form 1 affinity\n");
		form1_affinity = 1;
	}

	if (form1_affinity) {
		depth = of_read_number(distance_ref_points, 1);
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = of_read_number(&distance_ref_points[1], 1);
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return depth;

err:
	of_node_put(root);
	return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long read_n_cells(int n, const __be32 **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | of_read_number(*buf, 1);
		(*buf)++;
	}
	return result;
}
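
/*
 * Illustrative, non-built sketch (excluded via #if 0): how the loop in
 * read_n_cells() folds n 32-bit device-tree cells into one value. For
 * simplicity the cells are given in host byte order and a 64-bit
 * unsigned long is assumed, as on ppc64.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static unsigned long fold_cells(int n, const uint32_t **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}

int main(void)
{
	/* a 2-cell (64-bit) base address: 0x00000001_80000000 */
	uint32_t cells[] = { 0x00000001, 0x80000000 };
	const uint32_t *p = cells;

	printf("0x%lx\n", fold_cells(2, &p));	/* 0x180000000 */
	return 0;
}
#endif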

struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct assoc_arrays *aa)
{
	struct device_node *memory;
	const __be32 *prop;
	u32 len;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return -1;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int)) {
		of_node_put(memory);
		return -1;
	}

	aa->n_arrays = of_read_number(prop++, 1);
	aa->array_sz = of_read_number(prop++, 1);

	of_node_put(memory);

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
	struct assoc_arrays aa = { .arrays = NULL };
	int default_nid = NUMA_NO_NODE;
	int nid = default_nid;
	int rc, index;

	if ((min_common_depth < 0) || !numa_enabled)
		return default_nid;

	rc = of_get_assoc_arrays(&aa);
	if (rc)
		return default_nid;

	if (min_common_depth <= aa.array_sz &&
	    !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
		index = lmb->aa_index * aa.array_sz + min_common_depth - 1;
		nid = of_read_number(&aa.arrays[index], 1);

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;

		if (nid > 0) {
			index = lmb->aa_index * aa.array_sz;
			initialize_distance_lookup_table(nid,
							&aa.arrays[index]);
		}
	}

	return nid;
}
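
/*
 * Illustrative, non-built sketch (excluded via #if 0): indexing into
 * ibm,associativity-lookup-arrays as done above. The property is laid
 * out as N, M, then N arrays of M domain ids each; the nid for an LMB
 * with a given aa_index sits at aa_index * M + min_common_depth - 1
 * in the flattened id list. All values below are hypothetical.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int array_sz = 4;	/* M: entries per array */
	/* two flattened 4-entry associativity arrays (N = 2) */
	unsigned int arrays[] = { 0, 0, 0, 0,	/* aa_index 0 -> node 0 */
				  0, 0, 0, 1 };	/* aa_index 1 -> node 1 */
	unsigned int aa_index = 1;
	int min_common_depth = 4;
	unsigned int index = aa_index * array_sz + min_common_depth - 1;

	printf("nid = %u\n", arrays[index]);	/* nid = 1 */
	return 0;
}
#endif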

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	int nid = NUMA_NO_NODE;
	struct device_node *cpu;

	/*
	 * If a valid cpu-to-node mapping is already available, use it
	 * directly instead of querying the firmware, since it represents
	 * the most recent mapping notified to us by the platform (eg: VPHN).
	 */
	if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		if (cpu_present(lcpu))
			goto out_present;
		else
			goto out;
	}

	nid = of_node_to_nid_single(cpu);

out_present:
	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	map_cpu_to_node(lcpu, nid);
	of_node_put(cpu);
out:
	return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
	int base, sibling, i;

	/* Verify that all the threads in the core belong to the same node */
	base = cpu_first_thread_sibling(cpu);

	for (i = 0; i < threads_per_core; i++) {
		sibling = base + i;

		if (sibling == cpu || cpu_is_offline(sibling))
			continue;

		if (cpu_to_node(sibling) != node) {
			WARN(1, "CPU thread siblings %d and %d don't belong"
				" to the same node!\n", cpu, sibling);
			break;
		}
	}
}

/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
	int nid;

	nid = numa_setup_cpu(cpu);
	verify_cpu_node_mapping(cpu, nid);
	return 0;
}

static int ppc_numa_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	unmap_cpu_from_node(cpu);
#endif
	return 0;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.  Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}
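
/*
 * Illustrative, non-built sketch (excluded via #if 0) of the clamp
 * arithmetic above, with a hypothetical 2 GB end of DRAM: a region is
 * kept, truncated, or discarded depending on where it falls relative
 * to that boundary.
 */
#if 0
#include <stdio.h>

static unsigned long clamp_region(unsigned long start, unsigned long size,
				  unsigned long end_of_dram)
{
	if (start + size <= end_of_dram)
		return size;		/* fully below the limit: keep */
	if (start >= end_of_dram)
		return 0;		/* fully above the limit: discard */
	return end_of_dram - start;	/* straddles the limit: truncate */
}

int main(void)
{
	unsigned long end = 0x80000000UL;	/* 2 GB */

	/* 0x70000000 + 0x20000000 crosses 2 GB: truncated to 0x10000000 */
	printf("0x%lx\n", clamp_region(0x70000000UL, 0x20000000UL, end));
	return 0;
}
#endif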

/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) pairs.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
					const __be32 **usm)
{
	unsigned int ranges, is_kexec_kdump = 0;
	unsigned long base, size, sz;
	int nid;

	/*
	 * Skip this block if the reserved bit is set in flags (0x80)
	 * or if the block is not assigned to this partition (0x8)
	 */
	if ((lmb->flags & DRCONF_MEM_RESERVED)
	    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
		return;

	if (*usm)
		is_kexec_kdump = 1;

	base = lmb->base_addr;
	size = drmem_lmb_size();
	ranges = 1;

	if (is_kexec_kdump) {
		ranges = read_usm_ranges(usm);
		if (!ranges) /* there are no (base, size) pairs */
			return;
	}

	do {
		if (is_kexec_kdump) {
			base = read_n_cells(n_mem_addr_cells, usm);
			size = read_n_cells(n_mem_size_cells, usm);
		}

		nid = of_drconf_to_nid_single(lmb);
		fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
					  &nid);
		node_set_online(nid);
		sz = numa_enforce_memory_limit(base, size);
		if (sz)
			memblock_set_node(base, sz, &memblock.memory, nid);
	} while (--ranges);
}

static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0) {
		/*
		 * If we fail to parse min_common_depth from the device
		 * tree, mark NUMA disabled and boot with NUMA disabled.
		 */
		numa_enabled = false;
		return min_common_depth;
	}

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		struct device_node *cpu;
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		size = numa_enforce_memory_limit(start, size);
		if (size)
			memblock_set_node(start, size, &memblock.memory, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		walk_drmem_lmbs(memory, numa_setup_drmem_lmb);
		of_node_put(memory);
	}

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, nid);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (!numa_enabled)
		return;

	for_each_online_node(node) {
		pr_info("Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					pr_cont(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					pr_cont("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			pr_cont("-%u", nr_cpu_ids - 1);
		pr_cont("\n");
	}
}
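
/*
 * Illustrative, non-built sketch (excluded via #if 0): the run-length
 * printing above turns a node's cpumask into ranges, e.g. a node
 * holding CPUs 0-7 and 16 prints "Node 0 CPUs: 0-7 16" (hypothetical
 * layout). The array below stands in for the cpumask.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int present[20] = { [0 ... 7] = 1, [16] = 1 };
	unsigned int cpu, count = 0;

	printf("Node 0 CPUs:");
	for (cpu = 0; cpu < 20; cpu++) {
		if (present[cpu]) {
			if (count == 0)
				printf(" %u", cpu);
			++count;
		} else {
			if (count > 1)
				printf("-%u", cpu - 1);
			count = 0;
		}
	}
	if (count > 1)
		printf("-%u", 20 - 1);
	printf("\n");	/* "Node 0 CPUs: 0-7 16" */
	return 0;
}
#endif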

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
	u64 spanned_pages = end_pfn - start_pfn;
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	void *nd;
	int tnid;

	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa)
		panic("Cannot allocate %zu bytes for node %d data\n",
		      nd_size, nid);

	nd = __va(nd_pa);

	/* report and initialize */
	pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
		nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}

static void __init find_possible_nodes(void)
{
	struct device_node *rtas;
	u32 numnodes, i;

	if (!numa_enabled)
		return;

	rtas = of_find_node_by_path("/rtas");
	if (!rtas)
		return;

	if (of_property_read_u32_index(rtas,
				"ibm,max-associativity-domains",
				min_common_depth, &numnodes))
		goto out;

	for (i = 0; i < numnodes; i++) {
		if (!node_possible(i))
			node_set(i, node_possible_map);
	}

out:
	of_node_put(rtas);
}

void __init mem_topology_setup(void)
{
	int cpu;

	if (parse_numa_properties())
		setup_nonnuma();

	/*
	 * Modify the set of possible NUMA nodes to reflect information
	 * available about the set of online nodes, and the set of nodes
	 * that we expect to make use of for this platform's affinity
	 * calculations.
	 */
	nodes_and(node_possible_map, node_possible_map, node_online_map);

	find_possible_nodes();

	setup_node_to_cpumask_map();

	reset_numa_cpu_lookup_table();

	for_each_present_cpu(cpu)
		numa_setup_cpu(cpu);
}

void __init initmem_init(void)
{
	int nid;

	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	memblock_dump_all();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		setup_node_data(nid, start_pfn, end_pfn);
		sparse_memory_present_with_active_regions(nid);
	}

	sparse_init();

	/*
	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
	 * even before we online them, so that we can use cpu_to_{node,mem}
	 * early in boot, cf. smp_prepare_cpus().
	 * _nocalls() + manual invocation is used because cpuhp is not yet
	 * initialized for the boot CPU.
	 */
	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);
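
/*
 * For illustration, hedged examples of the "numa=" boot options parsed
 * above (boundary values hypothetical):
 *
 *   numa=off            disable NUMA entirely
 *   numa=debug          enable the dbg() messages in this file
 *   numa=fake=1G,2G     fake node boundaries, consumed by
 *                       fake_numa_create_new_node()
 */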

/*
 * The platform can inform us through one of several mechanisms
 * (post-migration device tree updates, PRRN or VPHN) that the NUMA
 * assignment of a resource has changed. This controls whether we act
 * on that. Disabled by default.
 */
static bool topology_updates_enabled;

static int __init early_topology_updates(char *p)
{
	if (!p)
		return 0;

	if (!strcmp(p, "on")) {
		pr_warn("Caution: enabling topology updates\n");
		topology_updates_enabled = true;
	}

	return 0;
}
early_param("topology_updates", early_topology_updates);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
{
	struct drmem_lmb *lmb;
	unsigned long lmb_size;
	int nid = NUMA_NO_NODE;

	lmb_size = drmem_lmb_size();

	for_each_drmem_lmb(lmb) {
		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((lmb->flags & DRCONF_MEM_RESERVED)
		    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < lmb->base_addr)
		    || (scn_addr >= (lmb->base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(lmb);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = NUMA_NO_NODE;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}

/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid;

	if (!numa_enabled)
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	struct device_node *dn = NULL;
	const __be64 *lrdr = NULL;

	dn = of_find_node_by_path("/rtas");
	if (dn) {
		lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
		of_node_put(dn);
		if (lrdr)
			return be64_to_cpup(lrdr);
	}

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		of_node_put(memory);
		return drmem_lmb_memory_max();
	}
	return 0;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
struct topology_update_data {
	struct topology_update_data *next;
	unsigned int cpu;
	int old_nid;
	int new_nid;
};

#define TOPOLOGY_DEF_TIMER_SECS	60

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);
static int topology_timer_secs = 1;
static int topology_inited;

/*
 * Change polling interval for associativity changes.
 */
int timed_topology_update(int nsecs)
{
	if (vphn_enabled) {
		if (nsecs > 0)
			topology_timer_secs = nsecs;
		else
			topology_timer_secs = TOPOLOGY_DEF_TIMER_SECS;

		reset_topology_timer();
	}

	return 0;
}

/*
 * Snapshot the hypervisor's current associativity change counters into
 * the local vphn_cpu_change_counts[] array.
 */
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
	int cpu;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed) {
			cpumask_or(changes, changes, cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
		}
	}

	return cpumask_weight(changes);
}
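
/*
 * Illustrative, non-built sketch (excluded via #if 0): the change
 * detection above is a per-level compare-and-copy against the counters
 * the hypervisor bumps. The counter values below are hypothetical.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned char saved[4] = { 3, 7, 1, 0 };	/* last snapshot */
	unsigned char hyp[4]   = { 3, 8, 1, 0 };	/* level 1 bumped */
	int i, changed = 0;

	for (i = 0; i < 4; i++) {
		if (hyp[i] != saved[i]) {
			saved[i] = hyp[i];	/* resynchronize */
			changed = 1;
		}
	}
	printf("changed = %d\n", changed);	/* changed = 1 */
	return 0;
}
#endif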

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long vphn_get_associativity(unsigned long cpu,
					__be32 *associativity)
{
	long rc;

	rc = hcall_vphn(get_hard_smp_processor_id(cpu),
				VPHN_FLAG_VCPU, associativity);

	switch (rc) {
	case H_FUNCTION:
		printk_once(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_HARDWARE:
		printk(KERN_ERR
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_SUCCESS:
		dbg("VPHN hcall succeeded. Reset polling...\n");
		timed_topology_update(0);
		break;
	}

	return rc;
}

int find_and_online_cpu_nid(int cpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	int new_nid;

	/* Use associativity from first thread for all siblings */
	if (vphn_get_associativity(cpu, associativity))
		return cpu_to_node(cpu);

	new_nid = associativity_to_nid(associativity);
	if (new_nid < 0 || !node_possible(new_nid))
		new_nid = first_online_node;

	if (NODE_DATA(new_nid) == NULL) {
#ifdef CONFIG_MEMORY_HOTPLUG
		/*
		 * Need to ensure that NODE_DATA is initialized for a node from
		 * available memory (see memblock_alloc_try_nid). If unable to
		 * init the node, then default to nearest node that has memory
		 * installed. Skip onlining a node if the subsystems are not
		 * yet initialized.
		 */
		if (!topology_inited || try_online_node(new_nid))
			new_nid = first_online_node;
#else
		/*
		 * Default to using the nearest node that has memory installed.
		 * Otherwise, it would be necessary to patch the kernel MM code
		 * to deal with more memoryless-node error conditions.
		 */
		new_nid = first_online_node;
#endif
	}

	pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__,
		cpu, new_nid);
	return new_nid;
}

/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
	struct topology_update_data *update;
	unsigned long cpu;

	if (!data)
		return -EINVAL;

	cpu = smp_processor_id();

	for (update = data; update; update = update->next) {
		int new_nid = update->new_nid;
		if (cpu != update->cpu)
			continue;

		unmap_cpu_from_node(cpu);
		map_cpu_to_node(cpu, new_nid);
		set_cpu_numa_node(cpu, new_nid);
		set_cpu_numa_mem(cpu, local_memory_node(new_nid));
		vdso_getcpu_init();
	}

	return 0;
}

static int update_lookup_table(void *data)
{
	struct topology_update_data *update;

	if (!data)
		return -EINVAL;

	/*
	 * Upon topology update, the numa-cpu lookup table needs to be updated
	 * for all threads in the core, including offline CPUs, to ensure that
	 * future hotplug operations respect the cpu-to-node associativity
	 * properly.
	 */
	for (update = data; update; update = update->next) {
		int nid, base, j;

		nid = update->new_nid;
		base = cpu_first_thread_sibling(update->cpu);

		for (j = 0; j < threads_per_core; j++) {
			update_numa_cpu_lookup_table(base + j, nid);
		}
	}

	return 0;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 *
 * cpus_locked says whether we already hold cpu_hotplug_lock.
 */
int numa_update_cpu_topology(bool cpus_locked)
{
	unsigned int cpu, sibling, changed = 0;
	struct topology_update_data *updates, *ud;
	cpumask_t updated_cpus;
	struct device *dev;
	int weight, new_nid, i = 0;

	if (!prrn_enabled && !vphn_enabled && topology_inited)
		return 0;

	weight = cpumask_weight(&cpu_associativity_changes_mask);
	if (!weight)
		return 0;

	updates = kcalloc(weight, sizeof(*updates), GFP_KERNEL);
	if (!updates)
		return 0;

	cpumask_clear(&updated_cpus);

	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
		/*
		 * If siblings aren't flagged for changes, updates list
		 * will be too short. Skip on this update and set for next
		 * update.
		 */
		if (!cpumask_subset(cpu_sibling_mask(cpu),
					&cpu_associativity_changes_mask)) {
			pr_info("Sibling bits not set for associativity "
					"change, cpu%d\n", cpu);
			cpumask_or(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		new_nid = find_and_online_cpu_nid(cpu);

		if (new_nid == numa_cpu_lookup_table[cpu]) {
			cpumask_andnot(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			dbg("Assoc chg gives same node %d for cpu%d\n",
					new_nid, cpu);
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
			ud = &updates[i++];
			ud->next = &updates[i];
			ud->cpu = sibling;
			ud->new_nid = new_nid;
			ud->old_nid = numa_cpu_lookup_table[sibling];
			cpumask_set_cpu(sibling, &updated_cpus);
		}
		cpu = cpu_last_thread_sibling(cpu);
	}

	/*
	 * Prevent processing of 'updates' from overflowing array
	 * where last entry filled in a 'next' pointer.
	 */
	if (i)
		updates[i-1].next = NULL;

	pr_debug("Topology update for the following CPUs:\n");
	if (cpumask_weight(&updated_cpus)) {
		for (ud = &updates[0]; ud; ud = ud->next) {
			pr_debug("cpu %d moving from node %d "
					  "to %d\n", ud->cpu,
					  ud->old_nid, ud->new_nid);
		}
	}

	/*
	 * In cases where we have nothing to update (because the updates list
	 * is too short or because the new topology is same as the old one),
	 * skip invoking update_cpu_topology() via stop-machine(). This is
	 * necessary (and not just a fast-path optimization) since stop-machine
	 * can end up electing a random CPU to run update_cpu_topology(), and
	 * thus trick us into setting up incorrect cpu-node mappings (since
	 * 'updates' is kcalloc()'ed, i.e. zero-filled).
	 *
	 * And for the similar reason, we will skip all the following updating.
	 */
	if (!cpumask_weight(&updated_cpus))
		goto out;

	if (cpus_locked)
		stop_machine_cpuslocked(update_cpu_topology, &updates[0],
					&updated_cpus);
	else
		stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

	/*
	 * Update the numa-cpu lookup table with the new mappings, even for
	 * offline CPUs. It is best to perform this update from the stop-
	 * machine context.
	 */
	if (cpus_locked)
		stop_machine_cpuslocked(update_lookup_table, &updates[0],
					cpumask_of(raw_smp_processor_id()));
	else
		stop_machine(update_lookup_table, &updates[0],
			     cpumask_of(raw_smp_processor_id()));

	for (ud = &updates[0]; ud; ud = ud->next) {
		unregister_cpu_under_node(ud->cpu, ud->old_nid);
		register_cpu_under_node(ud->cpu, ud->new_nid);

		dev = get_cpu_device(ud->cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
		changed = 1;
	}

out:
	kfree(updates);
	return changed;
}

int arch_update_cpu_topology(void)
{
	return numa_update_cpu_topology(true);
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

static void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(struct timer_list *unused)
{
	if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
		topology_schedule_update();
	else if (vphn_enabled) {
		if (update_cpu_associativity_changes_mask() > 0)
			topology_schedule_update();
		reset_topology_timer();
	}
}
static struct timer_list topology_timer;

static void reset_topology_timer(void)
{
	if (vphn_enabled)
		mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
}

#ifdef CONFIG_SMP

static int dt_update_callback(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *update = data;
	int rc = NOTIFY_DONE;

	switch (action) {
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (of_node_is_type(update->dn, "cpu") &&
		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
			u32 core_id;

			of_property_read_u32(update->dn, "reg", &core_id);
			dlpar_cpu_readd(core_id);
			rc = NOTIFY_OK;
		}
		break;
	}

	return rc;
}

static struct notifier_block dt_update_nb = {
	.notifier_call = dt_update_callback,
};

#endif

/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
	int rc = 0;

	if (!topology_updates_enabled)
		return 0;

	if (firmware_has_feature(FW_FEATURE_PRRN)) {
		if (!prrn_enabled) {
			prrn_enabled = 1;
#ifdef CONFIG_SMP
			rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
		}
	}
	if (firmware_has_feature(FW_FEATURE_VPHN) &&
		   lppaca_shared_proc(get_lppaca())) {
		if (!vphn_enabled) {
			vphn_enabled = 1;
			setup_cpu_associativity_change_counters();
			timer_setup(&topology_timer, topology_timer_fn,
				    TIMER_DEFERRABLE);
			reset_topology_timer();
		}
	}

	pr_info("Starting topology update%s%s\n",
		(prrn_enabled ? " prrn_enabled" : ""),
		(vphn_enabled ? " vphn_enabled" : ""));

	return rc;
}

/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
	int rc = 0;

	if (!topology_updates_enabled)
		return 0;

	if (prrn_enabled) {
		prrn_enabled = 0;
#ifdef CONFIG_SMP
		rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
	}
	if (vphn_enabled) {
		vphn_enabled = 0;
		rc = del_timer_sync(&topology_timer);
	}

	pr_info("Stopping topology update\n");

	return rc;
}

int prrn_is_enabled(void)
{
	return prrn_enabled;
}

void __init shared_proc_topology_init(void)
{
	if (lppaca_shared_proc(get_lppaca())) {
		bitmap_fill(cpumask_bits(&cpu_associativity_changes_mask),
			    nr_cpumask_bits);
		numa_update_cpu_topology(false);
	}
}

static int topology_read(struct seq_file *file, void *v)
{
	if (vphn_enabled || prrn_enabled)
		seq_puts(file, "on\n");
	else
		seq_puts(file, "off\n");

	return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
	return single_open(file, topology_read, NULL);
}

static ssize_t topology_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *off)
{
	char kbuf[4]; /* "on" or "off" plus null. */
	int read_len;

	read_len = count < 3 ? count : 3;
	if (copy_from_user(kbuf, buf, read_len))
		return -EINVAL;

	kbuf[read_len] = '\0';

	if (!strncmp(kbuf, "on", 2)) {
		topology_updates_enabled = true;
		start_topology_update();
	} else if (!strncmp(kbuf, "off", 3)) {
		stop_topology_update();
		topology_updates_enabled = false;
	} else
		return -EINVAL;

	return count;
}

static const struct file_operations topology_ops = {
	.read = seq_read,
	.write = topology_write,
	.open = topology_open,
	.release = single_release
};
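
/*
 * Usage sketch for the proc interface above (path as registered in
 * topology_update_init() below):
 *
 *   # cat /proc/powerpc/topology_updates
 *   off
 *   # echo on > /proc/powerpc/topology_updates
 *
 * The "off" output shown is hypothetical; the file reflects whether
 * vphn_enabled or prrn_enabled is currently set.
 */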

static int topology_update_init(void)
{
	start_topology_update();

	if (vphn_enabled)
		topology_schedule_update();

	if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
		return -ENOMEM;

	topology_inited = 1;
	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */
v4.17
 
   1/*
   2 * pSeries NUMA support
   3 *
   4 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License
   8 * as published by the Free Software Foundation; either version
   9 * 2 of the License, or (at your option) any later version.
  10 */
  11#define pr_fmt(fmt) "numa: " fmt
  12
  13#include <linux/threads.h>
  14#include <linux/bootmem.h>
  15#include <linux/init.h>
  16#include <linux/mm.h>
  17#include <linux/mmzone.h>
  18#include <linux/export.h>
  19#include <linux/nodemask.h>
  20#include <linux/cpu.h>
  21#include <linux/notifier.h>
  22#include <linux/memblock.h>
  23#include <linux/of.h>
  24#include <linux/pfn.h>
  25#include <linux/cpuset.h>
  26#include <linux/node.h>
  27#include <linux/stop_machine.h>
  28#include <linux/proc_fs.h>
  29#include <linux/seq_file.h>
  30#include <linux/uaccess.h>
  31#include <linux/slab.h>
  32#include <asm/cputhreads.h>
  33#include <asm/sparsemem.h>
  34#include <asm/prom.h>
  35#include <asm/smp.h>
  36#include <asm/cputhreads.h>
  37#include <asm/topology.h>
  38#include <asm/firmware.h>
  39#include <asm/paca.h>
  40#include <asm/hvcall.h>
  41#include <asm/setup.h>
  42#include <asm/vdso.h>
  43#include <asm/drmem.h>
  44
  45static int numa_enabled = 1;
  46
  47static char *cmdline __initdata;
  48
  49static int numa_debug;
  50#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
  51
  52int numa_cpu_lookup_table[NR_CPUS];
  53cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
  54struct pglist_data *node_data[MAX_NUMNODES];
  55
  56EXPORT_SYMBOL(numa_cpu_lookup_table);
  57EXPORT_SYMBOL(node_to_cpumask_map);
  58EXPORT_SYMBOL(node_data);
  59
  60static int min_common_depth;
  61static int n_mem_addr_cells, n_mem_size_cells;
  62static int form1_affinity;
  63
  64#define MAX_DISTANCE_REF_POINTS 4
  65static int distance_ref_points_depth;
  66static const __be32 *distance_ref_points;
  67static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
  68
  69/*
  70 * Allocate node_to_cpumask_map based on number of available nodes
  71 * Requires node_possible_map to be valid.
  72 *
  73 * Note: cpumask_of_node() is not valid until after this is done.
  74 */
  75static void __init setup_node_to_cpumask_map(void)
  76{
  77	unsigned int node;
  78
  79	/* setup nr_node_ids if not done yet */
  80	if (nr_node_ids == MAX_NUMNODES)
  81		setup_nr_node_ids();
  82
  83	/* allocate the map */
  84	for_each_node(node)
  85		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
  86
  87	/* cpumask_of_node() will now work */
  88	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
  89}
  90
  91static int __init fake_numa_create_new_node(unsigned long end_pfn,
  92						unsigned int *nid)
  93{
  94	unsigned long long mem;
  95	char *p = cmdline;
  96	static unsigned int fake_nid;
  97	static unsigned long long curr_boundary;
  98
  99	/*
 100	 * Modify node id, iff we started creating NUMA nodes
 101	 * We want to continue from where we left of the last time
 102	 */
 103	if (fake_nid)
 104		*nid = fake_nid;
 105	/*
 106	 * In case there are no more arguments to parse, the
 107	 * node_id should be the same as the last fake node id
 108	 * (we've handled this above).
 109	 */
 110	if (!p)
 111		return 0;
 112
 113	mem = memparse(p, &p);
 114	if (!mem)
 115		return 0;
 116
 117	if (mem < curr_boundary)
 118		return 0;
 119
 120	curr_boundary = mem;
 121
 122	if ((end_pfn << PAGE_SHIFT) > mem) {
 123		/*
 124		 * Skip commas and spaces
 125		 */
 126		while (*p == ',' || *p == ' ' || *p == '\t')
 127			p++;
 128
 129		cmdline = p;
 130		fake_nid++;
 131		*nid = fake_nid;
 132		dbg("created new fake_node with id %d\n", fake_nid);
 133		return 1;
 134	}
 135	return 0;
 136}
 137
 138static void reset_numa_cpu_lookup_table(void)
 139{
 140	unsigned int cpu;
 141
 142	for_each_possible_cpu(cpu)
 143		numa_cpu_lookup_table[cpu] = -1;
 144}
 145
 146static void map_cpu_to_node(int cpu, int node)
 147{
 148	update_numa_cpu_lookup_table(cpu, node);
 149
 150	dbg("adding cpu %d to node %d\n", cpu, node);
 151
 152	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
 153		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
 154}
 155
 156#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
 157static void unmap_cpu_from_node(unsigned long cpu)
 158{
 159	int node = numa_cpu_lookup_table[cpu];
 160
 161	dbg("removing cpu %lu from node %d\n", cpu, node);
 162
 163	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
 164		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
 165	} else {
 166		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
 167		       cpu, node);
 168	}
 169}
 170#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
 171
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 172/* must hold reference to node during call */
 173static const __be32 *of_get_associativity(struct device_node *dev)
 174{
 175	return of_get_property(dev, "ibm,associativity", NULL);
 176}
 177
 178int __node_distance(int a, int b)
 179{
 180	int i;
 181	int distance = LOCAL_DISTANCE;
 182
 183	if (!form1_affinity)
 184		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
 185
 186	for (i = 0; i < distance_ref_points_depth; i++) {
 187		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
 188			break;
 189
 190		/* Double the distance for each NUMA level */
 191		distance *= 2;
 192	}
 193
 194	return distance;
 195}
 196EXPORT_SYMBOL(__node_distance);
 197
 198static void initialize_distance_lookup_table(int nid,
 199		const __be32 *associativity)
 200{
 201	int i;
 202
 203	if (!form1_affinity)
 204		return;
 205
 206	for (i = 0; i < distance_ref_points_depth; i++) {
 207		const __be32 *entry;
 208
 209		entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
 210		distance_lookup_table[nid][i] = of_read_number(entry, 1);
 211	}
 212}
 213
 214/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 215 * info is found.
 216 */
 217static int associativity_to_nid(const __be32 *associativity)
 218{
 219	int nid = -1;
 220
 221	if (min_common_depth == -1)
 222		goto out;
 223
 224	if (of_read_number(associativity, 1) >= min_common_depth)
 225		nid = of_read_number(&associativity[min_common_depth], 1);
 226
 227	/* POWER4 LPAR uses 0xffff as invalid node */
 228	if (nid == 0xffff || nid >= MAX_NUMNODES)
 229		nid = -1;
 230
 231	if (nid > 0 &&
 232		of_read_number(associativity, 1) >= distance_ref_points_depth) {
 233		/*
 234		 * Skip the length field and send start of associativity array
 235		 */
 236		initialize_distance_lookup_table(nid, associativity + 1);
 237	}
 238
 239out:
 240	return nid;
 241}
 242
 243/* Returns the nid associated with the given device tree node,
 244 * or -1 if not found.
 245 */
 246static int of_node_to_nid_single(struct device_node *device)
 247{
 248	int nid = -1;
 249	const __be32 *tmp;
 250
 251	tmp = of_get_associativity(device);
 252	if (tmp)
 253		nid = associativity_to_nid(tmp);
 254	return nid;
 255}
 256
 257/* Walk the device tree upwards, looking for an associativity id */
 258int of_node_to_nid(struct device_node *device)
 259{
 260	int nid = -1;
 261
 262	of_node_get(device);
 263	while (device) {
 264		nid = of_node_to_nid_single(device);
 265		if (nid != -1)
 266			break;
 267
 268		device = of_get_next_parent(device);
 269	}
 270	of_node_put(device);
 271
 272	return nid;
 273}
 274EXPORT_SYMBOL(of_node_to_nid);
 275
 276static int __init find_min_common_depth(void)
 277{
 278	int depth;
 279	struct device_node *root;
 280
 281	if (firmware_has_feature(FW_FEATURE_OPAL))
 282		root = of_find_node_by_path("/ibm,opal");
 283	else
 284		root = of_find_node_by_path("/rtas");
 285	if (!root)
 286		root = of_find_node_by_path("/");
 287
 288	/*
 289	 * This property is a set of 32-bit integers, each representing
 290	 * an index into the ibm,associativity nodes.
 291	 *
 292	 * With form 0 affinity the first integer is for an SMP configuration
 293	 * (should be all 0's) and the second is for a normal NUMA
 294	 * configuration. We have only one level of NUMA.
 295	 *
 296	 * With form 1 affinity the first integer is the most significant
 297	 * NUMA boundary and the following are progressively less significant
 298	 * boundaries. There can be more than one level of NUMA.
 299	 */
 300	distance_ref_points = of_get_property(root,
 301					"ibm,associativity-reference-points",
 302					&distance_ref_points_depth);
 303
 304	if (!distance_ref_points) {
 305		dbg("NUMA: ibm,associativity-reference-points not found.\n");
 306		goto err;
 307	}
 308
 309	distance_ref_points_depth /= sizeof(int);
 310
 311	if (firmware_has_feature(FW_FEATURE_OPAL) ||
 312	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
 313		dbg("Using form 1 affinity\n");
 314		form1_affinity = 1;
 315	}
 316
 317	if (form1_affinity) {
 318		depth = of_read_number(distance_ref_points, 1);
 319	} else {
 320		if (distance_ref_points_depth < 2) {
 321			printk(KERN_WARNING "NUMA: "
 322				"short ibm,associativity-reference-points\n");
 323			goto err;
 324		}
 325
 326		depth = of_read_number(&distance_ref_points[1], 1);
 327	}
 328
 329	/*
 330	 * Warn and cap if the hardware supports more than
 331	 * MAX_DISTANCE_REF_POINTS domains.
 332	 */
 333	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
 334		printk(KERN_WARNING "NUMA: distance array capped at "
 335			"%d entries\n", MAX_DISTANCE_REF_POINTS);
 336		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
 337	}
 338
 339	of_node_put(root);
 340	return depth;
 341
 342err:
 343	of_node_put(root);
 344	return -1;
 345}
 346
 347static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
 348{
 349	struct device_node *memory = NULL;
 350
 351	memory = of_find_node_by_type(memory, "memory");
 352	if (!memory)
 353		panic("numa.c: No memory nodes found!");
 354
 355	*n_addr_cells = of_n_addr_cells(memory);
 356	*n_size_cells = of_n_size_cells(memory);
 357	of_node_put(memory);
 358}
 359
 360static unsigned long read_n_cells(int n, const __be32 **buf)
 361{
 362	unsigned long result = 0;
 363
 364	while (n--) {
 365		result = (result << 32) | of_read_number(*buf, 1);
 366		(*buf)++;
 367	}
 368	return result;
 369}
 370
 371struct assoc_arrays {
 372	u32	n_arrays;
 373	u32	array_sz;
 374	const __be32 *arrays;
 375};
 376
 377/*
 378 * Retrieve and validate the list of associativity arrays for drconf
 379 * memory from the ibm,associativity-lookup-arrays property of the
 380 * device tree..
 381 *
 382 * The layout of the ibm,associativity-lookup-arrays property is a number N
 383 * indicating the number of associativity arrays, followed by a number M
 384 * indicating the size of each associativity array, followed by a list
 385 * of N associativity arrays.
 386 */
 387static int of_get_assoc_arrays(struct assoc_arrays *aa)
 388{
 389	struct device_node *memory;
 390	const __be32 *prop;
 391	u32 len;
 392
 393	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
 394	if (!memory)
 395		return -1;
 396
 397	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
 398	if (!prop || len < 2 * sizeof(unsigned int)) {
 399		of_node_put(memory);
 400		return -1;
 401	}
 402
 403	aa->n_arrays = of_read_number(prop++, 1);
 404	aa->array_sz = of_read_number(prop++, 1);
 405
 406	of_node_put(memory);
 407
 408	/* Now that we know the number of arrays and size of each array,
 409	 * revalidate the size of the property read in.
 410	 */
 411	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
 412		return -1;
 413
 414	aa->arrays = prop;
 415	return 0;
 416}
 417
 418/*
 419 * This is like of_node_to_nid_single() for memory represented in the
 420 * ibm,dynamic-reconfiguration-memory node.
 421 */
 422static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
 423{
 424	struct assoc_arrays aa = { .arrays = NULL };
 425	int default_nid = 0;
 426	int nid = default_nid;
 427	int rc, index;
 428
 429	rc = of_get_assoc_arrays(&aa);
 430	if (rc)
 431		return default_nid;
 432
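    	/*
    	 * Each LMB's aa_index selects one associativity array; the node
    	 * id sits at (1-based) position min_common_depth within that
    	 * array, hence the "- 1" in the index computation below.
    	 */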
 433	if (min_common_depth > 0 && min_common_depth <= aa.array_sz &&
 434	    !(lmb->flags & DRCONF_MEM_AI_INVALID) &&
 435	    lmb->aa_index < aa.n_arrays) {
 436		index = lmb->aa_index * aa.array_sz + min_common_depth - 1;
 437		nid = of_read_number(&aa.arrays[index], 1);
 438
 439		if (nid == 0xffff || nid >= MAX_NUMNODES)
 440			nid = default_nid;
 441
 442		if (nid > 0) {
 443			index = lmb->aa_index * aa.array_sz;
 444			initialize_distance_lookup_table(nid,
 445							&aa.arrays[index]);
 446		}
 447	}
 448
 449	return nid;
 450}
 451
 452/*
 453 * Figure out to which domain a cpu belongs and stick it there.
 454 * Return the id of the domain used.
 455 */
 456static int numa_setup_cpu(unsigned long lcpu)
 457{
 458	int nid = -1;
 459	struct device_node *cpu;
 460
 461	/*
 462	 * If a valid cpu-to-node mapping is already available, use it
 463	 * directly instead of querying the firmware, since it represents
 464	 * the most recent mapping notified to us by the platform (eg: VPHN).
 465	 */
 466	if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
 467		map_cpu_to_node(lcpu, nid);
 468		return nid;
 469	}
 470
 471	cpu = of_get_cpu_node(lcpu, NULL);
 472
 473	if (!cpu) {
 474		WARN_ON(1);
 475		if (cpu_present(lcpu))
 476			goto out_present;
 477		else
 478			goto out;
 479	}
 480
 481	nid = of_node_to_nid_single(cpu);
 482
 483out_present:
 484	if (nid < 0 || !node_possible(nid))
 485		nid = first_online_node;
 486
 487	map_cpu_to_node(lcpu, nid);
 488	of_node_put(cpu);
 489out:
 490	return nid;
 491}
 492
 493static void verify_cpu_node_mapping(int cpu, int node)
 494{
 495	int base, sibling, i;
 496
 497	/* Verify that all the threads in the core belong to the same node */
 498	base = cpu_first_thread_sibling(cpu);
 499
 500	for (i = 0; i < threads_per_core; i++) {
 501		sibling = base + i;
 502
 503		if (sibling == cpu || cpu_is_offline(sibling))
 504			continue;
 505
 506		if (cpu_to_node(sibling) != node) {
 507			WARN(1, "CPU thread siblings %d and %d don't belong"
 508				" to the same node!\n", cpu, sibling);
 509			break;
 510		}
 511	}
 512}
 513
 514/* Must run before sched domains notifier. */
 515static int ppc_numa_cpu_prepare(unsigned int cpu)
 516{
 517	int nid;
 518
 519	nid = numa_setup_cpu(cpu);
 520	verify_cpu_node_mapping(cpu, nid);
 521	return 0;
 522}
 523
 524static int ppc_numa_cpu_dead(unsigned int cpu)
 525{
 526#ifdef CONFIG_HOTPLUG_CPU
 527	unmap_cpu_from_node(cpu);
 528#endif
 529	return 0;
 530}
 531
 532/*
 533 * Check and possibly modify a memory region to enforce the memory limit.
 534 *
 535 * Returns the size the region should have to enforce the memory limit.
 536 * This will either be the original value of size, a truncated value,
 537 * or zero. If the returned value of size is 0 the region should be
 538 * discarded as it lies wholly above the memory limit.
 539 */
 540static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 541						      unsigned long size)
 542{
 543	/*
 544	 * We use memblock_end_of_DRAM() here instead of memory_limit because
 545	 * we've already adjusted it for the limit and it takes care of
 546	 * having memory holes below the limit.  Also, in the case of
 547	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
 548	 */
 549
 550	if (start + size <= memblock_end_of_DRAM())
 551		return size;
 552
 553	if (start >= memblock_end_of_DRAM())
 554		return 0;
 555
 556	return memblock_end_of_DRAM() - start;
 557}
 558
 559/*
 560 * Reads the counter for a given entry in
 561 * linux,drconf-usable-memory property
 562 */
 563static inline int __init read_usm_ranges(const __be32 **usm)
 564{
 565	/*
 566	 * For each LMB in ibm,dynamic-memory, the corresponding
 567	 * entry in the linux,drconf-usable-memory property contains
 568	 * a counter followed by that many (base, size) tuples.
 569	 * Read the counter from linux,drconf-usable-memory.
 570	 */
 571	return read_n_cells(n_mem_size_cells, usm);
 572}
 573
 574/*
 575 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 576 * node.  This assumes n_mem_{addr,size}_cells have been set.
 577 */
 578static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
 579					const __be32 **usm)
 580{
 581	unsigned int ranges, is_kexec_kdump = 0;
 582	unsigned long base, size, sz;
 583	int nid;
 584
 585	/*
 586	 * Skip this block if the reserved bit is set in flags (0x80)
 587	 * or if the block is not assigned to this partition (0x8)
 588	 */
 589	if ((lmb->flags & DRCONF_MEM_RESERVED)
 590	    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
 591		return;
 592
 593	if (*usm)
 594		is_kexec_kdump = 1;
 595
 596	base = lmb->base_addr;
 597	size = drmem_lmb_size();
 598	ranges = 1;
 599
 600	if (is_kexec_kdump) {
 601		ranges = read_usm_ranges(usm);
 602		if (!ranges) /* there are no (base, size) tuples */
 603			return;
 604	}
 605
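    	/*
    	 * In the kexec/kdump case each usable-memory range supplies its
    	 * own (base, size) tuple; otherwise the LMB set up above is the
    	 * single range.
    	 */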
 606	do {
 607		if (is_kexec_kdump) {
 608			base = read_n_cells(n_mem_addr_cells, usm);
 609			size = read_n_cells(n_mem_size_cells, usm);
 610		}
 611
 612		nid = of_drconf_to_nid_single(lmb);
 613		fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
 614					  &nid);
 615		node_set_online(nid);
 616		sz = numa_enforce_memory_limit(base, size);
 617		if (sz)
 618			memblock_set_node(base, sz, &memblock.memory, nid);
 619	} while (--ranges);
 620}
 621
 622static int __init parse_numa_properties(void)
 623{
 624	struct device_node *memory;
 625	int default_nid = 0;
 626	unsigned long i;
 627
 628	if (numa_enabled == 0) {
 629		printk(KERN_WARNING "NUMA disabled by user\n");
 630		return -1;
 631	}
 632
 633	min_common_depth = find_min_common_depth();
 634
 635	if (min_common_depth < 0)
 636		return min_common_depth;
 637
 638	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
 639
 640	/*
 641	 * Even though we connect cpus to numa domains later in SMP
 642	 * init, we need to know the node ids now. This is because
 643	 * each node to be onlined must have NODE_DATA etc. backing it.
 644	 */
 645	for_each_present_cpu(i) {
 646		struct device_node *cpu;
 647		int nid;
 648
 649		cpu = of_get_cpu_node(i, NULL);
 650		BUG_ON(!cpu);
 651		nid = of_node_to_nid_single(cpu);
 652		of_node_put(cpu);
 653
 654		/*
 655		 * Don't fall back to default_nid yet -- we will plug
 656		 * cpus into nodes once the memory scan has discovered
 657		 * the topology.
 658		 */
 659		if (nid < 0)
 660			continue;
 661		node_set_online(nid);
 662	}
 663
 664	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
 665
 666	for_each_node_by_type(memory, "memory") {
 667		unsigned long start;
 668		unsigned long size;
 669		int nid;
 670		int ranges;
 671		const __be32 *memcell_buf;
 672		unsigned int len;
 673
 674		memcell_buf = of_get_property(memory,
 675			"linux,usable-memory", &len);
 676		if (!memcell_buf || len <= 0)
 677			memcell_buf = of_get_property(memory, "reg", &len);
 678		if (!memcell_buf || len <= 0)
 679			continue;
 680
 681		/* number of (base, size) ranges; len is in bytes */
 682		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
 683new_range:
 684		/* these are order-sensitive, and modify the buffer pointer */
 685		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
 686		size = read_n_cells(n_mem_size_cells, &memcell_buf);
 687
 688		/*
 689		 * Assumption: either all memory nodes or none will
 690		 * have associativity properties.  If none, then
 691		 * everything goes to default_nid.
 692		 */
 693		nid = of_node_to_nid_single(memory);
 694		if (nid < 0)
 695			nid = default_nid;
 696
 697		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
 698		node_set_online(nid);
 699
 700		size = numa_enforce_memory_limit(start, size);
 701		if (size)
 702			memblock_set_node(start, size, &memblock.memory, nid);
 703
 704		if (--ranges)
 705			goto new_range;
 706	}
 707
 708	/*
 709	 * Now do the same thing for each MEMBLOCK listed in the
 710	 * ibm,dynamic-memory property in the
 711	 * ibm,dynamic-reconfiguration-memory node.
 712	 */
 713	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
 714	if (memory) {
 715		walk_drmem_lmbs(memory, numa_setup_drmem_lmb);
 716		of_node_put(memory);
 717	}
 718
 719	return 0;
 720}
 721
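    /*
     * Fallback when no usable NUMA properties were found: place every
     * memblock region on node 0, unless a "numa=fake=" command line
     * carves the address space into fake nodes.
     */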
 722static void __init setup_nonnuma(void)
 723{
 724	unsigned long top_of_ram = memblock_end_of_DRAM();
 725	unsigned long total_ram = memblock_phys_mem_size();
 726	unsigned long start_pfn, end_pfn;
 727	unsigned int nid = 0;
 728	struct memblock_region *reg;
 729
 730	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
 731	       top_of_ram, total_ram);
 732	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 733	       (top_of_ram - total_ram) >> 20);
 734
 735	for_each_memblock(memory, reg) {
 736		start_pfn = memblock_region_memory_base_pfn(reg);
 737		end_pfn = memblock_region_memory_end_pfn(reg);
 738
 739		fake_numa_create_new_node(end_pfn, &nid);
 740		memblock_set_node(PFN_PHYS(start_pfn),
 741				  PFN_PHYS(end_pfn - start_pfn),
 742				  &memblock.memory, nid);
 743		node_set_online(nid);
 744	}
 745}
 746
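    /*
     * Print the CPUs belonging to each online node, collapsing
     * consecutive ids into ranges, e.g. "Node 0 CPUs: 0-7".
     */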
 747void __init dump_numa_cpu_topology(void)
 748{
 749	unsigned int node;
 750	unsigned int cpu, count;
 751
 752	if (min_common_depth == -1 || !numa_enabled)
 753		return;
 754
 755	for_each_online_node(node) {
 756		pr_info("Node %d CPUs:", node);
 757
 758		count = 0;
 759		/*
 760		 * If we used a CPU iterator here we would miss printing
 761		 * the holes in the cpumap.
 762		 */
 763		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
 764			if (cpumask_test_cpu(cpu,
 765					node_to_cpumask_map[node])) {
 766				if (count == 0)
 767					pr_cont(" %u", cpu);
 768				++count;
 769			} else {
 770				if (count > 1)
 771					pr_cont("-%u", cpu - 1);
 772				count = 0;
 773			}
 774		}
 775
 776		if (count > 1)
 777			pr_cont("-%u", nr_cpu_ids - 1);
 778		pr_cont("\n");
 779	}
 780}
 781
 782/* Initialize NODE_DATA for a node, allocating from node-local memory if possible */
 783static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
 784{
 785	u64 spanned_pages = end_pfn - start_pfn;
 786	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
 787	u64 nd_pa;
 788	void *nd;
 789	int tnid;
 790
 791	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
 792	nd = __va(nd_pa);
 793
 794	/* report and initialize */
 795	pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
 796		nd_pa, nd_pa + nd_size - 1);
 797	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
 798	if (tnid != nid)
 799		pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);
 800
 801	node_data[nid] = nd;
 802	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
 803	NODE_DATA(nid)->node_id = nid;
 804	NODE_DATA(nid)->node_start_pfn = start_pfn;
 805	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
 806}
 807
 808static void __init find_possible_nodes(void)
 809{
 810	struct device_node *rtas;
 811	u32 numnodes, i;
 812
 813	if (min_common_depth <= 0)
 814		return;
 815
 816	rtas = of_find_node_by_path("/rtas");
 817	if (!rtas)
 818		return;
 819
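    	/*
    	 * The value at index min_common_depth in
    	 * ibm,max-associativity-domains is the maximum number of NUMA
    	 * domains the platform may ever present at the node level.
    	 */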
 820	if (of_property_read_u32_index(rtas,
 821				"ibm,max-associativity-domains",
 822				min_common_depth, &numnodes))
 823		goto out;
 824
 825	for (i = 0; i < numnodes; i++) {
 826		if (!node_possible(i))
 827			node_set(i, node_possible_map);
 828	}
 829
 830out:
 831	of_node_put(rtas);
 832}
 833
 834void __init mem_topology_setup(void)
 835{
 836	int cpu;
 837
 838	if (parse_numa_properties())
 839		setup_nonnuma();
 840
 841	/*
 842	 * Modify the set of possible NUMA nodes to reflect information
 843	 * available about the set of online nodes, and the set of nodes
 844	 * that we expect to make use of for this platform's affinity
 845	 * calculations.
 846	 */
 847	nodes_and(node_possible_map, node_possible_map, node_online_map);
 848
 849	find_possible_nodes();
 850
 851	setup_node_to_cpumask_map();
 852
 853	reset_numa_cpu_lookup_table();
 854
 855	for_each_present_cpu(cpu)
 856		numa_setup_cpu(cpu);
 857}
 858
 859void __init initmem_init(void)
 860{
 861	int nid;
 862
 863	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
 864	max_pfn = max_low_pfn;
 865
 866	memblock_dump_all();
 867
 868	for_each_online_node(nid) {
 869		unsigned long start_pfn, end_pfn;
 870
 871		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 872		setup_node_data(nid, start_pfn, end_pfn);
 873		sparse_memory_present_with_active_regions(nid);
 874	}
 875
 876	sparse_init();
 877
 878	/*
 879	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
 880	 * even before we online them, so that we can use cpu_to_{node,mem}
 881	 * early in boot, cf. smp_prepare_cpus().
 882	 * _nocalls() + manual invocation is used because cpuhp is not yet
 883	 * initialized for the boot CPU.
 884	 */
 885	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
 886				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
 887}
 888
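    /*
     * Parse the "numa=" early parameter.  The options may be combined,
     * e.g. "numa=off" or "numa=fake=1G,2G,4G" (hypothetical boundaries):
     *   off   - disable NUMA
     *   debug - enable dbg() output
     *   fake= - ascending memory boundaries handed to
     *           fake_numa_create_new_node() to synthesize nodes
     */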
 889static int __init early_numa(char *p)
 890{
 891	if (!p)
 892		return 0;
 893
 894	if (strstr(p, "off"))
 895		numa_enabled = 0;
 896
 897	if (strstr(p, "debug"))
 898		numa_debug = 1;
 899
 900	p = strstr(p, "fake=");
 901	if (p)
 902		cmdline = p + strlen("fake=");
 903
 904	return 0;
 905}
 906early_param("numa", early_numa);
 907
 908static bool topology_updates_enabled = true;
 909
 910static int __init early_topology_updates(char *p)
 911{
 912	if (!p)
 913		return 0;
 914
 915	if (!strcmp(p, "off")) {
 916		pr_info("Disabling topology updates\n");
 917		topology_updates_enabled = false;
 918	}
 919
 920	return 0;
 921}
 922early_param("topology_updates", early_topology_updates);
 923
 924#ifdef CONFIG_MEMORY_HOTPLUG
 925/*
 926 * Find the node associated with a hot added memory section for
 927 * memory represented in the device tree by the property
 928 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 929 */
 930static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
 931{
 932	struct drmem_lmb *lmb;
 933	unsigned long lmb_size;
 934	int nid = -1;
 935
 936	lmb_size = drmem_lmb_size();
 937
 938	for_each_drmem_lmb(lmb) {
 939		/* skip this block if it is reserved or not assigned to
 940		 * this partition */
 941		if ((lmb->flags & DRCONF_MEM_RESERVED)
 942		    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
 943			continue;
 944
 945		if ((scn_addr < lmb->base_addr)
 946		    || (scn_addr >= (lmb->base_addr + lmb_size)))
 947			continue;
 948
 949		nid = of_drconf_to_nid_single(lmb);
 950		break;
 951	}
 952
 953	return nid;
 954}
 955
 956/*
 957 * Find the node associated with a hot added memory section for memory
 958 * represented in the device tree as a node (i.e. memory@XXXX) for
 959 * each memblock.
 960 */
 961static int hot_add_node_scn_to_nid(unsigned long scn_addr)
 962{
 963	struct device_node *memory;
 964	int nid = -1;
 965
 966	for_each_node_by_type(memory, "memory") {
 967		unsigned long start, size;
 968		int ranges;
 969		const __be32 *memcell_buf;
 970		unsigned int len;
 971
 972		memcell_buf = of_get_property(memory, "reg", &len);
 973		if (!memcell_buf || len <= 0)
 974			continue;
 975
 977		/* number of (base, size) ranges; len is in bytes */
 977		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
 978
 979		while (ranges--) {
 980			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
 981			size = read_n_cells(n_mem_size_cells, &memcell_buf);
 982
 983			if ((scn_addr < start) || (scn_addr >= (start + size)))
 984				continue;
 985
 986			nid = of_node_to_nid_single(memory);
 987			break;
 988		}
 989
 990		if (nid >= 0)
 991			break;
 992	}
 993
 994	of_node_put(memory);
 995
 996	return nid;
 997}
 998
 999/*
1000 * Find the node associated with a hot added memory section.  Section
1001 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
1002 * sections are fully contained within a single MEMBLOCK.
1003 */
1004int hot_add_scn_to_nid(unsigned long scn_addr)
1005{
1006	struct device_node *memory = NULL;
1007	int nid;
1008
1009	if (!numa_enabled || (min_common_depth < 0))
1010		return first_online_node;
1011
1012	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1013	if (memory) {
1014		nid = hot_add_drconf_scn_to_nid(scn_addr);
1015		of_node_put(memory);
1016	} else {
1017		nid = hot_add_node_scn_to_nid(scn_addr);
1018	}
1019
1020	if (nid < 0 || !node_possible(nid))
1021		nid = first_online_node;
1022
1023	return nid;
1024}
1025
1026static u64 hot_add_drconf_memory_max(void)
1027{
1028	struct device_node *memory = NULL;
1029	struct device_node *dn = NULL;
1030	const __be64 *lrdr = NULL;
1031
1032	dn = of_find_node_by_path("/rtas");
1033	if (dn) {
1034		lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
1035		of_node_put(dn);
1036		if (lrdr)
1037			return be64_to_cpup(lrdr);
1038	}
1039
1040	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1041	if (memory) {
1042		of_node_put(memory);
1043		return drmem_lmb_memory_max();
1044	}
1045	return 0;
1046}
1047
1048/*
1049 * memory_hotplug_max - return max address of memory that may be added
1050 *
1051 * This is currently only used on systems that support drconfig memory
1052 * hotplug.
1053 */
1054u64 memory_hotplug_max(void)
1055{
1056	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
1057}
1058#endif /* CONFIG_MEMORY_HOTPLUG */
1059
1060/* Virtual Processor Home Node (VPHN) support */
1061#ifdef CONFIG_PPC_SPLPAR
1062
1063#include "vphn.h"
1064
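    /*
     * One entry per CPU whose home node may have changed; entries are
     * chained so that a single stop_machine() call can apply them all.
     */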
1065struct topology_update_data {
1066	struct topology_update_data *next;
1067	unsigned int cpu;
1068	int old_nid;
1069	int new_nid;
1070};
1071
1072#define TOPOLOGY_DEF_TIMER_SECS	60
1073
1074static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
1075static cpumask_t cpu_associativity_changes_mask;
1076static int vphn_enabled;
1077static int prrn_enabled;
1078static void reset_topology_timer(void);
1079static int topology_timer_secs = 1;
1080static int topology_inited;
1081static int topology_update_needed;
1082
1083/*
1084 * Change polling interval for associativity changes.
1085 */
1086int timed_topology_update(int nsecs)
1087{
1088	if (vphn_enabled) {
1089		if (nsecs > 0)
1090			topology_timer_secs = nsecs;
1091		else
1092			topology_timer_secs = TOPOLOGY_DEF_TIMER_SECS;
1093
1094		reset_topology_timer();
1095	}
1096
1097	return 0;
1098}
1099
1100/*
1101 * Snapshot the hypervisor's current associativity change counters
1102 * into our local copy.
1103 */
1104static void setup_cpu_associativity_change_counters(void)
1105{
1106	int cpu;
1107
1108	/* The VPHN feature supports a maximum of 8 reference points */
1109	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
1110
1111	for_each_possible_cpu(cpu) {
1112		int i;
1113		u8 *counts = vphn_cpu_change_counts[cpu];
1114		volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts;
1115
1116		for (i = 0; i < distance_ref_points_depth; i++)
1117			counts[i] = hypervisor_counts[i];
1118	}
1119}
1120
1121/*
1122 * The hypervisor maintains a set of 8 associativity change counters in
1123 * the VPA of each cpu that correspond to the associativity levels in the
1124 * ibm,associativity-reference-points property. When an associativity
1125 * level changes, the corresponding counter is incremented.
1126 *
1127 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
1128 * node associativity levels have changed.
1129 *
1130 * Returns the number of cpus with unhandled associativity changes.
1131 */
1132static int update_cpu_associativity_changes_mask(void)
1133{
1134	int cpu;
1135	cpumask_t *changes = &cpu_associativity_changes_mask;
1136
1137	for_each_possible_cpu(cpu) {
1138		int i, changed = 0;
1139		u8 *counts = vphn_cpu_change_counts[cpu];
1140		volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts;
1141
1142		for (i = 0; i < distance_ref_points_depth; i++) {
1143			if (hypervisor_counts[i] != counts[i]) {
1144				counts[i] = hypervisor_counts[i];
1145				changed = 1;
1146			}
1147		}
1148		if (changed) {
1149			cpumask_or(changes, changes, cpu_sibling_mask(cpu));
1150			cpu = cpu_last_thread_sibling(cpu);
1151		}
1152	}
1153
1154	return cpumask_weight(changes);
1155}
1156
1157/*
1158 * Retrieve the new associativity information for a virtual processor's
1159 * home node.
1160 */
1161static long hcall_vphn(unsigned long cpu, __be32 *associativity)
1162{
1163	long rc;
1164	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
1165	u64 flags = 1;
1166	int hwcpu = get_hard_smp_processor_id(cpu);
1167
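    	/*
    	 * The hypervisor returns the home node associativity for hwcpu
    	 * packed into retbuf; unpack it into the caller's buffer.
    	 */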
1168	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
1169	vphn_unpack_associativity(retbuf, associativity);
1170
1171	return rc;
1172}
1173
1174static long vphn_get_associativity(unsigned long cpu,
1175					__be32 *associativity)
1176{
1177	long rc;
1178
1179	rc = hcall_vphn(cpu, associativity);
1180
1181	switch (rc) {
1182	case H_FUNCTION:
1183		printk(KERN_INFO
1184			"VPHN is not supported. Disabling polling...\n");
1185		stop_topology_update();
1186		break;
1187	case H_HARDWARE:
1188		printk(KERN_ERR
1189			"hcall_vphn() experienced a hardware fault "
1190			"preventing VPHN. Disabling polling...\n");
1191		stop_topology_update();
1192		break;
1193	case H_SUCCESS:
1194		dbg("VPHN hcall succeeded. Reset polling...\n");
1195		timed_topology_update(0);
1196		break;
1197	}
1198
1199	return rc;
1200}
1201
1202int find_and_online_cpu_nid(int cpu)
1203{
1204	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1205	int new_nid;
1206
1207	/* Use associativity from first thread for all siblings */
1208	vphn_get_associativity(cpu, associativity);
1209	new_nid = associativity_to_nid(associativity);
1210	if (new_nid < 0 || !node_possible(new_nid))
1211		new_nid = first_online_node;
1212
1213	if (NODE_DATA(new_nid) == NULL) {
1214#ifdef CONFIG_MEMORY_HOTPLUG
1215		/*
1216		 * Need to ensure that NODE_DATA is initialized for a node from
1217		 * available memory (see memblock_alloc_try_nid). If unable to
1218		 * init the node, then default to nearest node that has memory
1219		 * installed.
1220		 */
1221		if (try_online_node(new_nid))
1222			new_nid = first_online_node;
1223#else
1224		/*
1225		 * Default to using the nearest node that has memory installed.
1226		 * Otherwise, it would be necessary to patch the kernel MM code
1227		 * to deal with more memoryless-node error conditions.
1228		 */
1229		new_nid = first_online_node;
1230#endif
1231	}
1232
1233	pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__,
1234		cpu, new_nid);
1235	return new_nid;
1236}
1237
1238/*
1239 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
1240 * characteristics change. This function doesn't perform any locking and is
1241 * only safe to call from stop_machine().
1242 */
1243static int update_cpu_topology(void *data)
1244{
1245	struct topology_update_data *update;
1246	unsigned long cpu;
1247
1248	if (!data)
1249		return -EINVAL;
1250
1251	cpu = smp_processor_id();
1252
1253	for (update = data; update; update = update->next) {
1254		int new_nid = update->new_nid;
1255		if (cpu != update->cpu)
1256			continue;
1257
1258		unmap_cpu_from_node(cpu);
1259		map_cpu_to_node(cpu, new_nid);
1260		set_cpu_numa_node(cpu, new_nid);
1261		set_cpu_numa_mem(cpu, local_memory_node(new_nid));
1262		vdso_getcpu_init();
1263	}
1264
1265	return 0;
1266}
1267
1268static int update_lookup_table(void *data)
1269{
1270	struct topology_update_data *update;
1271
1272	if (!data)
1273		return -EINVAL;
1274
1275	/*
1276	 * Upon topology update, the numa-cpu lookup table needs to be updated
1277	 * for all threads in the core, including offline CPUs, to ensure that
1278	 * future hotplug operations respect the cpu-to-node associativity
1279	 * properly.
1280	 */
1281	for (update = data; update; update = update->next) {
1282		int nid, base, j;
1283
1284		nid = update->new_nid;
1285		base = cpu_first_thread_sibling(update->cpu);
1286
1287		for (j = 0; j < threads_per_core; j++) {
1288			update_numa_cpu_lookup_table(base + j, nid);
1289		}
1290	}
1291
1292	return 0;
1293}
1294
1295/*
1296 * Update the node maps and sysfs entries for each cpu whose home node
1297 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
1298 *
1299 * cpus_locked says whether we already hold cpu_hotplug_lock.
1300 */
1301int numa_update_cpu_topology(bool cpus_locked)
1302{
1303	unsigned int cpu, sibling, changed = 0;
1304	struct topology_update_data *updates, *ud;
1305	cpumask_t updated_cpus;
1306	struct device *dev;
1307	int weight, new_nid, i = 0;
1308
1309	if (!prrn_enabled && !vphn_enabled) {
1310		if (!topology_inited)
1311			topology_update_needed = 1;
1312		return 0;
1313	}
1314
1315	weight = cpumask_weight(&cpu_associativity_changes_mask);
1316	if (!weight)
1317		return 0;
1318
1319	updates = kcalloc(weight, sizeof(*updates), GFP_KERNEL);
1320	if (!updates)
1321		return 0;
1322
1323	cpumask_clear(&updated_cpus);
1324
1325	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
1326		/*
1327		 * If siblings aren't flagged for changes, the updates list
1328		 * will be too short. Skip this cpu on this update and flag
1329		 * its siblings for the next one.
1330		 */
1331		if (!cpumask_subset(cpu_sibling_mask(cpu),
1332					&cpu_associativity_changes_mask)) {
1333			pr_info("Sibling bits not set for associativity "
1334					"change, cpu%d\n", cpu);
1335			cpumask_or(&cpu_associativity_changes_mask,
1336					&cpu_associativity_changes_mask,
1337					cpu_sibling_mask(cpu));
1338			cpu = cpu_last_thread_sibling(cpu);
1339			continue;
1340		}
1341
1342		new_nid = find_and_online_cpu_nid(cpu);
1343
1344		if (new_nid == numa_cpu_lookup_table[cpu]) {
1345			cpumask_andnot(&cpu_associativity_changes_mask,
1346					&cpu_associativity_changes_mask,
1347					cpu_sibling_mask(cpu));
1348			dbg("Assoc chg gives same node %d for cpu%d\n",
1349					new_nid, cpu);
1350			cpu = cpu_last_thread_sibling(cpu);
1351			continue;
1352		}
1353
1354		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
1355			ud = &updates[i++];
1356			ud->next = &updates[i];
1357			ud->cpu = sibling;
1358			ud->new_nid = new_nid;
1359			ud->old_nid = numa_cpu_lookup_table[sibling];
1360			cpumask_set_cpu(sibling, &updated_cpus);
1361		}
1362		cpu = cpu_last_thread_sibling(cpu);
1363	}
1364
1365	/*
1366	 * Terminate the list so that walking 'updates' stops at the
1367	 * last entry that was actually filled in.
1368	 */
1369	if (i)
1370		updates[i-1].next = NULL;
1371
1372	pr_debug("Topology update for the following CPUs:\n");
1373	if (cpumask_weight(&updated_cpus)) {
1374		for (ud = &updates[0]; ud; ud = ud->next) {
1375			pr_debug("cpu %d moving from node %d "
1376					  "to %d\n", ud->cpu,
1377					  ud->old_nid, ud->new_nid);
1378		}
1379	}
1380
1381	/*
1382	 * In cases where we have nothing to update (because the updates list
1383	 * is too short or because the new topology is the same as the old
1384	 * one), skip invoking update_cpu_topology() via stop-machine(). This
1385	 * is necessary (and not just a fast-path optimization) since
1386	 * stop-machine can end up electing a random CPU to run
1387	 * update_cpu_topology(), and thus trick us into setting up incorrect
1388	 * cpu-node mappings ('updates' is zero-filled by kcalloc()).
1389	 *
1390	 * For the same reason, we also skip all of the updating below.
1391	 */
1392	if (!cpumask_weight(&updated_cpus))
1393		goto out;
1394
1395	if (cpus_locked)
1396		stop_machine_cpuslocked(update_cpu_topology, &updates[0],
1397					&updated_cpus);
1398	else
1399		stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
1400
1401	/*
1402	 * Update the numa-cpu lookup table with the new mappings, even for
1403	 * offline CPUs. It is best to perform this update from the stop-
1404	 * machine context.
1405	 */
1406	if (cpus_locked)
1407		stop_machine_cpuslocked(update_lookup_table, &updates[0],
1408					cpumask_of(raw_smp_processor_id()));
1409	else
1410		stop_machine(update_lookup_table, &updates[0],
1411			     cpumask_of(raw_smp_processor_id()));
1412
1413	for (ud = &updates[0]; ud; ud = ud->next) {
1414		unregister_cpu_under_node(ud->cpu, ud->old_nid);
1415		register_cpu_under_node(ud->cpu, ud->new_nid);
1416
1417		dev = get_cpu_device(ud->cpu);
1418		if (dev)
1419			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
1420		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
1421		changed = 1;
1422	}
1423
1424out:
1425	kfree(updates);
1426	topology_update_needed = 0;
1427	return changed;
1428}
1429
1430int arch_update_cpu_topology(void)
1431{
1432	return numa_update_cpu_topology(true);
1433}
1434
1435static void topology_work_fn(struct work_struct *work)
1436{
1437	rebuild_sched_domains();
1438}
1439static DECLARE_WORK(topology_work, topology_work_fn);
1440
1441static void topology_schedule_update(void)
1442{
1443	schedule_work(&topology_work);
1444}
1445
1446static void topology_timer_fn(struct timer_list *unused)
1447{
1448	if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
1449		topology_schedule_update();
1450	else if (vphn_enabled) {
1451		if (update_cpu_associativity_changes_mask() > 0)
1452			topology_schedule_update();
1453		reset_topology_timer();
1454	}
1455}
1456static struct timer_list topology_timer;
1457
1458static void reset_topology_timer(void)
1459{
1460	mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
1461}
1462
1463#ifdef CONFIG_SMP
1464
1465static void stage_topology_update(int core_id)
1466{
1467	cpumask_or(&cpu_associativity_changes_mask,
1468		&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
1469	reset_topology_timer();
1470}
1471
1472static int dt_update_callback(struct notifier_block *nb,
1473				unsigned long action, void *data)
1474{
1475	struct of_reconfig_data *update = data;
1476	int rc = NOTIFY_DONE;
1477
1478	switch (action) {
1479	case OF_RECONFIG_UPDATE_PROPERTY:
1480		if (!of_prop_cmp(update->dn->type, "cpu") &&
1481		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
1482			u32 core_id;
1483			of_property_read_u32(update->dn, "reg", &core_id);
1484			stage_topology_update(core_id);
1485			rc = NOTIFY_OK;
1486		}
1487		break;
1488	}
1489
1490	return rc;
1491}
1492
1493static struct notifier_block dt_update_nb = {
1494	.notifier_call = dt_update_callback,
1495};
1496
1497#endif
1498
1499/*
1500 * Start polling for associativity changes.
1501 */
1502int start_topology_update(void)
1503{
1504	int rc = 0;
1505
1506	if (firmware_has_feature(FW_FEATURE_PRRN)) {
1507		if (!prrn_enabled) {
1508			prrn_enabled = 1;
1509#ifdef CONFIG_SMP
1510			rc = of_reconfig_notifier_register(&dt_update_nb);
1511#endif
1512		}
1513	}
1514	if (firmware_has_feature(FW_FEATURE_VPHN) &&
1515		   lppaca_shared_proc(get_lppaca())) {
1516		if (!vphn_enabled) {
1517			vphn_enabled = 1;
1518			setup_cpu_associativity_change_counters();
1519			timer_setup(&topology_timer, topology_timer_fn,
1520				    TIMER_DEFERRABLE);
1521			reset_topology_timer();
1522		}
1523	}
1524
1525	return rc;
1526}
1527
1528/*
1529 * Disable polling for VPHN associativity changes.
1530 */
1531int stop_topology_update(void)
1532{
1533	int rc = 0;
1534
1535	if (prrn_enabled) {
1536		prrn_enabled = 0;
1537#ifdef CONFIG_SMP
1538		rc = of_reconfig_notifier_unregister(&dt_update_nb);
1539#endif
1540	}
1541	if (vphn_enabled) {
1542		vphn_enabled = 0;
1543		rc = del_timer_sync(&topology_timer);
1544	}
1545
1546	return rc;
1547}
1548
1549int prrn_is_enabled(void)
1550{
1551	return prrn_enabled;
1552}
1553
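    /*
     * /proc/powerpc/topology_updates: reading reports whether polling
     * is active ("on"/"off"); writing "on" or "off" starts or stops it.
     */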
1554static int topology_read(struct seq_file *file, void *v)
1555{
1556	if (vphn_enabled || prrn_enabled)
1557		seq_puts(file, "on\n");
1558	else
1559		seq_puts(file, "off\n");
1560
1561	return 0;
1562}
1563
1564static int topology_open(struct inode *inode, struct file *file)
1565{
1566	return single_open(file, topology_read, NULL);
1567}
1568
1569static ssize_t topology_write(struct file *file, const char __user *buf,
1570			      size_t count, loff_t *off)
1571{
1572	char kbuf[4]; /* "on" or "off" plus null. */
1573	int read_len;
1574
1575	read_len = count < 3 ? count : 3;
1576	if (copy_from_user(kbuf, buf, read_len))
1577		return -EINVAL;
1578
1579	kbuf[read_len] = '\0';
1580
1581	if (!strncmp(kbuf, "on", 2))
1582		start_topology_update();
1583	else if (!strncmp(kbuf, "off", 3))
1584		stop_topology_update();
1585	else
1586		return -EINVAL;
1587
1588	return count;
1589}
1590
1591static const struct file_operations topology_ops = {
1592	.read = seq_read,
1593	.write = topology_write,
1594	.open = topology_open,
1595	.release = single_release
1596};
1597
1598static int topology_update_init(void)
1599{
1600	/* Do not poll for changes if disabled at boot */
1601	if (topology_updates_enabled)
1602		start_topology_update();
1603
1604	if (vphn_enabled)
1605		topology_schedule_update();
1606
1607	if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
1608		return -ENOMEM;
1609
1610	topology_inited = 1;
1611	if (topology_update_needed)
1612		bitmap_fill(cpumask_bits(&cpu_associativity_changes_mask),
1613					nr_cpumask_bits);
1614
1615	return 0;
1616}
1617device_initcall(topology_update_init);
1618#endif /* CONFIG_PPC_SPLPAR */