v3.1
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/node.h>
#include <linux/cpu.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/pci.h>
#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/timex.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <hv/hypervisor.h>
#include <arch/interrupts.h>

/* <linux/smp.h> doesn't provide this definition. */
#ifndef CONFIG_SMP
#define setup_max_cpus 1
#endif

static inline int ABS(int x) { return x >= 0 ? x : -x; }

/* Chip information */
char chip_model[64] __write_once;

struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

/* We only create bootmem data on node 0. */
static bootmem_data_t __initdata node0_bdata;

/* Information on the NUMA nodes that we compute early */
unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES];
unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES];
unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
unsigned long __initdata node_free_pfn[MAX_NUMNODES];

static unsigned long __initdata node_percpu[MAX_NUMNODES];

#ifdef CONFIG_HIGHMEM
/* Page frame index of end of lowmem on each controller. */
unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES];

/* Number of pages that can be mapped into lowmem. */
static unsigned long __initdata mappable_physpages;
#endif

/* Data on which physical memory controller corresponds to which NUMA node */
int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };

#ifdef CONFIG_HIGHMEM
/* Map information from VAs to PAs */
unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
  __write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(pbase_map);

/* Map information from PAs to VAs */
void *vbase_map[NR_PA_HIGHBIT_VALUES]
  __write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(vbase_map);
#endif

/* Node number as a function of the high PA bits */
int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once;
EXPORT_SYMBOL(highbits_to_node);

static unsigned int __initdata maxmem_pfn = -1U;
static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = {
	[0 ... MAX_NUMNODES-1] = -1U
};
static nodemask_t __initdata isolnodes;

#ifdef CONFIG_PCI
enum { DEFAULT_PCI_RESERVE_MB = 64 };
static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB;
unsigned long __initdata pci_reserve_start_pfn = -1U;
unsigned long __initdata pci_reserve_end_pfn = -1U;
#endif

static int __init setup_maxmem(char *str)
{
	long maxmem_mb;
	if (str == NULL || strict_strtol(str, 0, &maxmem_mb) != 0 ||
	    maxmem_mb == 0)
		return -EINVAL;

	maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) <<
		(HPAGE_SHIFT - PAGE_SHIFT);
	pr_info("Forcing RAM used to no more than %dMB\n",
	       maxmem_pfn >> (20 - PAGE_SHIFT));
	return 0;
}
early_param("maxmem", setup_maxmem);

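/*
 * Editor's worked example (not in the original source): assuming the
 * typical tile values PAGE_SHIFT = 12 and HPAGE_SHIFT = 24, booting
 * with "maxmem=2048" yields
 *   maxmem_pfn = (2048 >> 4) << 12 = 128 << 12 = 524288 pages,
 * i.e. exactly 2GB of 4KB pages, rounded to whole 16MB huge pages.
 */
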
static int __init setup_maxnodemem(char *str)
{
	char *endp;
	long maxnodemem_mb, node;

	node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
	if (node >= MAX_NUMNODES || *endp != ':' ||
	    strict_strtol(endp+1, 0, &maxnodemem_mb) != 0)
		return -EINVAL;

	maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) <<
		(HPAGE_SHIFT - PAGE_SHIFT);
	pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
	       node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
	return 0;
}
early_param("maxnodemem", setup_maxnodemem);

static int __init setup_isolnodes(char *str)
{
	char buf[MAX_NUMNODES * 5];
	if (str == NULL || nodelist_parse(str, isolnodes) != 0)
		return -EINVAL;

	nodelist_scnprintf(buf, sizeof(buf), isolnodes);
	pr_info("Set isolnodes value to '%s'\n", buf);
	return 0;
}
early_param("isolnodes", setup_isolnodes);

#ifdef CONFIG_PCI
static int __init setup_pci_reserve(char* str)
{
	unsigned long mb;

	if (str == NULL || strict_strtoul(str, 0, &mb) != 0 ||
	    mb > 3 * 1024)
		return -EINVAL;

	pci_reserve_mb = mb;
	pr_info("Reserving %dMB for PCIE root complex mappings\n",
	       pci_reserve_mb);
	return 0;
}
early_param("pci_reserve", setup_pci_reserve);
#endif

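/*
 * Editor's usage example (hypothetical value): booting with
 * "pci_reserve=128" reserves a 128MB window just below 4GB for PCIe
 * root complex mappings; see the pci_reserve_{start,end}_pfn
 * computation in setup_arch() below.
 */
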
#ifndef __tilegx__
/*
 * vmalloc=size forces the vmalloc area to be exactly 'size' bytes.
 * This can be used to increase (or decrease) the vmalloc area.
 */
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK;

	/* See validate_va() for more on this test. */
	if ((long)_VMALLOC_START >= 0)
		early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n",
			    VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL);

	return 0;
}
early_param("vmalloc", parse_vmalloc);
#endif

#ifdef CONFIG_HIGHMEM
/*
 * Determine for each controller where its lowmem is mapped and how much of
 * it is mapped there.  On controller zero, the first few megabytes are
 * already mapped in as code at MEM_SV_INTRPT, so in principle we could
 * start our data mappings higher up, but for now we don't bother, to avoid
 * additional confusion.
 *
 * One question is whether, on systems with more than 768 MB and
 * controllers of different sizes, to map in a proportionate amount of
 * each one, or to try to map the same amount from each controller.
 * (E.g. if we have three controllers with 256MB, 1GB, and 256MB
 * respectively, do we map 256MB from each, or do we map 128 MB, 512
 * MB, and 128 MB respectively?)  For now we use a proportionate
 * solution like the latter.
 *
 * The VA/PA mapping demands that we align our decisions at 16 MB
 * boundaries so that we can rapidly convert VA to PA.
 */
static void *__init setup_pa_va_mapping(void)
{
	unsigned long curr_pages = 0;
	unsigned long vaddr = PAGE_OFFSET;
	nodemask_t highonlynodes = isolnodes;
	int i, j;

	memset(pbase_map, -1, sizeof(pbase_map));
	memset(vbase_map, -1, sizeof(vbase_map));

	/* Node zero cannot be isolated for LOWMEM purposes. */
	node_clear(0, highonlynodes);

	/* Count up the number of pages on non-highonlynodes controllers. */
	mappable_physpages = 0;
	for_each_online_node(i) {
		if (!node_isset(i, highonlynodes))
			mappable_physpages +=
				node_end_pfn[i] - node_start_pfn[i];
	}

	for_each_online_node(i) {
		unsigned long start = node_start_pfn[i];
		unsigned long end = node_end_pfn[i];
		unsigned long size = end - start;
		unsigned long vaddr_end;

		if (node_isset(i, highonlynodes)) {
			/* Mark this controller as having no lowmem. */
			node_lowmem_end_pfn[i] = start;
			continue;
		}

		curr_pages += size;
		if (mappable_physpages > MAXMEM_PFN) {
			vaddr_end = PAGE_OFFSET +
				(((u64)curr_pages * MAXMEM_PFN /
				  mappable_physpages)
				 << PAGE_SHIFT);
		} else {
			vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT);
		}
		for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) {
			unsigned long this_pfn =
				start + (j << HUGETLB_PAGE_ORDER);
			pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn;
			if (vbase_map[__pfn_to_highbits(this_pfn)] ==
			    (void *)-1)
				vbase_map[__pfn_to_highbits(this_pfn)] =
					(void *)(vaddr & HPAGE_MASK);
		}
		node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER);
		BUG_ON(node_lowmem_end_pfn[i] > end);
	}

	/* Return highest address of any mapped memory. */
	return (void *)vaddr;
}
#endif /* CONFIG_HIGHMEM */

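/*
 * Editor's illustrative sketch (not part of the original file): how a
 * lowmem VA can be converted back to a PFN using the pbase_map[] built
 * above.  The helper name is hypothetical.
 */
#ifdef CONFIG_HIGHMEM
static inline unsigned long example_lowmem_va_to_pfn(unsigned long va)
{
	/* High bits pick the huge page; low bits are the offset in it. */
	unsigned long hpage_pfn = pbase_map[va >> HPAGE_SHIFT];
	return hpage_pfn + ((va & ~HPAGE_MASK) >> PAGE_SHIFT);
}
#endif
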
/*
 * Register our most important memory mappings with the debug stub.
 *
 * This is up to 4 mappings for lowmem, one mapping per memory
 * controller, plus one for our text segment.
 */
static void __cpuinit store_permanent_mappings(void)
{
	int i;

	for_each_online_node(i) {
		HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
		HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i];
#else
		HV_PhysAddr high_mapped_pa = node_end_pfn[i];
#endif

		unsigned long pages = high_mapped_pa - node_start_pfn[i];
		HV_VirtAddr addr = (HV_VirtAddr) __va(pa);
		hv_store_mapping(addr, pages << PAGE_SHIFT, pa);
	}

	hv_store_mapping((HV_VirtAddr)_stext,
			 (uint32_t)(_einittext - _stext), 0);
}

/*
 * Use hv_inquire_physical() to populate node_{start,end}_pfn[]
 * and node_online_map, doing suitable sanity-checking.
 * Also set min_low_pfn, max_low_pfn, and max_pfn.
 */
static void __init setup_memory(void)
{
	int i, j;
	int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 };
#ifdef CONFIG_HIGHMEM
	long highmem_pages;
#endif
#ifndef __tilegx__
	int cap;
#endif
#if defined(CONFIG_HIGHMEM) || defined(__tilegx__)
	long lowmem_pages;
#endif

	/* We are using a char to hold the cpu_2_node[] mapping */
	BUILD_BUG_ON(MAX_NUMNODES > 127);

	/* Discover the ranges of memory available to us */
	for (i = 0; ; ++i) {
		unsigned long start, size, end, highbits;
		HV_PhysAddrRange range = hv_inquire_physical(i);
		if (range.size == 0)
			break;
#ifdef CONFIG_FLATMEM
		if (i > 0) {
			pr_err("Can't use discontiguous PAs: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
#endif
#ifndef __tilegx__
		if ((unsigned long)range.start) {
			pr_err("Range not at 4GB multiple: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
#endif
		if ((range.start & (HPAGE_SIZE-1)) != 0 ||
		    (range.size & (HPAGE_SIZE-1)) != 0) {
			unsigned long long start_pa = range.start;
			unsigned long long orig_size = range.size;
			range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
			range.size -= (range.start - start_pa);
			range.size &= HPAGE_MASK;
			pr_err("Range not hugepage-aligned: %#llx..%#llx:"
			       " now %#llx-%#llx\n",
			       start_pa, start_pa + orig_size,
			       range.start, range.start + range.size);
		}
		highbits = __pa_to_highbits(range.start);
		if (highbits >= NR_PA_HIGHBIT_VALUES) {
			pr_err("PA high bits too high: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
		if (highbits_seen[highbits]) {
			pr_err("Range overlaps in high bits: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
		highbits_seen[highbits] = 1;
		if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
			int max_size = maxnodemem_pfn[i];
			if (max_size > 0) {
				pr_err("Maxnodemem reduced node %d to"
				       " %d pages\n", i, max_size);
				range.size = PFN_PHYS(max_size);
			} else {
				pr_err("Maxnodemem disabled node %d\n", i);
				continue;
			}
		}
		if (num_physpages + PFN_DOWN(range.size) > maxmem_pfn) {
			int max_size = maxmem_pfn - num_physpages;
			if (max_size > 0) {
				pr_err("Maxmem reduced node %d to %d pages\n",
				       i, max_size);
				range.size = PFN_PHYS(max_size);
			} else {
				pr_err("Maxmem disabled node %d\n", i);
				continue;
			}
		}
		if (i >= MAX_NUMNODES) {
			pr_err("Too many PA nodes (#%d): %#llx..%#llx\n",
			       i, range.start, range.start + range.size);
			continue;
		}

		start = range.start >> PAGE_SHIFT;
		size = range.size >> PAGE_SHIFT;
		end = start + size;

#ifndef __tilegx__
		if (((HV_PhysAddr)end << PAGE_SHIFT) !=
		    (range.start + range.size)) {
			pr_err("PAs too high to represent: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
#endif
#ifdef CONFIG_PCI
		/*
		 * Blocks that overlap the pci reserved region must
		 * have enough space to hold the maximum percpu data
		 * region at the top of the range.  If there isn't
		 * enough space above the reserved region, just
		 * truncate the node.
		 */
		if (start <= pci_reserve_start_pfn &&
		    end > pci_reserve_start_pfn) {
			unsigned int per_cpu_size =
				__per_cpu_end - __per_cpu_start;
			unsigned int percpu_pages =
				NR_CPUS * PFN_UP(per_cpu_size);
			if (end < pci_reserve_end_pfn + percpu_pages) {
				end = pci_reserve_start_pfn;
				pr_err("PCI mapping region reduced node %d to"
				       " %ld pages\n", i, end - start);
			}
		}
#endif

		for (j = __pfn_to_highbits(start);
		     j <= __pfn_to_highbits(end - 1); j++)
			highbits_to_node[j] = i;

		node_start_pfn[i] = start;
		node_end_pfn[i] = end;
		node_controller[i] = range.controller;
		num_physpages += size;
		max_pfn = end;

		/* Mark node as online */
		node_set(i, node_online_map);
		node_set(i, node_possible_map);
	}

#ifndef __tilegx__
	/*
	 * For 4KB pages, mem_map "struct page" data is 1% of the size
	 * of the physical memory, so can be quite big (640 MB for
	 * four 16G zones).  These structures must be mapped in
	 * lowmem, and since we currently cap out at about 768 MB,
	 * it's impractical to try to use this much address space.
	 * For now, arbitrarily cap the amount of physical memory
	 * we're willing to use at 8 million pages (32GB of 4KB pages).
	 */
	cap = 8 * 1024 * 1024;  /* 8 million pages */
	if (num_physpages > cap) {
		int num_nodes = num_online_nodes();
		int cap_each = cap / num_nodes;
		unsigned long dropped_pages = 0;
		for (i = 0; i < num_nodes; ++i) {
			int size = node_end_pfn[i] - node_start_pfn[i];
			if (size > cap_each) {
				dropped_pages += (size - cap_each);
				node_end_pfn[i] = node_start_pfn[i] + cap_each;
			}
		}
		num_physpages -= dropped_pages;
		pr_warning("Only using %ldMB memory;"
		       " ignoring %ldMB.\n",
		       num_physpages >> (20 - PAGE_SHIFT),
		       dropped_pages >> (20 - PAGE_SHIFT));
		pr_warning("Consider using a larger page size.\n");
	}
#endif

	/* Heap starts just above the last loaded address. */
	min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET);

#ifdef CONFIG_HIGHMEM
	/* Find where we map lowmem from each controller. */
	high_memory = setup_pa_va_mapping();

	/* Set max_low_pfn based on what node 0 can directly address. */
	max_low_pfn = node_lowmem_end_pfn[0];

	lowmem_pages = (mappable_physpages > MAXMEM_PFN) ?
		MAXMEM_PFN : mappable_physpages;
	highmem_pages = (long) (num_physpages - lowmem_pages);

	pr_notice("%ldMB HIGHMEM available.\n",
	       pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
	pr_notice("%ldMB LOWMEM available.\n",
			pages_to_mb(lowmem_pages));
#else
	/* Set max_low_pfn based on what node 0 can directly address. */
	max_low_pfn = node_end_pfn[0];

#ifndef __tilegx__
	if (node_end_pfn[0] > MAXMEM_PFN) {
		pr_warning("Only using %ldMB LOWMEM.\n",
		       MAXMEM>>20);
		pr_warning("Use a HIGHMEM enabled kernel.\n");
		max_low_pfn = MAXMEM_PFN;
		max_pfn = MAXMEM_PFN;
		num_physpages = MAXMEM_PFN;
		node_end_pfn[0] = MAXMEM_PFN;
	} else {
		pr_notice("%ldMB memory available.\n",
		       pages_to_mb(node_end_pfn[0]));
	}
	for (i = 1; i < MAX_NUMNODES; ++i) {
		node_start_pfn[i] = 0;
		node_end_pfn[i] = 0;
	}
	high_memory = pfn_to_kaddr(node_end_pfn[0]);
#else
	lowmem_pages = 0;
	for (i = 0; i < MAX_NUMNODES; ++i) {
		int pages = node_end_pfn[i] - node_start_pfn[i];
		lowmem_pages += pages;
		if (pages)
			high_memory = pfn_to_kaddr(node_end_pfn[i]);
	}
	pr_notice("%ldMB memory available.\n",
	       pages_to_mb(lowmem_pages));
#endif
#endif
}

static void __init setup_bootmem_allocator(void)
{
	unsigned long bootmap_size, first_alloc_pfn, last_alloc_pfn;

	/* Provide a node 0 bdata. */
	NODE_DATA(0)->bdata = &node0_bdata;

#ifdef CONFIG_PCI
	/* Don't let boot memory alias the PCI region. */
	last_alloc_pfn = min(max_low_pfn, pci_reserve_start_pfn);
#else
	last_alloc_pfn = max_low_pfn;
#endif

	/*
	 * Initialize the boot-time allocator (with low memory only):
	 * The first argument says where to put the bitmap, and the
	 * second says where the end of allocatable memory is.
	 */
	bootmap_size = init_bootmem(min_low_pfn, last_alloc_pfn);

	/*
	 * Let the bootmem allocator use all the space we've given it
	 * except for its own bitmap.
	 */
	first_alloc_pfn = min_low_pfn + PFN_UP(bootmap_size);
	if (first_alloc_pfn >= last_alloc_pfn)
		early_panic("Not enough memory on controller 0 for bootmem\n");

	free_bootmem(PFN_PHYS(first_alloc_pfn),
		     PFN_PHYS(last_alloc_pfn - first_alloc_pfn));

#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start, resource_size(&crashk_res), 0);
#endif
}

void *__init alloc_remap(int nid, unsigned long size)
{
	int pages = node_end_pfn[nid] - node_start_pfn[nid];
	void *map = pfn_to_kaddr(node_memmap_pfn[nid]);
	BUG_ON(size != pages * sizeof(struct page));
	memset(map, 0, size);
	return map;
}

static int __init percpu_size(void)
{
	int size = __per_cpu_end - __per_cpu_start;
	size += PERCPU_MODULE_RESERVE;
	size += PERCPU_DYNAMIC_EARLY_SIZE;
	if (size < PCPU_MIN_UNIT_SIZE)
		size = PCPU_MIN_UNIT_SIZE;
	size = roundup(size, PAGE_SIZE);

	/* In several places we assume the per-cpu data fits on a huge page. */
	BUG_ON(kdata_huge && size > HPAGE_SIZE);
	return size;
}

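/*
 * Editor's worked example (typical constants for kernels of this era;
 * the exact values vary by config): with 4KB of static per-cpu data,
 * PERCPU_MODULE_RESERVE = 8KB and PERCPU_DYNAMIC_EARLY_SIZE = 12KB,
 * the 24KB total is below PCPU_MIN_UNIT_SIZE (32KB), so percpu_size()
 * returns one page-aligned 32KB unit per cpu.
 */
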
static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
{
	void *kva = __alloc_bootmem(size, PAGE_SIZE, goal);
	unsigned long pfn = kaddr_to_pfn(kva);
	BUG_ON(goal && PFN_PHYS(pfn) != goal);
	return pfn;
}

static void __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
	int size = percpu_size();
	int num_cpus = smp_height * smp_width;
	int i;

	for (i = 0; i < num_cpus; ++i)
		node_percpu[cpu_to_node(i)] += size;

	for_each_online_node(i) {
		unsigned long start = node_start_pfn[i];
		unsigned long end = node_end_pfn[i];
#ifdef CONFIG_HIGHMEM
		unsigned long lowmem_end = node_lowmem_end_pfn[i];
#else
		unsigned long lowmem_end = end;
#endif
		int memmap_size = (end - start) * sizeof(struct page);
		node_free_pfn[i] = start;

		/*
		 * Set aside pages for per-cpu data and the mem_map array.
		 *
		 * Since the per-cpu data requires special homecaching,
		 * if we are in kdata_huge mode, we put it at the end of
		 * the lowmem region.  If we're not in kdata_huge mode,
		 * we take the per-cpu pages from the bottom of the
		 * controller, since that avoids fragmenting a huge page
		 * that users might want.  We always take the memmap
		 * from the bottom of the controller, since with
		 * kdata_huge that lets it be under a huge TLB entry.
		 *
		 * If the user has requested isolnodes for a controller,
		 * though, there'll be no lowmem, so we just alloc_bootmem
		 * the memmap.  There will be no percpu memory either.
		 */
		if (__pfn_to_highbits(start) == 0) {
			/* In low PAs, allocate via bootmem. */
			unsigned long goal = 0;
			node_memmap_pfn[i] =
				alloc_bootmem_pfn(memmap_size, goal);
			if (kdata_huge)
				goal = PFN_PHYS(lowmem_end) - node_percpu[i];
			if (node_percpu[i])
				node_percpu_pfn[i] =
				    alloc_bootmem_pfn(node_percpu[i], goal);
		} else if (node_isset(i, isolnodes)) {
			node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0);
			BUG_ON(node_percpu[i] != 0);
		} else {
			/* In high PAs, just reserve some pages. */
			node_memmap_pfn[i] = node_free_pfn[i];
			node_free_pfn[i] += PFN_UP(memmap_size);
			if (!kdata_huge) {
				node_percpu_pfn[i] = node_free_pfn[i];
				node_free_pfn[i] += PFN_UP(node_percpu[i]);
			} else {
				node_percpu_pfn[i] =
					lowmem_end - PFN_UP(node_percpu[i]);
			}
		}

#ifdef CONFIG_HIGHMEM
		if (start > lowmem_end) {
			zones_size[ZONE_NORMAL] = 0;
			zones_size[ZONE_HIGHMEM] = end - start;
		} else {
			zones_size[ZONE_NORMAL] = lowmem_end - start;
			zones_size[ZONE_HIGHMEM] = end - lowmem_end;
		}
#else
		zones_size[ZONE_NORMAL] = end - start;
#endif

		/*
		 * Everyone shares node 0's bootmem allocator, but
		 * we use alloc_remap(), above, to put the actual
		 * struct page array on the individual controllers,
		 * which is most of the data that we actually care about.
		 * We can't place bootmem allocators on the other
		 * controllers since the bootmem allocator can only
		 * operate on 32-bit physical addresses.
		 */
		NODE_DATA(i)->bdata = NODE_DATA(0)->bdata;

		free_area_init_node(i, zones_size, start, NULL);
		printk(KERN_DEBUG "  Normal zone: %ld per-cpu pages\n",
		       PFN_UP(node_percpu[i]));

		/* Track the type of memory on each node */
		if (zones_size[ZONE_NORMAL])
			node_set_state(i, N_NORMAL_MEMORY);
#ifdef CONFIG_HIGHMEM
		if (end != start)
			node_set_state(i, N_HIGH_MEMORY);
#endif

		node_set_online(i);
	}
}

#ifdef CONFIG_NUMA

/* which logical CPUs are on which nodes */
struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once;
EXPORT_SYMBOL(node_2_cpu_mask);

/* which node each logical CPU is on */
char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(cpu_2_node);

/* Return cpu_to_node() except for cpus not yet assigned, which return -1 */
static int __init cpu_to_bound_node(int cpu, struct cpumask* unbound_cpus)
{
	if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus))
		return -1;
	else
		return cpu_to_node(cpu);
}

/* Return number of immediately-adjacent tiles sharing the same NUMA node. */
static int __init node_neighbors(int node, int cpu,
				 struct cpumask *unbound_cpus)
{
	int neighbors = 0;
	int w = smp_width;
	int h = smp_height;
	int x = cpu % w;
	int y = cpu / w;
	if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node)
		++neighbors;
	if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node)
		++neighbors;
	if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node)
		++neighbors;
	if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node)
		++neighbors;
	return neighbors;
}

static void __init setup_numa_mapping(void)
{
	int distance[MAX_NUMNODES][NR_CPUS];
	HV_Coord coord;
	int cpu, node, cpus, i, x, y;
	int num_nodes = num_online_nodes();
	struct cpumask unbound_cpus;
	nodemask_t default_nodes;

	cpumask_clear(&unbound_cpus);

	/* Get set of nodes we will use for defaults */
	nodes_andnot(default_nodes, node_online_map, isolnodes);
	if (nodes_empty(default_nodes)) {
		BUG_ON(!node_isset(0, node_online_map));
		pr_err("Forcing NUMA node zero available as a default node\n");
		node_set(0, default_nodes);
	}

	/* Populate the distance[] array */
	memset(distance, -1, sizeof(distance));
	cpu = 0;
	for (coord.y = 0; coord.y < smp_height; ++coord.y) {
		for (coord.x = 0; coord.x < smp_width;
		     ++coord.x, ++cpu) {
			BUG_ON(cpu >= nr_cpu_ids);
			if (!cpu_possible(cpu)) {
				cpu_2_node[cpu] = -1;
				continue;
			}
			for_each_node_mask(node, default_nodes) {
				HV_MemoryControllerInfo info =
					hv_inquire_memory_controller(
						coord, node_controller[node]);
				distance[node][cpu] =
					ABS(info.coord.x) + ABS(info.coord.y);
			}
			cpumask_set_cpu(cpu, &unbound_cpus);
		}
	}
	cpus = cpu;

	/*
	 * Round-robin through the NUMA nodes until all the cpus are
	 * assigned.  We could be more clever here (e.g. create four
	 * sorted linked lists on the same set of cpu nodes, and pull
	 * off them in round-robin sequence, removing from all four
	 * lists each time) but given the relatively small numbers
	 * involved, O(n^2) seems OK for a one-time cost.
	 */
	node = first_node(default_nodes);
	while (!cpumask_empty(&unbound_cpus)) {
		int best_cpu = -1;
		int best_distance = INT_MAX;
		for (cpu = 0; cpu < cpus; ++cpu) {
			if (cpumask_test_cpu(cpu, &unbound_cpus)) {
				/*
				 * Compute metric, which is how much
				 * closer the cpu is to this memory
				 * controller than the others, shifted
				 * up, and then the number of
				 * neighbors already in the node as an
				 * epsilon adjustment to try to keep
				 * the nodes compact.
				 */
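				/*
				 * (Editor's worked example: with two
				 * nodes and distances 3 and 5 from
				 * this cpu, d = 3*2 - 5 = 1, scaled
				 * up to 8; one neighbor already in
				 * the node brings it down to 7.)
				 */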
				int d = distance[node][cpu] * num_nodes;
				for_each_node_mask(i, default_nodes) {
					if (i != node)
						d -= distance[i][cpu];
				}
				d *= 8;  /* allow space for epsilon */
				d -= node_neighbors(node, cpu, &unbound_cpus);
				if (d < best_distance) {
					best_cpu = cpu;
					best_distance = d;
				}
			}
		}
		BUG_ON(best_cpu < 0);
		cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
		cpu_2_node[best_cpu] = node;
		cpumask_clear_cpu(best_cpu, &unbound_cpus);
		node = next_node(node, default_nodes);
		if (node == MAX_NUMNODES)
			node = first_node(default_nodes);
	}

	/* Print out node assignments and set defaults for disabled cpus */
	cpu = 0;
	for (y = 0; y < smp_height; ++y) {
		printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
		for (x = 0; x < smp_width; ++x, ++cpu) {
			if (cpu_to_node(cpu) < 0) {
				pr_cont(" -");
				cpu_2_node[cpu] = first_node(default_nodes);
			} else {
				pr_cont(" %d", cpu_to_node(cpu));
			}
		}
		pr_cont("\n");
	}
}

static struct cpu cpu_devices[NR_CPUS];

static int __init topology_init(void)
{
	int i;

	for_each_online_node(i)
		register_one_node(i);

	for (i = 0; i < smp_height * smp_width; ++i)
		register_cpu(&cpu_devices[i], i);

	return 0;
}

subsys_initcall(topology_init);

#else /* !CONFIG_NUMA */

#define setup_numa_mapping() do { } while (0)

#endif /* CONFIG_NUMA */

/**
 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
 * @boot: Is this the boot cpu?
 *
 * Called from setup_arch() on the boot cpu, or online_secondary().
 */
void __cpuinit setup_cpu(int boot)
{
	/* The boot cpu sets up its permanent mappings much earlier. */
	if (!boot)
		store_permanent_mappings();

	/* Allow asynchronous TLB interrupts. */
#if CHIP_HAS_TILE_DMA()
	arch_local_irq_unmask(INT_DMATLB_MISS);
	arch_local_irq_unmask(INT_DMATLB_ACCESS);
#endif
#if CHIP_HAS_SN_PROC()
	arch_local_irq_unmask(INT_SNITLB_MISS);
#endif
#ifdef __tilegx__
	arch_local_irq_unmask(INT_SINGLE_STEP_K);
#endif

	/*
	 * Allow user access to many generic SPRs, like the cycle
	 * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc.
	 */
	__insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1);

#if CHIP_HAS_SN()
	/* Static network is not restricted. */
	__insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
#endif
#if CHIP_HAS_SN_PROC()
	__insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1);
	__insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1);
#endif

	/*
	 * Set the MPL for interrupt control 0 & 1 to the corresponding
	 * values.  This includes access to the SYSTEM_SAVE and EX_CONTEXT
	 * SPRs, as well as the interrupt mask.
	 */
	__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
	__insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);

	/* Initialize IRQ support for this cpu. */
	setup_irq_regs();

#ifdef CONFIG_HARDWALL
	/* Reset the network state on this cpu. */
	reset_network_state();
#endif
}

#ifdef CONFIG_BLK_DEV_INITRD

static int __initdata set_initramfs_file;
static char __initdata initramfs_file[128] = "initramfs.cpio.gz";

static int __init setup_initramfs_file(char *str)
{
	if (str == NULL)
		return -EINVAL;
	strncpy(initramfs_file, str, sizeof(initramfs_file) - 1);
	set_initramfs_file = 1;

	return 0;
}
early_param("initramfs_file", setup_initramfs_file);

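/*
 * Editor's usage example (hypothetical file name): booting with
 * "initramfs_file=my-initrd.cpio.gz" makes load_hv_initrd() below look
 * up that file in the hypervisor file system instead of the default
 * "initramfs.cpio.gz".
 */
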
/*
 * We look for an additional "initramfs.cpio.gz" file in the hvfs.
 * If there is one, we allocate some memory for it and it will be
 * unpacked to the initramfs after any built-in initramfs_data.
 */
static void __init load_hv_initrd(void)
{
	HV_FS_StatInfo stat;
	int fd, rc;
	void *initrd;

	fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
	if (fd == HV_ENOENT) {
		if (set_initramfs_file)
			pr_warning("No such hvfs initramfs file '%s'\n",
				   initramfs_file);
		return;
	}
	BUG_ON(fd < 0);
	stat = hv_fs_fstat(fd);
	BUG_ON(stat.size < 0);
	if (stat.flags & HV_FS_ISDIR) {
		pr_warning("Ignoring hvfs file '%s': it's a directory.\n",
			   initramfs_file);
		return;
	}
	initrd = alloc_bootmem_pages(stat.size);
	rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0);
	if (rc != stat.size) {
		pr_err("Error reading %d bytes from hvfs file '%s': %d\n",
		       stat.size, initramfs_file, rc);
		free_initrd_mem((unsigned long) initrd, stat.size);
		return;
	}
	initrd_start = (unsigned long) initrd;
	initrd_end = initrd_start + stat.size;
}

void __init free_initrd_mem(unsigned long begin, unsigned long end)
{
	free_bootmem(__pa(begin), end - begin);
}

#else
static inline void load_hv_initrd(void) {}
#endif /* CONFIG_BLK_DEV_INITRD */

static void __init validate_hv(void)
{
	/*
	 * It may already be too late, but let's check our built-in
	 * configuration against what the hypervisor is providing.
	 */
	unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE);
	int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL);
	int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE);
	HV_ASIDRange asid_range;

#ifndef CONFIG_SMP
	HV_Topology topology = hv_inquire_topology();
	BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
	if (topology.width != 1 || topology.height != 1) {
		pr_warning("Warning: booting UP kernel on %dx%d grid;"
			   " will ignore all but first tile.\n",
			   topology.width, topology.height);
	}
#endif

	if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text)
		early_panic("Hypervisor glue size %ld is too big!\n",
			    glue_size);
	if (hv_page_size != PAGE_SIZE)
		early_panic("Hypervisor page size %#x != our %#lx\n",
			    hv_page_size, PAGE_SIZE);
	if (hv_hpage_size != HPAGE_SIZE)
		early_panic("Hypervisor huge page size %#x != our %#lx\n",
			    hv_hpage_size, HPAGE_SIZE);

#ifdef CONFIG_SMP
	/*
	 * Some hypervisor APIs take a pointer to a bitmap array
	 * whose size is at least the number of cpus on the chip.
	 * We use a struct cpumask for this, so it must be big enough.
	 */
	if ((smp_height * smp_width) > nr_cpu_ids)
		early_panic("Hypervisor %d x %d grid too big for Linux"
			    " NR_CPUS %d\n", smp_height, smp_width,
			    nr_cpu_ids);
#endif

	/*
	 * Check that we're using allowed ASIDs, and initialize the
	 * various asid variables to their appropriate initial states.
	 */
	asid_range = hv_inquire_asid(0);
	__get_cpu_var(current_asid) = min_asid = asid_range.start;
	max_asid = asid_range.start + asid_range.size - 1;

	if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
		       sizeof(chip_model)) < 0) {
		pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
		strlcpy(chip_model, "unknown", sizeof(chip_model));
	}
}

static void __init validate_va(void)
{
#ifndef __tilegx__   /* FIXME: GX: probably some validation relevant here */
	/*
	 * Similarly, make sure we're only using allowed VAs.
	 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_INTRPT,
	 * and 0 .. KERNEL_HIGH_VADDR.
	 * In addition, make sure we CAN'T use the end of memory, since
	 * we use the last chunk of each pgd for the pgd_list.
	 */
	int i, user_kernel_ok = 0;
	unsigned long max_va = 0;
	unsigned long list_va =
		((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);

	for (i = 0; ; ++i) {
		HV_VirtAddrRange range = hv_inquire_virtual(i);
		if (range.size == 0)
			break;
		if (range.start <= MEM_USER_INTRPT &&
		    range.start + range.size >= MEM_HV_INTRPT)
			user_kernel_ok = 1;
		if (range.start == 0)
			max_va = range.size;
		BUG_ON(range.start + range.size > list_va);
	}
	if (!user_kernel_ok)
		early_panic("Hypervisor not configured for user/kernel VAs\n");
	if (max_va == 0)
		early_panic("Hypervisor not configured for low VAs\n");
	if (max_va < KERNEL_HIGH_VADDR)
		early_panic("Hypervisor max VA %#lx smaller than %#lx\n",
			    max_va, KERNEL_HIGH_VADDR);

	/* Kernel PCs must have their high bit set; see intvec.S. */
	if ((long)VMALLOC_START >= 0)
		early_panic(
			"Linux VMALLOC region below the 2GB line (%#lx)!\n"
			"Reconfigure the kernel with fewer NR_HUGE_VMAPS\n"
			"or smaller VMALLOC_RESERVE.\n",
			VMALLOC_START);
#endif
}

/*
 * cpu_lotar_map lists all the cpus that are valid for the supervisor
 * to cache data on at a page level, i.e. what cpus can be placed in
 * the LOTAR field of a PTE.  It is equivalent to the set of possible
 * cpus plus any other cpus that are willing to share their cache.
 * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
 */
struct cpumask __write_once cpu_lotar_map;
EXPORT_SYMBOL(cpu_lotar_map);

#if CHIP_HAS_CBOX_HOME_MAP()
/*
 * hash_for_home_map lists all the tiles that hash-for-home data
 * will be cached on.  Note that this may include tiles that are not
 * valid for this supervisor to use otherwise (e.g. if a hypervisor
 * device is being shared between multiple supervisors).
 * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE).
 */
struct cpumask hash_for_home_map;
EXPORT_SYMBOL(hash_for_home_map);
#endif

/*
 * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
 * flush on our behalf.  It is set to cpu_possible_map OR'ed with
 * hash_for_home_map, and it is what should be passed to
 * hv_flush_remote() to flush all caches.  Note that if there are
 * dedicated hypervisor driver tiles that have authorized use of their
 * cache, those tiles will only appear in cpu_lotar_map, NOT in
 * cpu_cacheable_map, as they are a special case.
 */
struct cpumask __write_once cpu_cacheable_map;
EXPORT_SYMBOL(cpu_cacheable_map);

static __initdata struct cpumask disabled_map;

static int __init disabled_cpus(char *str)
{
	int boot_cpu = smp_processor_id();

	if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0)
		return -EINVAL;
	if (cpumask_test_cpu(boot_cpu, &disabled_map)) {
		pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu);
		cpumask_clear_cpu(boot_cpu, &disabled_map);
	}
	return 0;
}

early_param("disabled_cpus", disabled_cpus);

void __init print_disabled_cpus(void)
{
	if (!cpumask_empty(&disabled_map)) {
		char buf[100];
		cpulist_scnprintf(buf, sizeof(buf), &disabled_map);
		pr_info("CPUs not available for Linux: %s\n", buf);
	}
}

static void __init setup_cpu_maps(void)
{
	struct cpumask hv_disabled_map, cpu_possible_init;
	int boot_cpu = smp_processor_id();
	int cpus, i, rc;

	/* Learn which cpus are allowed by the hypervisor. */
	rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL,
			      (HV_VirtAddr) cpumask_bits(&cpu_possible_init),
			      sizeof(cpu_cacheable_map));
	if (rc < 0)
		early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc);
	if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init))
		early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu);

	/* Compute the cpus disabled by the hvconfig file. */
	cpumask_complement(&hv_disabled_map, &cpu_possible_init);

	/* Include them with the cpus disabled by "disabled_cpus". */
	cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map);

	/*
	 * Disable every cpu after "setup_max_cpus".  But don't mark
	 * as disabled the cpus that are outside of our initial rectangle,
	 * since that turns out to be confusing.
	 */
	cpus = 1;                          /* this cpu */
	cpumask_set_cpu(boot_cpu, &disabled_map);   /* ignore this cpu */
	for (i = 0; cpus < setup_max_cpus; ++i)
		if (!cpumask_test_cpu(i, &disabled_map))
			++cpus;
	for (; i < smp_height * smp_width; ++i)
		cpumask_set_cpu(i, &disabled_map);
	cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */
	for (i = smp_height * smp_width; i < NR_CPUS; ++i)
		cpumask_clear_cpu(i, &disabled_map);

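	/*
	 * (Editor's example: with boot cpu 0 and setup_max_cpus = 4 on
	 * an 8x8 grid with nothing else disabled, the loops above keep
	 * cpus 0-3 and mark cpus 4..63 as disabled.)
	 */
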
	/*
	 * Setup cpu_possible map as every cpu allocated to us, minus
	 * the results of any "disabled_cpus" settings.
	 */
	cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map);
	init_cpu_possible(&cpu_possible_init);

	/* Learn which cpus are valid for LOTAR caching. */
	rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR,
			      (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
			      sizeof(cpu_lotar_map));
	if (rc < 0) {
		pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
		cpu_lotar_map = cpu_possible_map;
	}

#if CHIP_HAS_CBOX_HOME_MAP()
	/* Retrieve set of CPUs used for hash-for-home caching */
	rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
			      (HV_VirtAddr) hash_for_home_map.bits,
			      sizeof(hash_for_home_map));
	if (rc < 0)
		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
	cpumask_or(&cpu_cacheable_map, &cpu_possible_map, &hash_for_home_map);
#else
	cpu_cacheable_map = cpu_possible_map;
#endif
}


static int __init dataplane(char *str)
{
	pr_warning("WARNING: dataplane support disabled in this kernel\n");
	return 0;
}

early_param("dataplane", dataplane);

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

void __init setup_arch(char **cmdline_p)
{
	int len;

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	len = hv_get_command_line((HV_VirtAddr) boot_command_line,
				  COMMAND_LINE_SIZE);
	if (boot_command_line[0])
		pr_warning("WARNING: ignoring dynamic command line \"%s\"\n",
			   boot_command_line);
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	char *hv_cmdline;
#if defined(CONFIG_CMDLINE_BOOL)
	if (builtin_cmdline[0]) {
		int builtin_len = strlcpy(boot_command_line, builtin_cmdline,
					  COMMAND_LINE_SIZE);
		if (builtin_len < COMMAND_LINE_SIZE-1)
			boot_command_line[builtin_len++] = ' ';
		hv_cmdline = &boot_command_line[builtin_len];
		len = COMMAND_LINE_SIZE - builtin_len;
	} else
#endif
	{
		hv_cmdline = boot_command_line;
		len = COMMAND_LINE_SIZE;
	}
	len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len);
	if (len < 0 || len > COMMAND_LINE_SIZE)
		early_panic("hv_get_command_line failed: %d\n", len);
#endif

	*cmdline_p = boot_command_line;

	/* Set disabled_map and setup_max_cpus very early */
	parse_early_param();

	/* Make sure the kernel is compatible with the hypervisor. */
	validate_hv();
	validate_va();

	setup_cpu_maps();


#ifdef CONFIG_PCI
	/*
	 * Initialize the PCI structures.  This is done before memory
	 * setup so that we know whether or not a pci_reserve region
	 * is necessary.
	 */
	if (tile_pci_init() == 0)
		pci_reserve_mb = 0;

	/* PCI systems reserve a region just below 4GB for mapping iomem. */
	pci_reserve_end_pfn  = (1 << (32 - PAGE_SHIFT));
	pci_reserve_start_pfn = pci_reserve_end_pfn -
		(pci_reserve_mb << (20 - PAGE_SHIFT));
#endif

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	setup_memory();
	store_permanent_mappings();
	setup_bootmem_allocator();

	/*
	 * NOTE: before this point _nobody_ is allowed to allocate
	 * any memory using the bootmem allocator.
	 */

	paging_init();
	setup_numa_mapping();
	zone_sizes_init();
	set_page_homes();
	setup_cpu(1);
	setup_clock();
	load_hv_initrd();
}


/*
 * Set up per-cpu memory.
 */

unsigned long __per_cpu_offset[NR_CPUS] __write_once;
EXPORT_SYMBOL(__per_cpu_offset);

static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 };

/*
 * As the percpu code allocates pages, we return the pages from the
 * end of the node for the specified cpu.
 */
static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	int nid = cpu_to_node(cpu);
	unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid];

	BUG_ON(size % PAGE_SIZE != 0);
	pfn_offset[nid] += size / PAGE_SIZE;
	BUG_ON(node_percpu[nid] < size);
	node_percpu[nid] -= size;
	if (percpu_pfn[cpu] == 0)
		percpu_pfn[cpu] = pfn;
	return pfn_to_kaddr(pfn);
}

/*
 * Pages reserved for percpu memory are not freeable, and in any case we are
 * on a short path to panic() in setup_per_cpu_area() at this point anyway.
 */
static void __init pcpu_fc_free(void *ptr, size_t size)
{
}

/*
 * Set up vmalloc page tables using bootmem for the percpu code.
 */
static void __init pcpu_fc_populate_pte(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	BUG_ON(pgd_addr_invalid(addr));
	if (addr < VMALLOC_START || addr >= VMALLOC_END)
		panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
		      " try increasing CONFIG_VMALLOC_RESERVE\n",
		      addr, VMALLOC_START, VMALLOC_END);

	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	BUG_ON(!pud_present(*pud));
	pmd = pmd_offset(pud, addr);
	if (pmd_present(*pmd)) {
		BUG_ON(pmd_huge_page(*pmd));
	} else {
		pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE,
				      HV_PAGE_TABLE_ALIGN, 0);
		pmd_populate_kernel(&init_mm, pmd, pte);
	}
}

void __init setup_per_cpu_areas(void)
{
	struct page *pg;
	unsigned long delta, pfn, lowmem_va;
	unsigned long size = percpu_size();
	char *ptr;
	int rc, cpu, i;

	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc,
				   pcpu_fc_free, pcpu_fc_populate_pte);
	if (rc < 0)
		panic("Cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];

		/* finv the copy out of cache so we can change homecache */
		ptr = pcpu_base_addr + pcpu_unit_offsets[cpu];
		__finv_buffer(ptr, size);
		pfn = percpu_pfn[cpu];

		/* Rewrite the page tables to cache on that cpu */
		pg = pfn_to_page(pfn);
		for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {

			/* Update the vmalloc mapping and page home. */
			pte_t *ptep =
				virt_to_pte(NULL, (unsigned long)ptr + i);
			pte_t pte = *ptep;
			BUG_ON(pfn != pte_pfn(pte));
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
			pte = set_remote_cache_cpu(pte, cpu);
			set_pte(ptep, pte);

			/* Update the lowmem mapping for consistency. */
			lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
			ptep = virt_to_pte(NULL, lowmem_va);
			if (pte_huge(*ptep)) {
				printk(KERN_DEBUG "early shatter of huge page"
				       " at %#lx\n", lowmem_va);
				shatter_pmd((pmd_t *)ptep);
				ptep = virt_to_pte(NULL, lowmem_va);
				BUG_ON(pte_huge(*ptep));
			}
			BUG_ON(pfn != pte_pfn(*ptep));
			set_pte(ptep, pte);
		}
	}

	/* Set our thread pointer appropriately. */
	set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]);

	/* Make sure the finv's have completed. */
	mb_incoherent();

	/* Flush the TLB so we reference it properly from here on out. */
	local_flush_tlb_all();
}

static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

/*
 * We reserve all resources above 4GB so that PCI won't try to put
 * mappings above 4GB; the standard allows that for some devices but
 * the probing code truncates values to 32 bits.
 */
#ifdef CONFIG_PCI
static struct resource* __init
insert_non_bus_resource(void)
{
	struct resource *res =
		kzalloc(sizeof(struct resource), GFP_ATOMIC);
	res->name = "Non-Bus Physical Address Space";
	res->start = (1ULL << 32);
	res->end = -1LL;
	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	if (insert_resource(&iomem_resource, res)) {
		kfree(res);
		return NULL;
	}
	return res;
}
#endif

static struct resource* __init
insert_ram_resource(u64 start_pfn, u64 end_pfn)
{
	struct resource *res =
		kzalloc(sizeof(struct resource), GFP_ATOMIC);
	res->name = "System RAM";
	res->start = start_pfn << PAGE_SHIFT;
	res->end = (end_pfn << PAGE_SHIFT) - 1;
	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	if (insert_resource(&iomem_resource, res)) {
		kfree(res);
		return NULL;
	}
	return res;
}

/*
 * Request address space for all standard resources
 *
 * If the system includes PCI root complex drivers, we need to create
 * a window just below 4GB where PCI BARs can be mapped.
 */
static int __init request_standard_resources(void)
{
	int i;
	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };

	iomem_resource.end = -1LL;
#ifdef CONFIG_PCI
	insert_non_bus_resource();
#endif

	for_each_online_node(i) {
		u64 start_pfn = node_start_pfn[i];
		u64 end_pfn = node_end_pfn[i];

#ifdef CONFIG_PCI
		if (start_pfn <= pci_reserve_start_pfn &&
		    end_pfn > pci_reserve_start_pfn) {
			if (end_pfn > pci_reserve_end_pfn)
				insert_ram_resource(pci_reserve_end_pfn,
						     end_pfn);
			end_pfn = pci_reserve_start_pfn;
		}
#endif
		insert_ram_resource(start_pfn, end_pfn);
	}

	code_resource.start = __pa(_text - CODE_DELTA);
	code_resource.end = __pa(_etext - CODE_DELTA)-1;
	data_resource.start = __pa(_sdata);
	data_resource.end = __pa(_end)-1;

	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);

#ifdef CONFIG_KEXEC
	insert_resource(&iomem_resource, &crashk_res);
#endif

	return 0;
}

subsys_initcall(request_standard_resources);
v3.15
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/node.h>
#include <linux/cpu.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/pci.h>
#include <linux/swiotlb.h>
#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/timex.h>
#include <linux/hugetlb.h>
#include <linux/start_kernel.h>
#include <linux/screen_info.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <hv/hypervisor.h>
#include <arch/interrupts.h>

/* <linux/smp.h> doesn't provide this definition. */
#ifndef CONFIG_SMP
#define setup_max_cpus 1
#endif

static inline int ABS(int x) { return x >= 0 ? x : -x; }

/* Chip information */
char chip_model[64] __write_once;

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

  60/* Information on the NUMA nodes that we compute early */
  61unsigned long node_start_pfn[MAX_NUMNODES];
  62unsigned long node_end_pfn[MAX_NUMNODES];
  63unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
  64unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
  65unsigned long __initdata node_free_pfn[MAX_NUMNODES];
  66
  67static unsigned long __initdata node_percpu[MAX_NUMNODES];
  68
  69/*
  70 * per-CPU stack and boot info.
  71 */
  72DEFINE_PER_CPU(unsigned long, boot_sp) =
  73	(unsigned long)init_stack + THREAD_SIZE;
  74
  75#ifdef CONFIG_SMP
  76DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel;
  77#else
  78/*
  79 * The variable must be __initdata since it references __init code.
  80 * With CONFIG_SMP it is per-cpu data, which is exempt from validation.
  81 */
  82unsigned long __initdata boot_pc = (unsigned long)start_kernel;
  83#endif
  84
  85#ifdef CONFIG_HIGHMEM
  86/* Page frame index of end of lowmem on each controller. */
  87unsigned long node_lowmem_end_pfn[MAX_NUMNODES];
  88
  89/* Number of pages that can be mapped into lowmem. */
  90static unsigned long __initdata mappable_physpages;
  91#endif
  92
  93/* Data on which physical memory controller corresponds to which NUMA node */
  94int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };
  95
  96#ifdef CONFIG_HIGHMEM
  97/* Map information from VAs to PAs */
  98unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
  99  __write_once __attribute__((aligned(L2_CACHE_BYTES)));
 100EXPORT_SYMBOL(pbase_map);
 101
 102/* Map information from PAs to VAs */
 103void *vbase_map[NR_PA_HIGHBIT_VALUES]
 104  __write_once __attribute__((aligned(L2_CACHE_BYTES)));
 105EXPORT_SYMBOL(vbase_map);
 106#endif
 107
 108/* Node number as a function of the high PA bits */
 109int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once;
 110EXPORT_SYMBOL(highbits_to_node);
 111
 112static unsigned int __initdata maxmem_pfn = -1U;
 113static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = {
 114	[0 ... MAX_NUMNODES-1] = -1U
 115};
 116static nodemask_t __initdata isolnodes;
 117
 118#if defined(CONFIG_PCI) && !defined(__tilegx__)
 119enum { DEFAULT_PCI_RESERVE_MB = 64 };
 120static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB;
 121unsigned long __initdata pci_reserve_start_pfn = -1U;
 122unsigned long __initdata pci_reserve_end_pfn = -1U;
 123#endif
 124
 125static int __init setup_maxmem(char *str)
 126{
 127	unsigned long long maxmem;
 128	if (str == NULL || (maxmem = memparse(str, NULL)) == 0)
 
 129		return -EINVAL;
 130
 131	maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
 
 132	pr_info("Forcing RAM used to no more than %dMB\n",
 133	       maxmem_pfn >> (20 - PAGE_SHIFT));
 134	return 0;
 135}
 136early_param("maxmem", setup_maxmem);
 137
 138static int __init setup_maxnodemem(char *str)
 139{
 140	char *endp;
 141	unsigned long long maxnodemem;
 142	long node;
 143
 144	node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
 145	if (node >= MAX_NUMNODES || *endp != ':')
 
 146		return -EINVAL;
 147
 148	maxnodemem = memparse(endp+1, NULL);
 149	maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
 150		(HPAGE_SHIFT - PAGE_SHIFT);
 151	pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
 152	       node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
 153	return 0;
 154}
 155early_param("maxnodemem", setup_maxnodemem);
 156
 157struct memmap_entry {
 158	u64 addr;	/* start of memory segment */
 159	u64 size;	/* size of memory segment */
 160};
 161static struct memmap_entry memmap_map[64];
 162static int memmap_nr;
 163
 164static void add_memmap_region(u64 addr, u64 size)
 165{
 166	if (memmap_nr >= ARRAY_SIZE(memmap_map)) {
 167		pr_err("Ooops! Too many entries in the memory map!\n");
 168		return;
 169	}
 170	memmap_map[memmap_nr].addr = addr;
 171	memmap_map[memmap_nr].size = size;
 172	memmap_nr++;
 173}
 174
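    /*
     * Handle "memmap=".  Only "memmap=nn$ss" (reserve a PA range) and
     * a bare "memmap=nn" (same effect as "maxmem=nn") are supported;
     * the x86 "@" and "#" forms are rejected with an error.
     */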
 175static int __init setup_memmap(char *p)
 176{
 177	char *oldp;
 178	u64 start_at, mem_size;
 179
 180	if (!p)
 181		return -EINVAL;
 182
 183	if (!strncmp(p, "exactmap", 8)) {
 184		pr_err("\"memmap=exactmap\" not valid on tile\n");
 185		return 0;
 186	}
 187
 188	oldp = p;
 189	mem_size = memparse(p, &p);
 190	if (p == oldp)
 191		return -EINVAL;
 192
 193	if (*p == '@') {
 194		pr_err("\"memmap=nn@ss\" (force RAM) invalid on tile\n");
 195	} else if (*p == '#') {
 196		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on tile\n");
 197	} else if (*p == '$') {
 198		start_at = memparse(p+1, &p);
 199		add_memmap_region(start_at, mem_size);
 200	} else {
 201		if (mem_size == 0)
 202			return -EINVAL;
 203		maxmem_pfn = (mem_size >> HPAGE_SHIFT) <<
 204			(HPAGE_SHIFT - PAGE_SHIFT);
 205	}
 206	return *p == '\0' ? 0 : -EINVAL;
 207}
 208early_param("memmap", setup_memmap);
 209
 210static int __init setup_mem(char *str)
 211{
 212	return setup_maxmem(str);
 213}
 214early_param("mem", setup_mem);  /* compatibility with x86 */
 215
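    /*
     * Handle "isolnodes=<nodelist>": the listed NUMA nodes get no
     * lowmem mapping and no per-cpu data (node 0 cannot be isolated
     * for lowmem; see setup_pa_va_mapping() and zone_sizes_init()).
     */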
 216static int __init setup_isolnodes(char *str)
 217{
 218	char buf[MAX_NUMNODES * 5];
 219	if (str == NULL || nodelist_parse(str, isolnodes) != 0)
 220		return -EINVAL;
 221
 222	nodelist_scnprintf(buf, sizeof(buf), isolnodes);
 223	pr_info("Set isolnodes value to '%s'\n", buf);
 224	return 0;
 225}
 226early_param("isolnodes", setup_isolnodes);
 227
 228#if defined(CONFIG_PCI) && !defined(__tilegx__)
 229static int __init setup_pci_reserve(char* str)
 230{
 231	unsigned long mb;
 232
 233	if (str == NULL || strict_strtoul(str, 0, &mb) != 0 ||
 234	    mb > 3 * 1024)
 235		return -EINVAL;
 236
 237	pci_reserve_mb = mb;
 238	pr_info("Reserving %dMB for PCIe root complex mappings\n",
 239		pci_reserve_mb);
 240	return 0;
 241}
 242early_param("pci_reserve", setup_pci_reserve);
 243#endif
 244
 245#ifndef __tilegx__
 246/*
 247 * vmalloc=size forces the vmalloc area to be exactly 'size' bytes.
 248 * This can be used to increase (or decrease) the vmalloc area.
 249 */
 250static int __init parse_vmalloc(char *arg)
 251{
 252	if (!arg)
 253		return -EINVAL;
 254
 255	VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK;
 256
 257	/* See validate_va() for more on this test. */
 258	if ((long)_VMALLOC_START >= 0)
 259		early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n",
 260			    VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL);
 261
 262	return 0;
 263}
 264early_param("vmalloc", parse_vmalloc);
 265#endif
 266
 267#ifdef CONFIG_HIGHMEM
 268/*
 269 * Determine for each controller where its lowmem is mapped and how much of
 270 * it is mapped there.  On controller zero, the first few megabytes are
 271 * already mapped in as code at MEM_SV_START, so in principle we could
 272 * start our data mappings higher up, but for now we don't bother, to avoid
 273 * additional confusion.
 274 *
 275 * One question is whether, on systems with more than 768 MB and
 276 * controllers of different sizes, to map in a proportionate amount of
 277 * each one, or to try to map the same amount from each controller.
 278 * (E.g. if we have three controllers with 256MB, 1GB, and 256MB
 279 * respectively, do we map 256MB from each, or do we map 128MB, 512MB,
 280 * and 128MB respectively?)  For now we use the proportionate
 281 * solution, i.e. the latter.
 282 *
 283 * The VA/PA mapping demands that we align our decisions at 16 MB
 284 * boundaries so that we can rapidly convert VA to PA.
 285 */
 286static void *__init setup_pa_va_mapping(void)
 287{
 288	unsigned long curr_pages = 0;
 289	unsigned long vaddr = PAGE_OFFSET;
 290	nodemask_t highonlynodes = isolnodes;
 291	int i, j;
 292
 293	memset(pbase_map, -1, sizeof(pbase_map));
 294	memset(vbase_map, -1, sizeof(vbase_map));
 295
 296	/* Node zero cannot be isolated for LOWMEM purposes. */
 297	node_clear(0, highonlynodes);
 298
 299	/* Count up the number of pages on non-highonlynodes controllers. */
 300	mappable_physpages = 0;
 301	for_each_online_node(i) {
 302		if (!node_isset(i, highonlynodes))
 303			mappable_physpages +=
 304				node_end_pfn[i] - node_start_pfn[i];
 305	}
 306
 307	for_each_online_node(i) {
 308		unsigned long start = node_start_pfn[i];
 309		unsigned long end = node_end_pfn[i];
 310		unsigned long size = end - start;
 311		unsigned long vaddr_end;
 312
 313		if (node_isset(i, highonlynodes)) {
 314			/* Mark this controller as having no lowmem. */
 315			node_lowmem_end_pfn[i] = start;
 316			continue;
 317		}
 318
 319		curr_pages += size;
 320		if (mappable_physpages > MAXMEM_PFN) {
 321			vaddr_end = PAGE_OFFSET +
 322				(((u64)curr_pages * MAXMEM_PFN /
 323				  mappable_physpages)
 324				 << PAGE_SHIFT);
 325		} else {
 326			vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT);
 327		}
 328		for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) {
 329			unsigned long this_pfn =
 330				start + (j << HUGETLB_PAGE_ORDER);
 331			pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn;
 332			if (vbase_map[__pfn_to_highbits(this_pfn)] ==
 333			    (void *)-1)
 334				vbase_map[__pfn_to_highbits(this_pfn)] =
 335					(void *)(vaddr & HPAGE_MASK);
 336		}
 337		node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER);
 338		BUG_ON(node_lowmem_end_pfn[i] > end);
 339	}
 340
 341	/* Return highest address of any mapped memory. */
 342	return (void *)vaddr;
 343}
 344#endif /* CONFIG_HIGHMEM */
 345
 346/*
 347 * Register our most important memory mappings with the debug stub.
 348 *
 349 * This is up to 4 mappings for lowmem, one mapping per memory
 350 * controller, plus one for our text segment.
 351 */
 352static void store_permanent_mappings(void)
 353{
 354	int i;
 355
 356	for_each_online_node(i) {
 357		HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT;
 358#ifdef CONFIG_HIGHMEM
 359		HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i];
 360#else
 361		HV_PhysAddr high_mapped_pa = node_end_pfn[i];
 362#endif
 363
 364		unsigned long pages = high_mapped_pa - node_start_pfn[i];
 365		HV_VirtAddr addr = (HV_VirtAddr) __va(pa);
 366		hv_store_mapping(addr, pages << PAGE_SHIFT, pa);
 367	}
 368
 369	hv_store_mapping((HV_VirtAddr)_text,
 370			 (uint32_t)(_einittext - _text), 0);
 371}
 372
 373/*
 374 * Use hv_inquire_physical() to populate node_{start,end}_pfn[]
 375 * and node_online_map, doing suitable sanity-checking.
 376 * Also set min_low_pfn, max_low_pfn, and max_pfn.
 377 */
 378static void __init setup_memory(void)
 379{
 380	int i, j;
 381	int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 };
 382#ifdef CONFIG_HIGHMEM
 383	long highmem_pages;
 384#endif
 385#ifndef __tilegx__
 386	int cap;
 387#endif
 388#if defined(CONFIG_HIGHMEM) || defined(__tilegx__)
 389	long lowmem_pages;
 390#endif
 391	unsigned long physpages = 0;
 392
 393	/* We are using a char to hold the cpu_2_node[] mapping */
 394	BUILD_BUG_ON(MAX_NUMNODES > 127);
 395
 396	/* Discover the ranges of memory available to us */
 397	for (i = 0; ; ++i) {
 398		unsigned long start, size, end, highbits;
 399		HV_PhysAddrRange range = hv_inquire_physical(i);
 400		if (range.size == 0)
 401			break;
 402#ifdef CONFIG_FLATMEM
 403		if (i > 0) {
 404			pr_err("Can't use discontiguous PAs: %#llx..%#llx\n",
 405			       range.start, range.start + range.size);
 406			continue;
 407		}
 408#endif
 409#ifndef __tilegx__
 410		if ((unsigned long)range.start) {
 411			pr_err("Range not at 4GB multiple: %#llx..%#llx\n",
 412			       range.start, range.start + range.size);
 413			continue;
 414		}
 415#endif
 416		if ((range.start & (HPAGE_SIZE-1)) != 0 ||
 417		    (range.size & (HPAGE_SIZE-1)) != 0) {
 418			unsigned long long start_pa = range.start;
 419			unsigned long long orig_size = range.size;
 420			range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
 421			range.size -= (range.start - start_pa);
 422			range.size &= HPAGE_MASK;
 423			pr_err("Range not hugepage-aligned: %#llx..%#llx:"
 424			       " now %#llx..%#llx\n",
 425			       start_pa, start_pa + orig_size,
 426			       range.start, range.start + range.size);
 427		}
 428		highbits = __pa_to_highbits(range.start);
 429		if (highbits >= NR_PA_HIGHBIT_VALUES) {
 430			pr_err("PA high bits too high: %#llx..%#llx\n",
 431			       range.start, range.start + range.size);
 432			continue;
 433		}
 434		if (highbits_seen[highbits]) {
 435			pr_err("Range overlaps in high bits: %#llx..%#llx\n",
 436			       range.start, range.start + range.size);
 437			continue;
 438		}
 439		highbits_seen[highbits] = 1;
 440		if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
 441			int max_size = maxnodemem_pfn[i];
 442			if (max_size > 0) {
 443				pr_err("Maxnodemem reduced node %d to"
 444				       " %d pages\n", i, max_size);
 445				range.size = PFN_PHYS(max_size);
 446			} else {
 447				pr_err("Maxnodemem disabled node %d\n", i);
 448				continue;
 449			}
 450		}
 451		if (physpages + PFN_DOWN(range.size) > maxmem_pfn) {
 452			int max_size = maxmem_pfn - physpages;
 453			if (max_size > 0) {
 454				pr_err("Maxmem reduced node %d to %d pages\n",
 455				       i, max_size);
 456				range.size = PFN_PHYS(max_size);
 457			} else {
 458				pr_err("Maxmem disabled node %d\n", i);
 459				continue;
 460			}
 461		}
 462		if (i >= MAX_NUMNODES) {
 463			pr_err("Too many PA nodes (#%d): %#llx..%#llx\n",
 464			       i, range.start, range.start + range.size);
 465			continue;
 466		}
 467
 468		start = range.start >> PAGE_SHIFT;
 469		size = range.size >> PAGE_SHIFT;
 470		end = start + size;
 471
 472#ifndef __tilegx__
 473		if (((HV_PhysAddr)end << PAGE_SHIFT) !=
 474		    (range.start + range.size)) {
 475			pr_err("PAs too high to represent: %#llx..%#llx\n",
 476			       range.start, range.start + range.size);
 477			continue;
 478		}
 479#endif
 480#if defined(CONFIG_PCI) && !defined(__tilegx__)
 481		/*
 482		 * Blocks that overlap the pci reserved region must
 483		 * have enough space to hold the maximum percpu data
 484		 * region at the top of the range.  If there isn't
 485		 * enough space above the reserved region, just
 486		 * truncate the node.
 487		 */
 488		if (start <= pci_reserve_start_pfn &&
 489		    end > pci_reserve_start_pfn) {
 490			unsigned int per_cpu_size =
 491				__per_cpu_end - __per_cpu_start;
 492			unsigned int percpu_pages =
 493				NR_CPUS * PFN_UP(per_cpu_size); /* PFN_UP() yields pages */
 494			if (end < pci_reserve_end_pfn + percpu_pages) {
 495				end = pci_reserve_start_pfn;
 496				pr_err("PCI mapping region reduced node %d to"
 497				       " %ld pages\n", i, end - start);
 498			}
 499		}
 500#endif
 501
 502		for (j = __pfn_to_highbits(start);
 503		     j <= __pfn_to_highbits(end - 1); j++)
 504			highbits_to_node[j] = i;
 505
 506		node_start_pfn[i] = start;
 507		node_end_pfn[i] = end;
 508		node_controller[i] = range.controller;
 509		physpages += size;
 510		max_pfn = end;
 511
 512		/* Mark node as online */
 513		node_set(i, node_online_map);
 514		node_set(i, node_possible_map);
 515	}
 516
 517#ifndef __tilegx__
 518	/*
 519	 * For 4KB pages, mem_map "struct page" data is 1% of the size
 520	 * of the physical memory, so can be quite big (640 MB for
 521	 * four 16G zones).  These structures must be mapped in
 522	 * lowmem, and since we currently cap out at about 768 MB,
 523	 * it's impractical to try to use this much address space.
 524	 * For now, arbitrarily cap the amount of physical memory
 525	 * we're willing to use at 8 million pages (32GB of 4KB pages).
 526	 */
 527	cap = 8 * 1024 * 1024;  /* 8 million pages */
 528	if (physpages > cap) {
 529		int num_nodes = num_online_nodes();
 530		int cap_each = cap / num_nodes;
 531		unsigned long dropped_pages = 0;
 532		for (i = 0; i < num_nodes; ++i) {
 533			int size = node_end_pfn[i] - node_start_pfn[i];
 534			if (size > cap_each) {
 535				dropped_pages += (size - cap_each);
 536				node_end_pfn[i] = node_start_pfn[i] + cap_each;
 537			}
 538		}
 539		physpages -= dropped_pages;
 540		pr_warning("Only using %ldMB memory;"
 541		       " ignoring %ldMB.\n",
 542		       physpages >> (20 - PAGE_SHIFT),
 543		       dropped_pages >> (20 - PAGE_SHIFT));
 544		pr_warning("Consider using a larger page size.\n");
 545	}
 546#endif
 547
 548	/* Heap starts just above the last loaded address. */
 549	min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET);
 550
 551#ifdef CONFIG_HIGHMEM
 552	/* Find where we map lowmem from each controller. */
 553	high_memory = setup_pa_va_mapping();
 554
 555	/* Set max_low_pfn based on what node 0 can directly address. */
 556	max_low_pfn = node_lowmem_end_pfn[0];
 557
 558	lowmem_pages = (mappable_physpages > MAXMEM_PFN) ?
 559		MAXMEM_PFN : mappable_physpages;
 560	highmem_pages = (long) (physpages - lowmem_pages);
 561
 562	pr_notice("%ldMB HIGHMEM available.\n",
 563	       pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
 564	pr_notice("%ldMB LOWMEM available.\n",
 565			pages_to_mb(lowmem_pages));
 566#else
 567	/* Set max_low_pfn based on what node 0 can directly address. */
 568	max_low_pfn = node_end_pfn[0];
 569
 570#ifndef __tilegx__
 571	if (node_end_pfn[0] > MAXMEM_PFN) {
 572		pr_warning("Only using %ldMB LOWMEM.\n",
 573		       MAXMEM>>20);
 574		pr_warning("Use a HIGHMEM enabled kernel.\n");
 575		max_low_pfn = MAXMEM_PFN;
 576		max_pfn = MAXMEM_PFN;
 577		node_end_pfn[0] = MAXMEM_PFN;
 578	} else {
 579		pr_notice("%ldMB memory available.\n",
 580		       pages_to_mb(node_end_pfn[0]));
 581	}
 582	for (i = 1; i < MAX_NUMNODES; ++i) {
 583		node_start_pfn[i] = 0;
 584		node_end_pfn[i] = 0;
 585	}
 586	high_memory = pfn_to_kaddr(node_end_pfn[0]);
 587#else
 588	lowmem_pages = 0;
 589	for (i = 0; i < MAX_NUMNODES; ++i) {
 590		int pages = node_end_pfn[i] - node_start_pfn[i];
 591		lowmem_pages += pages;
 592		if (pages)
 593			high_memory = pfn_to_kaddr(node_end_pfn[i]);
 594	}
 595	pr_notice("%ldMB memory available.\n",
 596	       pages_to_mb(lowmem_pages));
 597#endif
 598#endif
 599}
 600
 601/*
 602 * On 32-bit machines, we only put bootmem on the low controller,
 603 * since PAs > 4GB can't be used in bootmem.  In principle one could
 604 * imagine, e.g., multiple 1 GB controllers all of which could support
 605 * bootmem, but in practice using controllers this small isn't a
 606 * particularly interesting scenario, so we just keep it simple and
 607 * use only the first controller for bootmem on 32-bit machines.
 608 */
 609static inline int node_has_bootmem(int nid)
 610{
 611#ifdef CONFIG_64BIT
 612	return 1;
 613#else
 614	return nid == 0;
 615#endif
 616}
 617
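    /*
     * Allocate bootmem from node "nid" and return the starting PFN;
     * a nonzero "goal" must be satisfied exactly (see the BUG_ON).
     */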
 618static inline unsigned long alloc_bootmem_pfn(int nid,
 619					      unsigned long size,
 620					      unsigned long goal)
 621{
 622	void *kva = __alloc_bootmem_node(NODE_DATA(nid), size,
 623					 PAGE_SIZE, goal);
 624	unsigned long pfn = kaddr_to_pfn(kva);
 625	BUG_ON(goal && PFN_PHYS(pfn) != goal);
 626	return pfn;
 627}
 628
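    /*
     * Set up bootmem for node "i": pick its usable [start, end) page
     * range, place the bootmem bitmap, free the range into the
     * allocator, and re-reserve anything aliased by the PCI region.
     */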
 629static void __init setup_bootmem_allocator_node(int i)
 630{
 631	unsigned long start, end, mapsize, mapstart;
 632
 633	if (node_has_bootmem(i)) {
 634		NODE_DATA(i)->bdata = &bootmem_node_data[i];
 635	} else {
 636		/* Share controller zero's bdata for now. */
 637		NODE_DATA(i)->bdata = &bootmem_node_data[0];
 638		return;
 639	}
 640
 641	/* Skip up to after the bss in node 0. */
 642	start = (i == 0) ? min_low_pfn : node_start_pfn[i];
 643
 644	/* Only lowmem, if we're a HIGHMEM build. */
 645#ifdef CONFIG_HIGHMEM
 646	end = node_lowmem_end_pfn[i];
 647#else
 648	end = node_end_pfn[i];
 649#endif
 650
 651	/* No memory here. */
 652	if (end == start)
 653		return;
 654
 655	/* Figure out where the bootmem bitmap is located. */
 656	mapsize = bootmem_bootmap_pages(end - start);
 657	if (i == 0) {
 658		/* Use some space right before the heap on node 0. */
 659		mapstart = start;
 660		start += mapsize;
 661	} else {
 662		/* Allocate bitmap on node 0 to avoid page table issues. */
 663		mapstart = alloc_bootmem_pfn(0, PFN_PHYS(mapsize), 0);
 664	}
 665
 666	/* Initialize a node. */
 667	init_bootmem_node(NODE_DATA(i), mapstart, start, end);
 668
 669	/* Free all the space back into the allocator. */
 670	free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start));
 671
 672#if defined(CONFIG_PCI) && !defined(__tilegx__)
 673	/*
 674	 * Throw away any memory aliased by the PCI region.
 675	 */
 676	if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start) {
 677		start = max(pci_reserve_start_pfn, start);
 678		end = min(pci_reserve_end_pfn, end);
 679		reserve_bootmem(PFN_PHYS(start), PFN_PHYS(end - start),
 680				BOOTMEM_EXCLUSIVE);
 681	}
 682#endif
 683}
 684
 685static void __init setup_bootmem_allocator(void)
 686{
 687	int i;
 688	for (i = 0; i < MAX_NUMNODES; ++i)
 689		setup_bootmem_allocator_node(i);
 690
 691	/* Reserve any memory excluded by "memmap" arguments. */
 692	for (i = 0; i < memmap_nr; ++i) {
 693		struct memmap_entry *m = &memmap_map[i];
 694		reserve_bootmem(m->addr, m->size, 0);
 695	}
 696
 697#ifdef CONFIG_BLK_DEV_INITRD
 698	if (initrd_start) {
 699		/* Make sure the initrd memory region is not modified. */
 700		if (reserve_bootmem(initrd_start, initrd_end - initrd_start,
 701				    BOOTMEM_EXCLUSIVE)) {
 702			pr_crit("The initrd memory region has been polluted. Disabling it.\n");
 703			initrd_start = 0;
 704			initrd_end = 0;
 705		} else {
 706			/*
 707			 * Translate initrd_start & initrd_end from PA to VA for
 708			 * future access.
 709			 */
 710			initrd_start += PAGE_OFFSET;
 711			initrd_end += PAGE_OFFSET;
 712		}
 713	}
 714#endif
 715
 716#ifdef CONFIG_KEXEC
 717	if (crashk_res.start != crashk_res.end)
 718		reserve_bootmem(crashk_res.start, resource_size(&crashk_res), 0);
 719#endif
 720}
 721
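    /*
     * Hand back the per-node mem_map[] space that was set aside in
     * zone_sizes_init(), zeroed.
     */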
 722void *__init alloc_remap(int nid, unsigned long size)
 723{
 724	int pages = node_end_pfn[nid] - node_start_pfn[nid];
 725	void *map = pfn_to_kaddr(node_memmap_pfn[nid]);
 726	BUG_ON(size != pages * sizeof(struct page));
 727	memset(map, 0, size);
 728	return map;
 729}
 730
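    /*
     * Bytes of per-cpu space for one cpu: the static per-cpu data,
     * plus room for modules and early dynamic allocations, at least
     * PCPU_MIN_UNIT_SIZE, rounded up to whole pages.
     */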
 731static int __init percpu_size(void)
 732{
 733	int size = __per_cpu_end - __per_cpu_start;
 734	size += PERCPU_MODULE_RESERVE;
 735	size += PERCPU_DYNAMIC_EARLY_SIZE;
 736	if (size < PCPU_MIN_UNIT_SIZE)
 737		size = PCPU_MIN_UNIT_SIZE;
 738	size = roundup(size, PAGE_SIZE);
 739
 740	/* In several places we assume the per-cpu data fits on a huge page. */
 741	BUG_ON(kdata_huge && size > HPAGE_SIZE);
 742	return size;
 743}
 744
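    /*
     * Compute the DMA/NORMAL/HIGHMEM zone sizes for each node, set
     * aside space for each node's mem_map[] and per-cpu data, and
     * hand the zones to free_area_init_node().
     */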
 745static void __init zone_sizes_init(void)
 746{
 747	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
 748	int size = percpu_size();
 749	int num_cpus = smp_height * smp_width;
 750	const unsigned long dma_end = (1UL << (32 - PAGE_SHIFT));
 751
 752	int i;
 753
 754	for (i = 0; i < num_cpus; ++i)
 755		node_percpu[cpu_to_node(i)] += size;
 756
 757	for_each_online_node(i) {
 758		unsigned long start = node_start_pfn[i];
 759		unsigned long end = node_end_pfn[i];
 760#ifdef CONFIG_HIGHMEM
 761		unsigned long lowmem_end = node_lowmem_end_pfn[i];
 762#else
 763		unsigned long lowmem_end = end;
 764#endif
 765		int memmap_size = (end - start) * sizeof(struct page);
 766		node_free_pfn[i] = start;
 767
 768		/*
 769		 * Set aside pages for per-cpu data and the mem_map array.
 770		 *
 771		 * Since the per-cpu data requires special homecaching,
 772		 * if we are in kdata_huge mode, we put it at the end of
 773		 * the lowmem region.  If we're not in kdata_huge mode,
 774		 * we take the per-cpu pages from the bottom of the
 775		 * controller, since that avoids fragmenting a huge page
 776		 * that users might want.  We always take the memmap
 777		 * from the bottom of the controller, since with
 778		 * kdata_huge that lets it be under a huge TLB entry.
 779		 *
 780		 * If the user has requested isolnodes for a controller,
 781		 * though, there'll be no lowmem, so we just alloc_bootmem
 782		 * the memmap.  There will be no percpu memory either.
 783		 */
 784		if (i != 0 && node_isset(i, isolnodes)) {
 785			node_memmap_pfn[i] =
 786				alloc_bootmem_pfn(0, memmap_size, 0);
 787			BUG_ON(node_percpu[i] != 0);
 788		} else if (node_has_bootmem(i)) {
 789			unsigned long goal = 0;
 790			node_memmap_pfn[i] =
 791				alloc_bootmem_pfn(i, memmap_size, 0);
 792			if (kdata_huge)
 793				goal = PFN_PHYS(lowmem_end) - node_percpu[i];
 794			if (node_percpu[i])
 795				node_percpu_pfn[i] =
 796					alloc_bootmem_pfn(i, node_percpu[i],
 797							  goal);
 798		} else {
 799			/* In non-bootmem zones, just reserve some pages. */
 800			node_memmap_pfn[i] = node_free_pfn[i];
 801			node_free_pfn[i] += PFN_UP(memmap_size);
 802			if (!kdata_huge) {
 803				node_percpu_pfn[i] = node_free_pfn[i];
 804				node_free_pfn[i] += PFN_UP(node_percpu[i]);
 805			} else {
 806				node_percpu_pfn[i] =
 807					lowmem_end - PFN_UP(node_percpu[i]);
 808			}
 809		}
 810
 811#ifdef CONFIG_HIGHMEM
 812		if (start > lowmem_end) {
 813			zones_size[ZONE_NORMAL] = 0;
 814			zones_size[ZONE_HIGHMEM] = end - start;
 815		} else {
 816			zones_size[ZONE_NORMAL] = lowmem_end - start;
 817			zones_size[ZONE_HIGHMEM] = end - lowmem_end;
 818		}
 819#else
 820		zones_size[ZONE_NORMAL] = end - start;
 821#endif
 822
 823		if (start < dma_end) {
 824			zones_size[ZONE_DMA] = min(zones_size[ZONE_NORMAL],
 825						   dma_end - start);
 826			zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA];
 827		} else {
 828			zones_size[ZONE_DMA] = 0;
 829		}
 830
 831		/* Take zone metadata from controller 0 if we're isolnode. */
 832		if (node_isset(i, isolnodes))
 833			NODE_DATA(i)->bdata = &bootmem_node_data[0];
 834
 835		free_area_init_node(i, zones_size, start, NULL);
 836		printk(KERN_DEBUG "  Normal zone: %ld per-cpu pages\n",
 837		       PFN_UP(node_percpu[i]));
 838
 839		/* Track the type of memory on each node */
 840		if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA])
 841			node_set_state(i, N_NORMAL_MEMORY);
 842#ifdef CONFIG_HIGHMEM
 843		if (end != start)
 844			node_set_state(i, N_HIGH_MEMORY);
 845#endif
 846
 847		node_set_online(i);
 848	}
 849}
 850
 851#ifdef CONFIG_NUMA
 852
 853/* which logical CPUs are on which nodes */
 854struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once;
 855EXPORT_SYMBOL(node_2_cpu_mask);
 856
 857/* which node each logical CPU is on */
 858char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES)));
 859EXPORT_SYMBOL(cpu_2_node);
 860
 861/* Return cpu_to_node() except for cpus not yet assigned, which return -1 */
 862static int __init cpu_to_bound_node(int cpu, struct cpumask* unbound_cpus)
 863{
 864	if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus))
 865		return -1;
 866	else
 867		return cpu_to_node(cpu);
 868}
 869
 870/* Return number of immediately-adjacent tiles sharing the same NUMA node. */
 871static int __init node_neighbors(int node, int cpu,
 872				 struct cpumask *unbound_cpus)
 873{
 874	int neighbors = 0;
 875	int w = smp_width;
 876	int h = smp_height;
 877	int x = cpu % w;
 878	int y = cpu / w;
 879	if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node)
 880		++neighbors;
 881	if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node)
 882		++neighbors;
 883	if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node)
 884		++neighbors;
 885	if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node)
 886		++neighbors;
 887	return neighbors;
 888}
 889
 890static void __init setup_numa_mapping(void)
 891{
 892	int distance[MAX_NUMNODES][NR_CPUS];
 893	HV_Coord coord;
 894	int cpu, node, cpus, i, x, y;
 895	int num_nodes = num_online_nodes();
 896	struct cpumask unbound_cpus;
 897	nodemask_t default_nodes;
 898
 899	cpumask_clear(&unbound_cpus);
 900
 901	/* Get set of nodes we will use for defaults */
 902	nodes_andnot(default_nodes, node_online_map, isolnodes);
 903	if (nodes_empty(default_nodes)) {
 904		BUG_ON(!node_isset(0, node_online_map));
 905		pr_err("Forcing NUMA node zero available as a default node\n");
 906		node_set(0, default_nodes);
 907	}
 908
 909	/* Populate the distance[] array */
 910	memset(distance, -1, sizeof(distance));
 911	cpu = 0;
 912	for (coord.y = 0; coord.y < smp_height; ++coord.y) {
 913		for (coord.x = 0; coord.x < smp_width;
 914		     ++coord.x, ++cpu) {
 915			BUG_ON(cpu >= nr_cpu_ids);
 916			if (!cpu_possible(cpu)) {
 917				cpu_2_node[cpu] = -1;
 918				continue;
 919			}
 920			for_each_node_mask(node, default_nodes) {
 921				HV_MemoryControllerInfo info =
 922					hv_inquire_memory_controller(
 923						coord, node_controller[node]);
 924				distance[node][cpu] =
 925					ABS(info.coord.x) + ABS(info.coord.y);
 926			}
 927			cpumask_set_cpu(cpu, &unbound_cpus);
 928		}
 929	}
 930	cpus = cpu;
 931
 932	/*
 933	 * Round-robin through the NUMA nodes until all the cpus are
 934	 * assigned.  We could be more clever here (e.g. create four
 935	 * sorted linked lists on the same set of cpu nodes, and pull
 936	 * off them in round-robin sequence, removing from all four
 937	 * lists each time) but given the relatively small numbers
 938	 * involved, O(n^2) seems OK for a one-time cost.
 939	 */
 940	node = first_node(default_nodes);
 941	while (!cpumask_empty(&unbound_cpus)) {
 942		int best_cpu = -1;
 943		int best_distance = INT_MAX;
 944		for (cpu = 0; cpu < cpus; ++cpu) {
 945			if (cpumask_test_cpu(cpu, &unbound_cpus)) {
 946				/*
 947				 * Compute metric, which is how much
 948				 * closer the cpu is to this memory
 949				 * controller than the others, shifted
 950				 * up, and then the number of
 951				 * neighbors already in the node as an
 952				 * epsilon adjustment to try to keep
 953				 * the nodes compact.
 954				 */
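    				/*
    				 * For illustration: with two nodes, a
    				 * cpu at distance 2 from this node and
    				 * 5 from the other gets d = 2*2 - 5 = -1,
    				 * and then -8 after the epsilon shift
    				 * below.
    				 */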
 955				int d = distance[node][cpu] * num_nodes;
 956				for_each_node_mask(i, default_nodes) {
 957					if (i != node)
 958						d -= distance[i][cpu];
 959				}
 960				d *= 8;  /* allow space for epsilon */
 961				d -= node_neighbors(node, cpu, &unbound_cpus);
 962				if (d < best_distance) {
 963					best_cpu = cpu;
 964					best_distance = d;
 965				}
 966			}
 967		}
 968		BUG_ON(best_cpu < 0);
 969		cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
 970		cpu_2_node[best_cpu] = node;
 971		cpumask_clear_cpu(best_cpu, &unbound_cpus);
 972		node = next_node(node, default_nodes);
 973		if (node == MAX_NUMNODES)
 974			node = first_node(default_nodes);
 975	}
 976
 977	/* Print out node assignments and set defaults for disabled cpus */
 978	cpu = 0;
 979	for (y = 0; y < smp_height; ++y) {
 980		printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
 981		for (x = 0; x < smp_width; ++x, ++cpu) {
 982			if (cpu_to_node(cpu) < 0) {
 983				pr_cont(" -");
 984				cpu_2_node[cpu] = first_node(default_nodes);
 985			} else {
 986				pr_cont(" %d", cpu_to_node(cpu));
 987			}
 988		}
 989		pr_cont("\n");
 990	}
 991}
 992
 993static struct cpu cpu_devices[NR_CPUS];
 994
 995static int __init topology_init(void)
 996{
 997	int i;
 998
 999	for_each_online_node(i)
1000		register_one_node(i);
1001
1002	for (i = 0; i < smp_height * smp_width; ++i)
1003		register_cpu(&cpu_devices[i], i);
1004
1005	return 0;
1006}
1007
1008subsys_initcall(topology_init);
1009
1010#else /* !CONFIG_NUMA */
1011
1012#define setup_numa_mapping() do { } while (0)
1013
1014#endif /* CONFIG_NUMA */
1015
1016/*
1017 * Initialize hugepage support on this cpu.  We do this on all cores
1018 * early in boot: before argument parsing for the boot cpu, and after
1019 * argument parsing but before the init functions run on the secondaries.
1020 * So the values we set up here in the hypervisor may be overridden on
1021 * the boot cpu as arguments are parsed.
1022 */
1023static void init_super_pages(void)
1024{
1025#ifdef CONFIG_HUGETLB_SUPER_PAGES
1026	int i;
1027	for (i = 0; i < HUGE_SHIFT_ENTRIES; ++i)
1028		hv_set_pte_super_shift(i, huge_shift[i]);
1029#endif
1030}
1031
1032/**
1033 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
1034 * @boot: Is this the boot cpu?
1035 *
1036 * Called from setup_arch() on the boot cpu, or online_secondary().
1037 */
1038void setup_cpu(int boot)
1039{
1040	/* The boot cpu sets up its permanent mappings much earlier. */
1041	if (!boot)
1042		store_permanent_mappings();
1043
1044	/* Allow asynchronous TLB interrupts. */
1045#if CHIP_HAS_TILE_DMA()
1046	arch_local_irq_unmask(INT_DMATLB_MISS);
1047	arch_local_irq_unmask(INT_DMATLB_ACCESS);
1048#endif
1049#ifdef __tilegx__
1050	arch_local_irq_unmask(INT_SINGLE_STEP_K);
1051#endif
1052
1053	/*
1054	 * Allow user access to many generic SPRs, like the cycle
1055	 * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc.
1056	 */
1057	__insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1);
1058
1059#if CHIP_HAS_SN()
1060	/* Static network is not restricted. */
1061	__insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
1062#endif
1063
1064	/*
1065	 * Set the MPL for interrupt control 0 & 1 to the corresponding
1066	 * values.  This includes access to the SYSTEM_SAVE and EX_CONTEXT
1067	 * SPRs, as well as the interrupt mask.
1068	 */
1069	__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
1070	__insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);
1071
1072	/* Initialize IRQ support for this cpu. */
1073	setup_irq_regs();
1074
1075#ifdef CONFIG_HARDWALL
1076	/* Reset the network state on this cpu. */
1077	reset_network_state();
1078#endif
1079
1080	init_super_pages();
1081}
1082
1083#ifdef CONFIG_BLK_DEV_INITRD
1084
1085static int __initdata set_initramfs_file;
1086static char __initdata initramfs_file[128] = "initramfs";
1087
1088static int __init setup_initramfs_file(char *str)
1089{
1090	if (str == NULL)
1091		return -EINVAL;
1092	strncpy(initramfs_file, str, sizeof(initramfs_file) - 1);
1093	set_initramfs_file = 1;
1094
1095	return 0;
1096}
1097early_param("initramfs_file", setup_initramfs_file);
1098
1099/*
1100 * We look for a file called "initramfs" in the hvfs.  If there is one, we
1101 * allocate some memory for it and it will be unpacked into the initramfs.
1102 * If it's compressed, the initrd code will uncompress it first.
1103 */
1104static void __init load_hv_initrd(void)
1105{
1106	HV_FS_StatInfo stat;
1107	int fd, rc;
1108	void *initrd;
1109
1110	/* If initrd has already been set, skip initramfs file in hvfs. */
1111	if (initrd_start)
1112		return;
1113
1114	fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
1115	if (fd == HV_ENOENT) {
1116		if (set_initramfs_file) {
1117			pr_warning("No such hvfs initramfs file '%s'\n",
1118				   initramfs_file);
1119			return;
1120		} else {
1121			/* Try old backwards-compatible name. */
1122			fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
1123			if (fd == HV_ENOENT)
1124				return;
1125		}
1126	}
1127	BUG_ON(fd < 0);
1128	stat = hv_fs_fstat(fd);
1129	BUG_ON(stat.size < 0);
1130	if (stat.flags & HV_FS_ISDIR) {
1131		pr_warning("Ignoring hvfs file '%s': it's a directory.\n",
1132			   initramfs_file);
1133		return;
1134	}
1135	initrd = alloc_bootmem_pages(stat.size);
1136	rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0);
1137	if (rc != stat.size) {
1138		pr_err("Error reading %d bytes from hvfs file '%s': %d\n",
1139		       stat.size, initramfs_file, rc);
1140		free_initrd_mem((unsigned long) initrd, stat.size);
1141		return;
1142	}
1143	initrd_start = (unsigned long) initrd;
1144	initrd_end = initrd_start + stat.size;
1145}
1146
1147void __init free_initrd_mem(unsigned long begin, unsigned long end)
1148{
1149	free_bootmem(__pa(begin), end - begin);
1150}
1151
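    /*
     * Handle "initrd=<size>@<paddr>": the size and physical load
     * address of an initrd image placed in memory by the boot loader.
     */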
1152static int __init setup_initrd(char *str)
1153{
1154	char *endp;
1155	unsigned long initrd_size;
1156
1157	initrd_size = str ? simple_strtoul(str, &endp, 0) : 0;
1158	if (initrd_size == 0 || *endp != '@')
1159		return -EINVAL;
1160
1161	initrd_start = simple_strtoul(endp+1, &endp, 0);
1162	if (initrd_start == 0)
1163		return -EINVAL;
1164
1165	initrd_end = initrd_start + initrd_size;
1166
1167	return 0;
1168}
1169early_param("initrd", setup_initrd);
1170
1171#else
1172static inline void load_hv_initrd(void) {}
1173#endif /* CONFIG_BLK_DEV_INITRD */
1174
1175static void __init validate_hv(void)
1176{
1177	/*
1178	 * It may already be too late, but let's check our built-in
1179	 * configuration against what the hypervisor is providing.
1180	 */
1181	unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE);
1182	int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL);
1183	int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE);
1184	HV_ASIDRange asid_range;
1185
1186#ifndef CONFIG_SMP
1187	HV_Topology topology = hv_inquire_topology();
1188	BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
1189	if (topology.width != 1 || topology.height != 1) {
1190		pr_warning("Warning: booting UP kernel on %dx%d grid;"
1191			   " will ignore all but first tile.\n",
1192			   topology.width, topology.height);
1193	}
1194#endif
1195
1196	if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text)
1197		early_panic("Hypervisor glue size %ld is too big!\n",
1198			    glue_size);
1199	if (hv_page_size != PAGE_SIZE)
1200		early_panic("Hypervisor page size %#x != our %#lx\n",
1201			    hv_page_size, PAGE_SIZE);
1202	if (hv_hpage_size != HPAGE_SIZE)
1203		early_panic("Hypervisor huge page size %#x != our %#lx\n",
1204			    hv_hpage_size, HPAGE_SIZE);
1205
1206#ifdef CONFIG_SMP
1207	/*
1208	 * Some hypervisor APIs take a pointer to a bitmap array
1209	 * whose size is at least the number of cpus on the chip.
1210	 * We use a struct cpumask for this, so it must be big enough.
1211	 */
1212	if ((smp_height * smp_width) > nr_cpu_ids)
1213		early_panic("Hypervisor %d x %d grid too big for Linux"
1214			    " NR_CPUS %d\n", smp_height, smp_width,
1215			    nr_cpu_ids);
1216#endif
1217
1218	/*
1219	 * Check that we're using allowed ASIDs, and initialize the
1220	 * various asid variables to their appropriate initial states.
1221	 */
1222	asid_range = hv_inquire_asid(0);
1223	__get_cpu_var(current_asid) = min_asid = asid_range.start;
1224	max_asid = asid_range.start + asid_range.size - 1;
1225
1226	if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
1227		       sizeof(chip_model)) < 0) {
1228		pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
1229		strlcpy(chip_model, "unknown", sizeof(chip_model));
1230	}
1231}
1232
1233static void __init validate_va(void)
1234{
1235#ifndef __tilegx__   /* FIXME: GX: probably some validation relevant here */
1236	/*
1237	 * Similarly, make sure we're only using allowed VAs.
1238	 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_START,
1239	 * and 0 .. KERNEL_HIGH_VADDR.
1240	 * In addition, make sure we CAN'T use the end of memory, since
1241	 * we use the last chunk of each pgd for the pgd_list.
1242	 */
1243	int i, user_kernel_ok = 0;
1244	unsigned long max_va = 0;
1245	unsigned long list_va =
1246		((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);
1247
1248	for (i = 0; ; ++i) {
1249		HV_VirtAddrRange range = hv_inquire_virtual(i);
1250		if (range.size == 0)
1251			break;
1252		if (range.start <= MEM_USER_INTRPT &&
1253		    range.start + range.size >= MEM_HV_START)
1254			user_kernel_ok = 1;
1255		if (range.start == 0)
1256			max_va = range.size;
1257		BUG_ON(range.start + range.size > list_va);
1258	}
1259	if (!user_kernel_ok)
1260		early_panic("Hypervisor not configured for user/kernel VAs\n");
1261	if (max_va == 0)
1262		early_panic("Hypervisor not configured for low VAs\n");
1263	if (max_va < KERNEL_HIGH_VADDR)
1264		early_panic("Hypervisor max VA %#lx smaller than %#lx\n",
1265			    max_va, KERNEL_HIGH_VADDR);
1266
1267	/* Kernel PCs must have their high bit set; see intvec.S. */
1268	if ((long)VMALLOC_START >= 0)
1269		early_panic(
1270			"Linux VMALLOC region below the 2GB line (%#lx)!\n"
1271			"Reconfigure the kernel with smaller VMALLOC_RESERVE.\n",
1272			VMALLOC_START);
1273#endif
1274}
1275
1276/*
1277 * cpu_lotar_map lists all the cpus that are valid for the supervisor
1278 * to cache data on at a page level, i.e. what cpus can be placed in
1279 * the LOTAR field of a PTE.  It is equivalent to the set of possible
1280 * cpus plus any other cpus that are willing to share their cache.
1281 * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
1282 */
1283struct cpumask __write_once cpu_lotar_map;
1284EXPORT_SYMBOL(cpu_lotar_map);
1285
1286/*
1287 * hash_for_home_map lists all the tiles that hash-for-home data
1288 * will be cached on.  Note that this may include tiles that are not
1289 * valid for this supervisor to use otherwise (e.g. if a hypervisor
1290 * device is being shared between multiple supervisors).
1291 * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE).
1292 */
1293struct cpumask hash_for_home_map;
1294EXPORT_SYMBOL(hash_for_home_map);
1295
1296/*
1297 * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
1298 * flush on our behalf.  It is set to cpu_possible_mask OR'ed with
1299 * hash_for_home_map, and it is what should be passed to
1300 * hv_flush_remote() to flush all caches.  Note that if there are
1301 * dedicated hypervisor driver tiles that have authorized use of their
1302 * cache, those tiles will only appear in cpu_lotar_map, NOT in
1303 * cpu_cacheable_map, as they are a special case.
1304 */
1305struct cpumask __write_once cpu_cacheable_map;
1306EXPORT_SYMBOL(cpu_cacheable_map);
1307
1308static __initdata struct cpumask disabled_map;
1309
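    /*
     * Handle "disabled_cpus=<cpulist>".  The boot cpu cannot be
     * disabled; if listed, it is dropped with a warning.
     */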
1310static int __init disabled_cpus(char *str)
1311{
1312	int boot_cpu = smp_processor_id();
1313
1314	if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0)
1315		return -EINVAL;
1316	if (cpumask_test_cpu(boot_cpu, &disabled_map)) {
1317		pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu);
1318		cpumask_clear_cpu(boot_cpu, &disabled_map);
1319	}
1320	return 0;
1321}
1322
1323early_param("disabled_cpus", disabled_cpus);
1324
1325void __init print_disabled_cpus(void)
1326{
1327	if (!cpumask_empty(&disabled_map)) {
1328		char buf[100];
1329		cpulist_scnprintf(buf, sizeof(buf), &disabled_map);
1330		pr_info("CPUs not available for Linux: %s\n", buf);
1331	}
1332}
1333
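    /*
     * Ask the hypervisor which tiles are available, fold in the
     * "disabled_cpus" list and setup_max_cpus, and initialize
     * cpu_possible_mask plus the lotar, hash-for-home, and cacheable
     * masks declared above.
     */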
1334static void __init setup_cpu_maps(void)
1335{
1336	struct cpumask hv_disabled_map, cpu_possible_init;
1337	int boot_cpu = smp_processor_id();
1338	int cpus, i, rc;
1339
1340	/* Learn which cpus are allowed by the hypervisor. */
1341	rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL,
1342			      (HV_VirtAddr) cpumask_bits(&cpu_possible_init),
1343			      sizeof(cpu_cacheable_map));
1344	if (rc < 0)
1345		early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc);
1346	if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init))
1347		early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu);
1348
1349	/* Compute the cpus disabled by the hvconfig file. */
1350	cpumask_complement(&hv_disabled_map, &cpu_possible_init);
1351
1352	/* Include them with the cpus disabled by "disabled_cpus". */
1353	cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map);
1354
1355	/*
1356	 * Disable every cpu after "setup_max_cpus".  But don't mark
1357	 * as disabled the cpus that are outside of our initial rectangle,
1358	 * since that turns out to be confusing.
1359	 */
1360	cpus = 1;                          /* this cpu */
1361	cpumask_set_cpu(boot_cpu, &disabled_map);   /* ignore this cpu */
1362	for (i = 0; cpus < setup_max_cpus; ++i)
1363		if (!cpumask_test_cpu(i, &disabled_map))
1364			++cpus;
1365	for (; i < smp_height * smp_width; ++i)
1366		cpumask_set_cpu(i, &disabled_map);
1367	cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */
1368	for (i = smp_height * smp_width; i < NR_CPUS; ++i)
1369		cpumask_clear_cpu(i, &disabled_map);
1370
1371	/*
1372	 * Setup cpu_possible map as every cpu allocated to us, minus
1373	 * the results of any "disabled_cpus" settings.
1374	 */
1375	cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map);
1376	init_cpu_possible(&cpu_possible_init);
1377
1378	/* Learn which cpus are valid for LOTAR caching. */
1379	rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR,
1380			      (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
1381			      sizeof(cpu_lotar_map));
1382	if (rc < 0) {
1383		pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
1384		cpu_lotar_map = *cpu_possible_mask;
1385	}
1386
1387	/* Retrieve set of CPUs used for hash-for-home caching */
1388	rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
1389			      (HV_VirtAddr) hash_for_home_map.bits,
1390			      sizeof(hash_for_home_map));
1391	if (rc < 0)
1392		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
1393	cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
1394}
1395
1396
1397static int __init dataplane(char *str)
1398{
1399	pr_warning("WARNING: dataplane support disabled in this kernel\n");
1400	return 0;
1401}
1402
1403early_param("dataplane", dataplane);
1404
1405#ifdef CONFIG_CMDLINE_BOOL
1406static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
1407#endif
1408
1409void __init setup_arch(char **cmdline_p)
1410{
1411	int len;
1412
1413#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
1414	len = hv_get_command_line((HV_VirtAddr) boot_command_line,
1415				  COMMAND_LINE_SIZE);
1416	if (boot_command_line[0])
1417		pr_warning("WARNING: ignoring dynamic command line \"%s\"\n",
1418			   boot_command_line);
1419	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
1420#else
1421	char *hv_cmdline;
1422#if defined(CONFIG_CMDLINE_BOOL)
1423	if (builtin_cmdline[0]) {
1424		int builtin_len = strlcpy(boot_command_line, builtin_cmdline,
1425					  COMMAND_LINE_SIZE);
1426		if (builtin_len < COMMAND_LINE_SIZE-1)
1427			boot_command_line[builtin_len++] = ' ';
1428		hv_cmdline = &boot_command_line[builtin_len];
1429		len = COMMAND_LINE_SIZE - builtin_len;
1430	} else
1431#endif
1432	{
1433		hv_cmdline = boot_command_line;
1434		len = COMMAND_LINE_SIZE;
1435	}
1436	len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len);
1437	if (len < 0 || len > COMMAND_LINE_SIZE)
1438		early_panic("hv_get_command_line failed: %d\n", len);
1439#endif
1440
1441	*cmdline_p = boot_command_line;
1442
1443	/* Set disabled_map and setup_max_cpus very early */
1444	parse_early_param();
1445
1446	/* Make sure the kernel is compatible with the hypervisor. */
1447	validate_hv();
1448	validate_va();
1449
1450	setup_cpu_maps();
1451
1452
1453#if defined(CONFIG_PCI) && !defined(__tilegx__)
1454	/*
1455	 * Initialize the PCI structures.  This is done before memory
1456	 * setup so that we know whether or not a pci_reserve region
1457	 * is necessary.
1458	 */
1459	if (tile_pci_init() == 0)
1460		pci_reserve_mb = 0;
1461
1462	/* PCI systems reserve a region just below 4GB for mapping iomem. */
1463	pci_reserve_end_pfn = (1 << (32 - PAGE_SHIFT));
1464	pci_reserve_start_pfn = pci_reserve_end_pfn -
1465		(pci_reserve_mb << (20 - PAGE_SHIFT));
1466#endif
1467
1468	init_mm.start_code = (unsigned long) _text;
1469	init_mm.end_code = (unsigned long) _etext;
1470	init_mm.end_data = (unsigned long) _edata;
1471	init_mm.brk = (unsigned long) _end;
1472
1473	setup_memory();
1474	store_permanent_mappings();
1475	setup_bootmem_allocator();
1476
1477	/*
1478	 * NOTE: before this point _nobody_ is allowed to allocate
1479	 * any memory using the bootmem allocator.
1480	 */
1481
1482#ifdef CONFIG_SWIOTLB
1483	swiotlb_init(0);
1484#endif
1485
1486	paging_init();
1487	setup_numa_mapping();
1488	zone_sizes_init();
1489	set_page_homes();
1490	setup_cpu(1);
1491	setup_clock();
1492	load_hv_initrd();
1493}
1494
1495
1496/*
1497 * Set up per-cpu memory.
1498 */
1499
1500unsigned long __per_cpu_offset[NR_CPUS] __write_once;
1501EXPORT_SYMBOL(__per_cpu_offset);
1502
1503static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
1504static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 };
1505
1506/*
1507 * As the percpu code allocates pages, we return the pages from the
1508 * end of the node for the specified cpu.
1509 */
1510static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
1511{
1512	int nid = cpu_to_node(cpu);
1513	unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid];
1514
1515	BUG_ON(size % PAGE_SIZE != 0);
1516	pfn_offset[nid] += size / PAGE_SIZE;
1517	BUG_ON(node_percpu[nid] < size);
1518	node_percpu[nid] -= size;
1519	if (percpu_pfn[cpu] == 0)
1520		percpu_pfn[cpu] = pfn;
1521	return pfn_to_kaddr(pfn);
1522}
1523
1524/*
1525 * Pages reserved for percpu memory are not freeable, and in any case we are
1526 * on a short path to panic() in setup_per_cpu_area() at this point anyway.
1527 */
1528static void __init pcpu_fc_free(void *ptr, size_t size)
1529{
1530}
1531
1532/*
1533 * Set up vmalloc page tables using bootmem for the percpu code.
1534 */
1535static void __init pcpu_fc_populate_pte(unsigned long addr)
1536{
1537	pgd_t *pgd;
1538	pud_t *pud;
1539	pmd_t *pmd;
1540	pte_t *pte;
1541
1542	BUG_ON(pgd_addr_invalid(addr));
1543	if (addr < VMALLOC_START || addr >= VMALLOC_END)
1544		panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
1545		      " try increasing CONFIG_VMALLOC_RESERVE\n",
1546		      addr, VMALLOC_START, VMALLOC_END);
1547
1548	pgd = swapper_pg_dir + pgd_index(addr);
1549	pud = pud_offset(pgd, addr);
1550	BUG_ON(!pud_present(*pud));
1551	pmd = pmd_offset(pud, addr);
1552	if (pmd_present(*pmd)) {
1553		BUG_ON(pmd_huge_page(*pmd));
1554	} else {
1555		pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE,
1556				      HV_PAGE_TABLE_ALIGN, 0);
1557		pmd_populate_kernel(&init_mm, pmd, pte);
1558	}
1559}
1560
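    /*
     * Build the percpu area with pcpu_page_first_chunk(), then re-home
     * each cpu's pages on that cpu's own cache, updating both the
     * vmalloc and the lowmem page tables so the two mappings stay
     * consistent.
     */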
1561void __init setup_per_cpu_areas(void)
1562{
1563	struct page *pg;
1564	unsigned long delta, pfn, lowmem_va;
1565	unsigned long size = percpu_size();
1566	char *ptr;
1567	int rc, cpu, i;
1568
1569	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc,
1570				   pcpu_fc_free, pcpu_fc_populate_pte);
1571	if (rc < 0)
1572		panic("Cannot initialize percpu area (err=%d)", rc);
1573
1574	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1575	for_each_possible_cpu(cpu) {
1576		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
1577
1578		/* finv the copy out of cache so we can change homecache */
1579		ptr = pcpu_base_addr + pcpu_unit_offsets[cpu];
1580		__finv_buffer(ptr, size);
1581		pfn = percpu_pfn[cpu];
1582
1583		/* Rewrite the page tables to cache on that cpu */
1584		pg = pfn_to_page(pfn);
1585		for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {
1586
1587			/* Update the vmalloc mapping and page home. */
1588			unsigned long addr = (unsigned long)ptr + i;
1589			pte_t *ptep = virt_to_kpte(addr);
1590			pte_t pte = *ptep;
1591			BUG_ON(pfn != pte_pfn(pte));
1592			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
1593			pte = set_remote_cache_cpu(pte, cpu);
1594			set_pte_at(&init_mm, addr, ptep, pte);
1595
1596			/* Update the lowmem mapping for consistency. */
1597			lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
1598			ptep = virt_to_kpte(lowmem_va);
1599			if (pte_huge(*ptep)) {
1600				printk(KERN_DEBUG "early shatter of huge page"
1601				       " at %#lx\n", lowmem_va);
1602				shatter_pmd((pmd_t *)ptep);
1603				ptep = virt_to_kpte(lowmem_va);
1604				BUG_ON(pte_huge(*ptep));
1605			}
1606			BUG_ON(pfn != pte_pfn(*ptep));
1607			set_pte_at(&init_mm, lowmem_va, ptep, pte);
1608		}
1609	}
1610
1611	/* Set our thread pointer appropriately. */
1612	set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]);
1613
1614	/* Make sure the finv's have completed. */
1615	mb_incoherent();
1616
1617	/* Flush the TLB so we reference it properly from here on out. */
1618	local_flush_tlb_all();
1619}
1620
1621static struct resource data_resource = {
1622	.name	= "Kernel data",
1623	.start	= 0,
1624	.end	= 0,
1625	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
1626};
1627
1628static struct resource code_resource = {
1629	.name	= "Kernel code",
1630	.start	= 0,
1631	.end	= 0,
1632	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
1633};
1634
1635/*
1636 * On Pro, we reserve all resources above 4GB so that PCI won't try to put
1637 * mappings above 4GB.
1638 */
1639#if defined(CONFIG_PCI) && !defined(__tilegx__)
1640static struct resource* __init
1641insert_non_bus_resource(void)
1642{
1643	struct resource *res =
1644		kzalloc(sizeof(struct resource), GFP_ATOMIC);
1645	if (!res)
1646		return NULL;
1647	res->name = "Non-Bus Physical Address Space";
1648	res->start = (1ULL << 32);
1649	res->end = -1LL;
1650	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
1651	if (insert_resource(&iomem_resource, res)) {
1652		kfree(res);
1653		return NULL;
1654	}
1655	return res;
1656}
1657#endif
1658
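    /* Register [start_pfn, end_pfn) as "System RAM" or "Reserved". */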
1659static struct resource* __init
1660insert_ram_resource(u64 start_pfn, u64 end_pfn, bool reserved)
1661{
1662	struct resource *res =
1663		kzalloc(sizeof(struct resource), GFP_ATOMIC);
1664	if (!res)
1665		return NULL;
1666	res->name = reserved ? "Reserved" : "System RAM";
1667	res->start = start_pfn << PAGE_SHIFT;
1668	res->end = (end_pfn << PAGE_SHIFT) - 1;
1669	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
1670	if (insert_resource(&iomem_resource, res)) {
1671		kfree(res);
1672		return NULL;
1673	}
1674	return res;
1675}
1676
1677/*
1678 * Request address space for all standard resources
1679 *
1680 * If the system includes PCI root complex drivers, we need to create
1681 * a window just below 4GB where PCI BARs can be mapped.
1682 */
1683static int __init request_standard_resources(void)
1684{
1685	int i;
1686	enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };
1687
1688#if defined(CONFIG_PCI) && !defined(__tilegx__)
1689	insert_non_bus_resource();
1690#endif
1691
1692	for_each_online_node(i) {
1693		u64 start_pfn = node_start_pfn[i];
1694		u64 end_pfn = node_end_pfn[i];
1695
1696#if defined(CONFIG_PCI) && !defined(__tilegx__)
1697		if (start_pfn <= pci_reserve_start_pfn &&
1698		    end_pfn > pci_reserve_start_pfn) {
1699			if (end_pfn > pci_reserve_end_pfn)
1700				insert_ram_resource(pci_reserve_end_pfn,
1701						    end_pfn, 0);
1702			end_pfn = pci_reserve_start_pfn;
1703		}
1704#endif
1705		insert_ram_resource(start_pfn, end_pfn, 0);
1706	}
1707
1708	code_resource.start = __pa(_text - CODE_DELTA);
1709	code_resource.end = __pa(_etext - CODE_DELTA)-1;
1710	data_resource.start = __pa(_sdata);
1711	data_resource.end = __pa(_end)-1;
1712
1713	insert_resource(&iomem_resource, &code_resource);
1714	insert_resource(&iomem_resource, &data_resource);
1715
1716	/* Mark any "memmap" regions busy for the resource manager. */
1717	for (i = 0; i < memmap_nr; ++i) {
1718		struct memmap_entry *m = &memmap_map[i];
1719		insert_ram_resource(PFN_DOWN(m->addr),
1720				    PFN_UP(m->addr + m->size - 1), 1);
1721	}
1722
1723#ifdef CONFIG_KEXEC
1724	insert_resource(&iomem_resource, &crashk_res);
1725#endif
1726
1727	return 0;
1728}
1729
1730subsys_initcall(request_standard_resources);