v5.4
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 2004-2007 Cavium Networks
   7 * Copyright (C) 2008, 2009 Wind River Systems
   8 *   written by Ralf Baechle <ralf@linux-mips.org>
   9 */
  10#include <linux/compiler.h>
  11#include <linux/vmalloc.h>
  12#include <linux/init.h>
  13#include <linux/kernel.h>
  14#include <linux/console.h>
  15#include <linux/delay.h>
  16#include <linux/export.h>
  17#include <linux/interrupt.h>
   18#include <linux/io.h>
   19#include <linux/serial.h>
  20#include <linux/smp.h>
  21#include <linux/types.h>
  22#include <linux/string.h>	/* for memset */
  23#include <linux/tty.h>
  24#include <linux/time.h>
  25#include <linux/platform_device.h>
  26#include <linux/serial_core.h>
  27#include <linux/serial_8250.h>
  28#include <linux/of_fdt.h>
  29#include <linux/libfdt.h>
  30#include <linux/kexec.h>
  31
  32#include <asm/processor.h>
  33#include <asm/reboot.h>
  34#include <asm/smp-ops.h>
  35#include <asm/irq_cpu.h>
  36#include <asm/mipsregs.h>
  37#include <asm/bootinfo.h>
  38#include <asm/sections.h>
  39#include <asm/fw/fw.h>
  40#include <asm/setup.h>
  41#include <asm/prom.h>
  42#include <asm/time.h>
  43
  44#include <asm/octeon/octeon.h>
  45#include <asm/octeon/pci-octeon.h>
  46#include <asm/octeon/cvmx-rst-defs.h>
  47
  48/*
  49 * TRUE for devices having registers with little-endian byte
  50 * order, FALSE for registers with native-endian byte order.
   51 * PCI mandates little-endian, USB and SATA are configurable,
  52 * but we chose little-endian for these.
  53 */
  54const bool octeon_should_swizzle_table[256] = {
  55	[0x00] = true,	/* bootbus/CF */
  56	[0x1b] = true,	/* PCI mmio window */
  57	[0x1c] = true,	/* PCI mmio window */
  58	[0x1d] = true,	/* PCI mmio window */
  59	[0x1e] = true,	/* PCI mmio window */
  60	[0x68] = true,	/* OCTEON III USB */
  61	[0x69] = true,	/* OCTEON III USB */
  62	[0x6c] = true,	/* OCTEON III SATA */
  63	[0x6f] = true,	/* OCTEON II USB */
  64};
  65EXPORT_SYMBOL(octeon_should_swizzle_table);
  66
  67#ifdef CONFIG_PCI
  68extern void pci_console_init(const char *arg);
  69#endif
  70
  71static unsigned long long max_memory = ULLONG_MAX;
  72static unsigned long long reserve_low_mem;
  73
  74DEFINE_SEMAPHORE(octeon_bootbus_sem);
  75EXPORT_SYMBOL(octeon_bootbus_sem);
  76
  77static struct octeon_boot_descriptor *octeon_boot_desc_ptr;
  78
  79struct cvmx_bootinfo *octeon_bootinfo;
  80EXPORT_SYMBOL(octeon_bootinfo);
  81
  82#ifdef CONFIG_KEXEC
  83#ifdef CONFIG_SMP
  84/*
   85 * Wait until the relocation code is prepared, and send
   86 * secondary CPUs to spin until the kernel is relocated.
  87 */
  88static void octeon_kexec_smp_down(void *ignored)
  89{
  90	int cpu = smp_processor_id();
  91
  92	local_irq_disable();
  93	set_cpu_online(cpu, false);
  94	while (!atomic_read(&kexec_ready_to_reboot))
  95		cpu_relax();
  96
  97	asm volatile (
  98	"	sync						\n"
  99	"	synci	($0)					\n");
 100
 101	kexec_reboot();
 102}
 103#endif
 104
 105#define OCTEON_DDR0_BASE    (0x0ULL)
 106#define OCTEON_DDR0_SIZE    (0x010000000ULL)
 107#define OCTEON_DDR1_BASE    (0x410000000ULL)
 108#define OCTEON_DDR1_SIZE    (0x010000000ULL)
 109#define OCTEON_DDR2_BASE    (0x020000000ULL)
 110#define OCTEON_DDR2_SIZE    (0x3e0000000ULL)
 111#define OCTEON_MAX_PHY_MEM_SIZE (16*1024*1024*1024ULL)
 112
 113static struct kimage *kimage_ptr;
 114
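/*
 * Rebuild the bootmem free list the way the bootloader would have left it:
 * mark DDR0/DDR1/DDR2 free except for reserve_low_mem and the requested low
 * reservation, capping the usable total at OCTEON_MAX_PHY_MEM_SIZE.
 */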
 115static void kexec_bootmem_init(uint64_t mem_size, uint32_t low_reserved_bytes)
 116{
 117	int64_t addr;
 118	struct cvmx_bootmem_desc *bootmem_desc;
 119
 120	bootmem_desc = cvmx_bootmem_get_desc();
 121
 122	if (mem_size > OCTEON_MAX_PHY_MEM_SIZE) {
 123		mem_size = OCTEON_MAX_PHY_MEM_SIZE;
  124		pr_err("Error: requested memory too large, "
  125		       "truncating to maximum size\n");
 126	}
 127
 128	bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER;
 129	bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER;
 130
 131	addr = (OCTEON_DDR0_BASE + reserve_low_mem + low_reserved_bytes);
 132	bootmem_desc->head_addr = 0;
 133
 134	if (mem_size <= OCTEON_DDR0_SIZE) {
 135		__cvmx_bootmem_phy_free(addr,
 136				mem_size - reserve_low_mem -
 137				low_reserved_bytes, 0);
 138		return;
 139	}
 140
 141	__cvmx_bootmem_phy_free(addr,
 142			OCTEON_DDR0_SIZE - reserve_low_mem -
 143			low_reserved_bytes, 0);
 144
 145	mem_size -= OCTEON_DDR0_SIZE;
 146
 147	if (mem_size > OCTEON_DDR1_SIZE) {
 148		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
 149		__cvmx_bootmem_phy_free(OCTEON_DDR2_BASE,
 150				mem_size - OCTEON_DDR1_SIZE, 0);
 151	} else
 152		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
 153}
 154
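/*
 * Convert the "kexec ..." command-line segment into the bootloader-style
 * argc/argv in the boot descriptor and remember the image so its segments
 * can be protected when the bootmem map is rebuilt at shutdown.
 */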
 155static int octeon_kexec_prepare(struct kimage *image)
 156{
 157	int i;
 158	char *bootloader = "kexec";
 159
 160	octeon_boot_desc_ptr->argc = 0;
 161	for (i = 0; i < image->nr_segments; i++) {
 162		if (!strncmp(bootloader, (char *)image->segment[i].buf,
 163				strlen(bootloader))) {
 164			/*
 165			 * convert command line string to array
 166			 * of parameters (as bootloader does).
 167			 */
 168			int argc = 0, offt;
 169			char *str = (char *)image->segment[i].buf;
 170			char *ptr = strchr(str, ' ');
 171			while (ptr && (OCTEON_ARGV_MAX_ARGS > argc)) {
 172				*ptr = '\0';
 173				if (ptr[1] != ' ') {
 174					offt = (int)(ptr - str + 1);
 175					octeon_boot_desc_ptr->argv[argc] =
 176						image->segment[i].mem + offt;
 177					argc++;
 178				}
 179				ptr = strchr(ptr + 1, ' ');
 180			}
 181			octeon_boot_desc_ptr->argc = argc;
 182			break;
 183		}
 184	}
 185
 186	/*
 187	 * Information about segments will be needed during pre-boot memory
 188	 * initialization.
 189	 */
 190	kimage_ptr = image;
 191	return 0;
 192}
 193
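/*
 * Shutdown work shared by normal and crash kexec: disable the per-core
 * watchdogs, hand memory back to the bootmem allocator (or, for a crash
 * kernel, free only the named blocks), and set up the boot arguments that
 * the next kernel will receive.
 */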
 194static void octeon_generic_shutdown(void)
 195{
 196	int i;
 197#ifdef CONFIG_SMP
 198	int cpu;
 199#endif
 200	struct cvmx_bootmem_desc *bootmem_desc;
 201	void *named_block_array_ptr;
 202
 203	bootmem_desc = cvmx_bootmem_get_desc();
 204	named_block_array_ptr =
 205		cvmx_phys_to_ptr(bootmem_desc->named_block_array_addr);
 206
 207#ifdef CONFIG_SMP
 208	/* disable watchdogs */
 209	for_each_online_cpu(cpu)
 210		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
 211#else
 212	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
 213#endif
 214	if (kimage_ptr != kexec_crash_image) {
 215		memset(named_block_array_ptr,
 216			0x0,
 217			CVMX_BOOTMEM_NUM_NAMED_BLOCKS *
 218			sizeof(struct cvmx_bootmem_named_block_desc));
 219		/*
 220		 * Mark all memory (except low 0x100000 bytes) as free.
 221		 * It is the same thing that bootloader does.
 222		 */
 223		kexec_bootmem_init(octeon_bootinfo->dram_size*1024ULL*1024ULL,
 224				0x100000);
 225		/*
 226		 * Allocate all segments to avoid their corruption during boot.
 227		 */
 228		for (i = 0; i < kimage_ptr->nr_segments; i++)
 229			cvmx_bootmem_alloc_address(
 230				kimage_ptr->segment[i].memsz + 2*PAGE_SIZE,
 231				kimage_ptr->segment[i].mem - PAGE_SIZE,
 232				PAGE_SIZE);
 233	} else {
 234		/*
 235		 * Do not mark all memory as free. Free only named sections
 236		 * leaving the rest of memory unchanged.
 237		 */
 238		struct cvmx_bootmem_named_block_desc *ptr =
 239			(struct cvmx_bootmem_named_block_desc *)
 240			named_block_array_ptr;
 241
 242		for (i = 0; i < bootmem_desc->named_block_num_blocks; i++)
 243			if (ptr[i].size)
 244				cvmx_bootmem_free_named(ptr[i].name);
 245	}
 246	kexec_args[2] = 1UL; /* running on octeon_main_processor */
 247	kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
 248#ifdef CONFIG_SMP
 249	secondary_kexec_args[2] = 0UL; /* running on secondary cpu */
 250	secondary_kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
 251#endif
 252}
 253
 254static void octeon_shutdown(void)
 255{
 256	octeon_generic_shutdown();
 257#ifdef CONFIG_SMP
 258	smp_call_function(octeon_kexec_smp_down, NULL, 0);
 259	smp_wmb();
 260	while (num_online_cpus() > 1) {
 261		cpu_relax();
 262		mdelay(1);
 263	}
 264#endif
 265}
 266
 267static void octeon_crash_shutdown(struct pt_regs *regs)
 268{
 269	octeon_generic_shutdown();
 270	default_machine_crash_shutdown(regs);
 271}
 272
 273#ifdef CONFIG_SMP
 274void octeon_crash_smp_send_stop(void)
 275{
 276	int cpu;
 277
 278	/* disable watchdogs */
 279	for_each_online_cpu(cpu)
 280		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
 281}
 282#endif
 283
 284#endif /* CONFIG_KEXEC */
 285
 286#ifdef CONFIG_CAVIUM_RESERVE32
 287uint64_t octeon_reserve32_memory;
 288EXPORT_SYMBOL(octeon_reserve32_memory);
 289#endif
 290
 291#ifdef CONFIG_KEXEC
  292/* The crashkernel cmdline parameter is parsed _after_ memory setup,
  293 * so we also parse it here (workaround for EHB5200). */
 294static uint64_t crashk_size, crashk_base;
 295#endif
 296
 297static int octeon_uart;
 298
 299extern asmlinkage void handle_int(void);
 300
 301/**
  302 * Return non zero if we are currently running in the Octeon simulator
  303 *
  304 * Returns non zero if running in the Octeon simulator, 0 otherwise.
 305 */
 306int octeon_is_simulation(void)
 307{
 308	return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
 309}
 310EXPORT_SYMBOL(octeon_is_simulation);
 311
 312/**
 313 * Return true if Octeon is in PCI Host mode. This means
 314 * Linux can control the PCI bus.
 315 *
  316 * Returns non-zero if Octeon is in host mode.
 317 */
 318int octeon_is_pci_host(void)
 319{
 320#ifdef CONFIG_PCI
 321	return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
 322#else
 323	return 0;
 324#endif
 325}
 326
 327/**
 328 * Get the clock rate of Octeon
 329 *
 330 * Returns Clock rate in HZ
 331 */
 332uint64_t octeon_get_clock_rate(void)
 333{
 334	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();
 335
 336	return sysinfo->cpu_clock_hz;
 337}
 338EXPORT_SYMBOL(octeon_get_clock_rate);
 339
 340static u64 octeon_io_clock_rate;
 341
 342u64 octeon_get_io_clock_rate(void)
 343{
 344	return octeon_io_clock_rate;
 345}
 346EXPORT_SYMBOL(octeon_get_io_clock_rate);
 347
 348
 349/**
 350 * Write to the LCD display connected to the bootbus. This display
 351 * exists on most Cavium evaluation boards. If it doesn't exist, then
 352 * this function doesn't do anything.
 353 *
  354 * @s:	    String to write
  355 */
 356static void octeon_write_lcd(const char *s)
 357{
 358	if (octeon_bootinfo->led_display_base_addr) {
 359		void __iomem *lcd_address =
 360			ioremap_nocache(octeon_bootinfo->led_display_base_addr,
 361					8);
 362		int i;
 363		for (i = 0; i < 8; i++, s++) {
 364			if (*s)
 365				iowrite8(*s, lcd_address + i);
 366			else
 367				iowrite8(' ', lcd_address + i);
 368		}
 369		iounmap(lcd_address);
 370	}
 371}
 372
 373/**
 374 * Return the console uart passed by the bootloader
 375 *
 376 * Returns uart	  (0 or 1)
 377 */
 378static int octeon_get_boot_uart(void)
 379{
 380	return (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
 381		1 : 0;
 382}
 383
 384/**
 385 * Get the coremask Linux was booted on.
 386 *
 387 * Returns Core mask
 388 */
 389int octeon_get_boot_coremask(void)
 390{
 391	return octeon_boot_desc_ptr->core_mask;
 392}
 393
 394/**
 395 * Check the hardware BIST results for a CPU
 396 */
 397void octeon_check_cpu_bist(void)
 398{
 399	const int coreid = cvmx_get_core_num();
 400	unsigned long long mask;
 401	unsigned long long bist_val;
 402
 403	/* Check BIST results for COP0 registers */
 404	mask = 0x1f00000000ull;
 405	bist_val = read_octeon_c0_icacheerr();
 406	if (bist_val & mask)
 407		pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
 408		       coreid, bist_val);
 409
 410	bist_val = read_octeon_c0_dcacheerr();
 411	if (bist_val & 1)
 412		pr_err("Core%d L1 Dcache parity error: "
 413		       "CacheErr(dcache) = 0x%llx\n",
 414		       coreid, bist_val);
 415
 416	mask = 0xfc00000000000000ull;
 417	bist_val = read_c0_cvmmemctl();
 418	if (bist_val & mask)
 419		pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
 420		       coreid, bist_val);
 421
 422	write_octeon_c0_dcacheerr(0);
 423}
 424
 425/**
 426 * Reboot Octeon
 427 *
 428 * @command: Command to pass to the bootloader. Currently ignored.
 429 */
 430static void octeon_restart(char *command)
 431{
 432	/* Disable all watchdogs before soft reset. They don't get cleared */
 433#ifdef CONFIG_SMP
 434	int cpu;
 435	for_each_online_cpu(cpu)
 436		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
 437#else
 438	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
 439#endif
 440
 441	mb();
 442	while (1)
 443		if (OCTEON_IS_OCTEON3())
 444			cvmx_write_csr(CVMX_RST_SOFT_RST, 1);
 445		else
 446			cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
 447}
 448
 449
 450/**
 451 * Permanently stop a core.
 452 *
 453 * @arg: Ignored.
 454 */
 455static void octeon_kill_core(void *arg)
 456{
 457	if (octeon_is_simulation())
  458		/* A break instruction causes the simulator to stop a core */
 459		asm volatile ("break" ::: "memory");
 460
 461	local_irq_disable();
 462	/* Disable watchdog on this core. */
 463	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
 464	/* Spin in a low power mode. */
 465	while (true)
 466		asm volatile ("wait" ::: "memory");
 467}
 468
 469
 470/**
 471 * Halt the system
 472 */
 473static void octeon_halt(void)
 474{
 475	smp_call_function(octeon_kill_core, NULL, 0);
 476
 477	switch (octeon_bootinfo->board_type) {
 478	case CVMX_BOARD_TYPE_NAO38:
 479		/* Driving a 1 to GPIO 12 shuts off this board */
 480		cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
 481		cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
 482		break;
 483	default:
 484		octeon_write_lcd("PowerOff");
 485		break;
 486	}
 487
 488	octeon_kill_core(NULL);
 489}
 490
 491static char __read_mostly octeon_system_type[80];
 492
 493static void __init init_octeon_system_type(void)
 494{
 495	char const *board_type;
 496
 497	board_type = cvmx_board_type_to_string(octeon_bootinfo->board_type);
 498	if (board_type == NULL) {
 499		struct device_node *root;
 500		int ret;
 501
 502		root = of_find_node_by_path("/");
 503		ret = of_property_read_string(root, "model", &board_type);
 504		of_node_put(root);
 505		if (ret)
 506			board_type = "Unsupported Board";
 507	}
 508
 509	snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
 510		 board_type, octeon_model_get_string(read_c0_prid()));
 511}
 512
 513/**
 514 * Return a string representing the system type
 515 *
  516 * Returns the system type string.
 517 */
 518const char *octeon_board_type_string(void)
 519{
 520	return octeon_system_type;
 521}
 522
 523const char *get_system_type(void)
 524	__attribute__ ((alias("octeon_board_type_string")));
 525
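/*
 * Per-core I/O initialization: program the CP0 CvmMemCtl fields described
 * below, enable CVMSEG for kernel use, set the default FAU timeout and POW
 * NW_TIM interval where present, and clear any pending cache error state.
 */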
 526void octeon_user_io_init(void)
 527{
 528	union octeon_cvmemctl cvmmemctl;
 529
 530	/* Get the current settings for CP0_CVMMEMCTL_REG */
 531	cvmmemctl.u64 = read_c0_cvmmemctl();
 532	/* R/W If set, marked write-buffer entries time out the same
  533	 * as other entries; if clear, marked write-buffer entries
 534	 * use the maximum timeout. */
 535	cvmmemctl.s.dismarkwblongto = 1;
 536	/* R/W If set, a merged store does not clear the write-buffer
 537	 * entry timeout state. */
 538	cvmmemctl.s.dismrgclrwbto = 0;
 539	/* R/W Two bits that are the MSBs of the resultant CVMSEG LM
 540	 * word location for an IOBDMA. The other 8 bits come from the
 541	 * SCRADDR field of the IOBDMA. */
 542	cvmmemctl.s.iobdmascrmsb = 0;
 543	/* R/W If set, SYNCWS and SYNCS only order marked stores; if
 544	 * clear, SYNCWS and SYNCS only order unmarked
 545	 * stores. SYNCWSMARKED has no effect when DISSYNCWS is
 546	 * set. */
 547	cvmmemctl.s.syncwsmarked = 0;
 548	/* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
 549	cvmmemctl.s.dissyncws = 0;
 550	/* R/W If set, no stall happens on write buffer full. */
 551	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
 552		cvmmemctl.s.diswbfst = 1;
 553	else
 554		cvmmemctl.s.diswbfst = 0;
 555	/* R/W If set (and SX set), supervisor-level loads/stores can
 556	 * use XKPHYS addresses with <48>==0 */
 557	cvmmemctl.s.xkmemenas = 0;
 558
 559	/* R/W If set (and UX set), user-level loads/stores can use
 560	 * XKPHYS addresses with VA<48>==0 */
 561	cvmmemctl.s.xkmemenau = 0;
 562
 563	/* R/W If set (and SX set), supervisor-level loads/stores can
 564	 * use XKPHYS addresses with VA<48>==1 */
 565	cvmmemctl.s.xkioenas = 0;
 566
 567	/* R/W If set (and UX set), user-level loads/stores can use
 568	 * XKPHYS addresses with VA<48>==1 */
 569	cvmmemctl.s.xkioenau = 0;
 570
 571	/* R/W If set, all stores act as SYNCW (NOMERGE must be set
 572	 * when this is set) RW, reset to 0. */
 573	cvmmemctl.s.allsyncw = 0;
 574
 575	/* R/W If set, no stores merge, and all stores reach the
 576	 * coherent bus in order. */
 577	cvmmemctl.s.nomerge = 0;
 578	/* R/W Selects the bit in the counter used for DID time-outs 0
 579	 * = 231, 1 = 230, 2 = 229, 3 = 214. Actual time-out is
 580	 * between 1x and 2x this interval. For example, with
 581	 * DIDTTO=3, expiration interval is between 16K and 32K. */
 582	cvmmemctl.s.didtto = 0;
 583	/* R/W If set, the (mem) CSR clock never turns off. */
 584	cvmmemctl.s.csrckalwys = 0;
 585	/* R/W If set, mclk never turns off. */
 586	cvmmemctl.s.mclkalwys = 0;
 587	/* R/W Selects the bit in the counter used for write buffer
 588	 * flush time-outs (WBFLT+11) is the bit position in an
 589	 * internal counter used to determine expiration. The write
 590	 * buffer expires between 1x and 2x this interval. For
 591	 * example, with WBFLT = 0, a write buffer expires between 2K
 592	 * and 4K cycles after the write buffer entry is allocated. */
 593	cvmmemctl.s.wbfltime = 0;
 594	/* R/W If set, do not put Istream in the L2 cache. */
 595	cvmmemctl.s.istrnol2 = 0;
 596
 597	/*
 598	 * R/W The write buffer threshold. As per erratum Core-14752
 599	 * for CN63XX, a sc/scd might fail if the write buffer is
 600	 * full.  Lowering WBTHRESH greatly lowers the chances of the
 601	 * write buffer ever being full and triggering the erratum.
 602	 */
 603	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
 604		cvmmemctl.s.wbthresh = 4;
 605	else
 606		cvmmemctl.s.wbthresh = 10;
 607
 608	/* R/W If set, CVMSEG is available for loads/stores in
 609	 * kernel/debug mode. */
 610#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
 611	cvmmemctl.s.cvmsegenak = 1;
 612#else
 613	cvmmemctl.s.cvmsegenak = 0;
 614#endif
 615	/* R/W If set, CVMSEG is available for loads/stores in
 616	 * supervisor mode. */
 617	cvmmemctl.s.cvmsegenas = 0;
 618	/* R/W If set, CVMSEG is available for loads/stores in user
 619	 * mode. */
 620	cvmmemctl.s.cvmsegenau = 0;
 621
 622	write_c0_cvmmemctl(cvmmemctl.u64);
 623
 624	/* Setup of CVMSEG is done in kernel-entry-init.h */
 625	if (smp_processor_id() == 0)
 626		pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
 627			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
 628			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
 629
 630	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
 631		union cvmx_iob_fau_timeout fau_timeout;
 632
 633		/* Set a default for the hardware timeouts */
 634		fau_timeout.u64 = 0;
 635		fau_timeout.s.tout_val = 0xfff;
 636		/* Disable tagwait FAU timeout */
 637		fau_timeout.s.tout_enb = 0;
 638		cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
 639	}
 640
 641	if ((!OCTEON_IS_MODEL(OCTEON_CN68XX) &&
 642	     !OCTEON_IS_MODEL(OCTEON_CN7XXX)) ||
 643	    OCTEON_IS_MODEL(OCTEON_CN70XX)) {
 644		union cvmx_pow_nw_tim nm_tim;
 645
 646		nm_tim.u64 = 0;
 647		/* 4096 cycles */
 648		nm_tim.s.nw_tim = 3;
 649		cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);
 650	}
 651
 652	write_octeon_c0_icacheerr(0);
 653	write_c0_derraddr1(0);
 654}
 655
 656/**
 657 * Early entry point for arch setup
 658 */
 659void __init prom_init(void)
 660{
 661	struct cvmx_sysinfo *sysinfo;
 662	const char *arg;
 663	char *p;
 664	int i;
 665	u64 t;
 666	int argc;
 667#ifdef CONFIG_CAVIUM_RESERVE32
 668	int64_t addr = -1;
 669#endif
 670	/*
 671	 * The bootloader passes a pointer to the boot descriptor in
 672	 * $a3, this is available as fw_arg3.
 673	 */
 674	octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
 675	octeon_bootinfo =
 676		cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
 677	cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));
 678
 679	sysinfo = cvmx_sysinfo_get();
 680	memset(sysinfo, 0, sizeof(*sysinfo));
 681	sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
 682	sysinfo->phy_mem_desc_addr = (u64)phys_to_virt(octeon_bootinfo->phy_mem_desc_addr);
 683
 684	if ((octeon_bootinfo->major_version > 1) ||
 685	    (octeon_bootinfo->major_version == 1 &&
 686	     octeon_bootinfo->minor_version >= 4))
 687		cvmx_coremask_copy(&sysinfo->core_mask,
 688				   &octeon_bootinfo->ext_core_mask);
 689	else
 690		cvmx_coremask_set64(&sysinfo->core_mask,
 691				    octeon_bootinfo->core_mask);
 692
  693	/* Some broken u-boot versions pass garbage in the upper bits; clear them out */
 694	if (!OCTEON_IS_MODEL(OCTEON_CN78XX))
 695		for (i = 512; i < 1024; i++)
 696			cvmx_coremask_clear_core(&sysinfo->core_mask, i);
 697
 698	sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
 699	sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
 700	sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
 701	sysinfo->board_type = octeon_bootinfo->board_type;
 702	sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
 703	sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
 704	memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
 705	       sizeof(sysinfo->mac_addr_base));
 706	sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
 707	memcpy(sysinfo->board_serial_number,
 708	       octeon_bootinfo->board_serial_number,
 709	       sizeof(sysinfo->board_serial_number));
 710	sysinfo->compact_flash_common_base_addr =
 711		octeon_bootinfo->compact_flash_common_base_addr;
 712	sysinfo->compact_flash_attribute_base_addr =
 713		octeon_bootinfo->compact_flash_attribute_base_addr;
 714	sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
 715	sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
 716	sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
 717
 718	if (OCTEON_IS_OCTEON2()) {
 719		/* I/O clock runs at a different rate than the CPU. */
 720		union cvmx_mio_rst_boot rst_boot;
 721		rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
 722		octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
 723	} else if (OCTEON_IS_OCTEON3()) {
 724		/* I/O clock runs at a different rate than the CPU. */
 725		union cvmx_rst_boot rst_boot;
 726		rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
 727		octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
 728	} else {
 729		octeon_io_clock_rate = sysinfo->cpu_clock_hz;
 730	}
 731
 732	t = read_c0_cvmctl();
 733	if ((t & (1ull << 27)) == 0) {
 734		/*
 735		 * Setup the multiplier save/restore code if
 736		 * CvmCtl[NOMUL] clear.
 737		 */
 738		void *save;
 739		void *save_end;
 740		void *restore;
 741		void *restore_end;
 742		int save_len;
 743		int restore_len;
 744		int save_max = (char *)octeon_mult_save_end -
 745			(char *)octeon_mult_save;
 746		int restore_max = (char *)octeon_mult_restore_end -
 747			(char *)octeon_mult_restore;
 748		if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) {
 749			save = octeon_mult_save3;
 750			save_end = octeon_mult_save3_end;
 751			restore = octeon_mult_restore3;
 752			restore_end = octeon_mult_restore3_end;
 753		} else {
 754			save = octeon_mult_save2;
 755			save_end = octeon_mult_save2_end;
 756			restore = octeon_mult_restore2;
 757			restore_end = octeon_mult_restore2_end;
 758		}
 759		save_len = (char *)save_end - (char *)save;
 760		restore_len = (char *)restore_end - (char *)restore;
 761		if (!WARN_ON(save_len > save_max ||
 762				restore_len > restore_max)) {
 763			memcpy(octeon_mult_save, save, save_len);
 764			memcpy(octeon_mult_restore, restore, restore_len);
 765		}
 766	}
 767
 768	/*
 769	 * Only enable the LED controller if we're running on a CN38XX, CN58XX,
 770	 * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
 771	 */
 772	if (!octeon_is_simulation() &&
 773	    octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
 774		cvmx_write_csr(CVMX_LED_EN, 0);
 775		cvmx_write_csr(CVMX_LED_PRT, 0);
 776		cvmx_write_csr(CVMX_LED_DBG, 0);
 777		cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
 778		cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
 779		cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
 780		cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
 781		cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
 782		cvmx_write_csr(CVMX_LED_EN, 1);
 783	}
 784#ifdef CONFIG_CAVIUM_RESERVE32
 785	/*
 786	 * We need to temporarily allocate all memory in the reserve32
 787	 * region. This makes sure the kernel doesn't allocate this
 788	 * memory when it is getting memory from the
 789	 * bootloader. Later, after the memory allocations are
 790	 * complete, the reserve32 will be freed.
 791	 *
 792	 * Allocate memory for RESERVED32 aligned on 2MB boundary. This
 793	 * is in case we later use hugetlb entries with it.
 794	 */
 795	addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
 796						0, 0, 2 << 20,
 797						"CAVIUM_RESERVE32", 0);
 798	if (addr < 0)
 799		pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
 800	else
 801		octeon_reserve32_memory = addr;
  802#endif
  803
 804#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
 805	if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
 806		pr_info("Skipping L2 locking due to reduced L2 cache size\n");
 807	} else {
 808		uint32_t __maybe_unused ebase = read_c0_ebase() & 0x3ffff000;
 809#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
 810		/* TLB refill */
 811		cvmx_l2c_lock_mem_region(ebase, 0x100);
 812#endif
 813#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
 814		/* General exception */
 815		cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
 816#endif
 817#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
 818		/* Interrupt handler */
 819		cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
 820#endif
 821#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
 822		cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
 823		cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
 824#endif
 825#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
 826		cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
 827#endif
 828	}
 829#endif
 830
 831	octeon_check_cpu_bist();
 832
 833	octeon_uart = octeon_get_boot_uart();
 834
 835#ifdef CONFIG_SMP
 836	octeon_write_lcd("LinuxSMP");
 837#else
 838	octeon_write_lcd("Linux");
 839#endif
 840
 841	octeon_setup_delays();
 842
 843	/*
 844	 * BIST should always be enabled when doing a soft reset. L2
 845	 * Cache locking for instance is not cleared unless BIST is
  846	 * enabled.  Unfortunately, due to chip erratum G-200 for
  847	 * CN38XX and CN31XX, BIST must be disabled on these parts.
 848	 */
 849	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
 850	    OCTEON_IS_MODEL(OCTEON_CN31XX))
 851		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
 852	else
 853		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);
 854
 855	/* Default to 64MB in the simulator to speed things up */
 856	if (octeon_is_simulation())
 857		max_memory = 64ull << 20;
 858
 859	arg = strstr(arcs_cmdline, "mem=");
 860	if (arg) {
 861		max_memory = memparse(arg + 4, &p);
 862		if (max_memory == 0)
 863			max_memory = 32ull << 30;
 864		if (*p == '@')
 865			reserve_low_mem = memparse(p + 1, &p);
 866	}
 867
 868	arcs_cmdline[0] = 0;
 869	argc = octeon_boot_desc_ptr->argc;
 870	for (i = 0; i < argc; i++) {
 871		const char *arg =
 872			cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
 873		if ((strncmp(arg, "MEM=", 4) == 0) ||
 874		    (strncmp(arg, "mem=", 4) == 0)) {
 875			max_memory = memparse(arg + 4, &p);
 876			if (max_memory == 0)
 877				max_memory = 32ull << 30;
 878			if (*p == '@')
 879				reserve_low_mem = memparse(p + 1, &p);
 880#ifdef CONFIG_KEXEC
 881		} else if (strncmp(arg, "crashkernel=", 12) == 0) {
 882			crashk_size = memparse(arg+12, &p);
 883			if (*p == '@')
 884				crashk_base = memparse(p+1, &p);
 885			strcat(arcs_cmdline, " ");
 886			strcat(arcs_cmdline, arg);
 887			/*
 888			 * To do: switch parsing to new style, something like:
 889			 * parse_crashkernel(arg, sysinfo->system_dram_size,
 890			 *		  &crashk_size, &crashk_base);
 891			 */
 892#endif
 893		} else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
 894			   sizeof(arcs_cmdline) - 1) {
 895			strcat(arcs_cmdline, " ");
 896			strcat(arcs_cmdline, arg);
 897		}
 898	}
 899
 900	if (strstr(arcs_cmdline, "console=") == NULL) {
 901		if (octeon_uart == 1)
 902			strcat(arcs_cmdline, " console=ttyS1,115200");
 903		else
 904			strcat(arcs_cmdline, " console=ttyS0,115200");
 905	}
 906
 907	mips_hpt_frequency = octeon_get_clock_rate();
 908
 909	octeon_init_cvmcount();
 910
 911	_machine_restart = octeon_restart;
 912	_machine_halt = octeon_halt;
 913
 914#ifdef CONFIG_KEXEC
 915	_machine_kexec_shutdown = octeon_shutdown;
 916	_machine_crash_shutdown = octeon_crash_shutdown;
 917	_machine_kexec_prepare = octeon_kexec_prepare;
 918#ifdef CONFIG_SMP
 919	_crash_smp_send_stop = octeon_crash_smp_send_stop;
 920#endif
 921#endif
 922
 923	octeon_user_io_init();
 924	octeon_setup_smp();
 925}
 926
 927/* Exclude a single page from the regions obtained in plat_mem_setup. */
 928#ifndef CONFIG_CRASH_DUMP
 929static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size)
 930{
 931	if (addr > *mem && addr < *mem + *size) {
 932		u64 inc = addr - *mem;
 933		add_memory_region(*mem, inc, BOOT_MEM_RAM);
 934		*mem += inc;
 935		*size -= inc;
 936	}
 937
 938	if (addr == *mem && *size > PAGE_SIZE) {
 939		*mem += PAGE_SIZE;
 940		*size -= PAGE_SIZE;
 941	}
 942}
 943#endif /* CONFIG_CRASH_DUMP */
 944
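/* Append the bootloader's argv entries to the kernel command line. */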
 945void __init fw_init_cmdline(void)
 946{
 947	int i;
 948
 949	octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
 950	for (i = 0; i < octeon_boot_desc_ptr->argc; i++) {
 951		const char *arg =
 952			cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
 953		if (strlen(arcs_cmdline) + strlen(arg) + 1 <
 954			   sizeof(arcs_cmdline) - 1) {
 955			strcat(arcs_cmdline, " ");
 956			strcat(arcs_cmdline, arg);
 957		}
 958	}
 959}
 960
 961void __init *plat_get_fdt(void)
 962{
 963	octeon_bootinfo =
 964		cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
 965	return phys_to_virt(octeon_bootinfo->fdt_addr);
 966}
 967
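/*
 * Build the kernel memory map: repeatedly allocate chunks from the cvmx
 * bootmem allocator and register them with add_memory_region(), excluding
 * pages around the PCIe BAR1 hole and, when kexec is enabled, carving out
 * the crashkernel region.
 */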
 968void __init plat_mem_setup(void)
 969{
 970	uint64_t mem_alloc_size;
 971	uint64_t total;
 972	uint64_t crashk_end;
 973#ifndef CONFIG_CRASH_DUMP
 974	int64_t memory;
 975	uint64_t kernel_start;
 976	uint64_t kernel_size;
 977#endif
 978
 979	total = 0;
 980	crashk_end = 0;
 981
 982	/*
 983	 * The Mips memory init uses the first memory location for
 984	 * some memory vectors. When SPARSEMEM is in use, it doesn't
 985	 * verify that the size is big enough for the final
  986	 * vectors. Making the smallest chunk 4MB seems to be enough
 987	 * to consistently work.
 988	 */
 989	mem_alloc_size = 4 << 20;
 990	if (mem_alloc_size > max_memory)
 991		mem_alloc_size = max_memory;
 992
 993/* Crashkernel ignores bootmem list. It relies on mem=X@Y option */
 994#ifdef CONFIG_CRASH_DUMP
 995	add_memory_region(reserve_low_mem, max_memory, BOOT_MEM_RAM);
 996	total += max_memory;
 997#else
 998#ifdef CONFIG_KEXEC
 999	if (crashk_size > 0) {
1000		add_memory_region(crashk_base, crashk_size, BOOT_MEM_RAM);
1001		crashk_end = crashk_base + crashk_size;
1002	}
1003#endif
1004	/*
1005	 * When allocating memory, we want incrementing addresses from
1006	 * bootmem_alloc so the code in add_memory_region can merge
1007	 * regions next to each other.
1008	 */
1009	cvmx_bootmem_lock();
1010	while (total < max_memory) {
1011		memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
1012						__pa_symbol(&_end), -1,
1013						0x100000,
1014						CVMX_BOOTMEM_FLAG_NO_LOCKING);
1015		if (memory >= 0) {
1016			u64 size = mem_alloc_size;
1017#ifdef CONFIG_KEXEC
1018			uint64_t end;
1019#endif
1020
1021			/*
1022			 * exclude a page at the beginning and end of
1023			 * the 256MB PCIe 'hole' so the kernel will not
1024			 * try to allocate multi-page buffers that
1025			 * span the discontinuity.
1026			 */
1027			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE,
1028					    &memory, &size);
1029			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE +
1030					    CVMX_PCIE_BAR1_PHYS_SIZE,
1031					    &memory, &size);
1032#ifdef CONFIG_KEXEC
1033			end = memory + mem_alloc_size;
1034
1035			/*
1036			 * This function automatically merges address regions
1037			 * next to each other if they are received in
1038			 * incrementing order
1039			 */
1040			if (memory < crashk_base && end >  crashk_end) {
 1041				/* the crashkernel region is fully inside this chunk */
1042				add_memory_region(memory,
1043						  crashk_base - memory,
1044						  BOOT_MEM_RAM);
1045				total += crashk_base - memory;
1046				add_memory_region(crashk_end,
1047						  end - crashk_end,
1048						  BOOT_MEM_RAM);
1049				total += end - crashk_end;
1050				continue;
1051			}
1052
1053			if (memory >= crashk_base && end <= crashk_end)
1054				/*
1055				 * Entire memory region is within the new
1056				 *  kernel's memory, ignore it.
1057				 */
1058				continue;
1059
1060			if (memory > crashk_base && memory < crashk_end &&
1061			    end > crashk_end) {
1062				/*
 1063				 * The chunk starts inside the crashkernel
 1064				 * region; trim off the overlapping start.
 1065				 */
1066				mem_alloc_size -= crashk_end - memory;
1067				memory = crashk_end;
1068			} else if (memory < crashk_base && end > crashk_base &&
1069				   end < crashk_end)
1070				/*
 1071				 * The chunk overlaps the start of the crashkernel
 1072				 * region; chop off its end.
1073				 */
1074				mem_alloc_size -= end - crashk_base;
1075#endif
1076			add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
1077			total += mem_alloc_size;
1078			/* Recovering mem_alloc_size */
1079			mem_alloc_size = 4 << 20;
1080		} else {
1081			break;
1082		}
1083	}
1084	cvmx_bootmem_unlock();
1085	/* Add the memory region for the kernel. */
1086	kernel_start = (unsigned long) _text;
1087	kernel_size = _end - _text;
1088
1089	/* Adjust for physical offset. */
1090	kernel_start &= ~0xffffffff80000000ULL;
1091	add_memory_region(kernel_start, kernel_size, BOOT_MEM_RAM);
1092#endif /* CONFIG_CRASH_DUMP */
1093
1094#ifdef CONFIG_CAVIUM_RESERVE32
1095	/*
1096	 * Now that we've allocated the kernel memory it is safe to
1097	 * free the reserved region. We free it here so that builtin
1098	 * drivers can use the memory.
1099	 */
1100	if (octeon_reserve32_memory)
1101		cvmx_bootmem_free_named("CAVIUM_RESERVE32");
1102#endif /* CONFIG_CAVIUM_RESERVE32 */
1103
1104	if (total == 0)
1105		panic("Unable to allocate memory from "
1106		      "cvmx_bootmem_phy_alloc");
1107}
1108
1109/*
1110 * Emit one character to the boot UART.	 Exported for use by the
1111 * watchdog timer.
1112 */
1113void prom_putchar(char c)
1114{
1115	uint64_t lsrval;
1116
1117	/* Spin until there is room */
1118	do {
1119		lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
1120	} while ((lsrval & 0x20) == 0);
1121
1122	/* Write the byte */
1123	cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
1124}
1125EXPORT_SYMBOL(prom_putchar);
1126
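/*
 * Nothing is actually freed here; on OCTEON this hook only verifies that
 * the Core-14449 dcache prefetch workaround (CONFIG_CAVIUM_CN63XXP1) was
 * built in when the hardware requires it.
 */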
1127void __init prom_free_prom_memory(void)
1128{
1129	if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR) {
1130		/* Check for presence of Core-14449 fix.  */
1131		u32 insn;
1132		u32 *foo;
1133
1134		foo = &insn;
1135
1136		asm volatile("# before" : : : "memory");
1137		prefetch(foo);
1138		asm volatile(
1139			".set push\n\t"
1140			".set noreorder\n\t"
1141			"bal 1f\n\t"
1142			"nop\n"
1143			"1:\tlw %0,-12($31)\n\t"
1144			".set pop\n\t"
1145			: "=r" (insn) : : "$31", "memory");
1146
1147		if ((insn >> 26) != 0x33)
1148			panic("No PREF instruction at Core-14449 probe point.");
1149
1150		if (((insn >> 16) & 0x1f) != 28)
1151			panic("OCTEON II DCache prefetch workaround not in place (%04x).\n"
1152			      "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).",
1153			      insn);
1154	}
1155}
1156
1157void __init octeon_fill_mac_addresses(void);
1158
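/*
 * Select the device tree to use: an appended DTB, the one passed by the
 * bootloader, or a built-in blob matched to the SoC model, pruning it and
 * filling in MAC addresses as needed before unflattening.
 */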
1159void __init device_tree_init(void)
1160{
1161	const void *fdt;
1162	bool do_prune;
1163	bool fill_mac;
1164
1165	if (fw_passed_dtb) {
 1166		fdt = (void *)fw_passed_dtb;
 1167		do_prune = false;
1168		fill_mac = true;
1169		pr_info("Using appended Device Tree.\n");
 1170	} else if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
 1171		fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
1172		if (fdt_check_header(fdt))
1173			panic("Corrupt Device Tree passed to kernel.");
1174		do_prune = false;
1175		fill_mac = false;
1176		pr_info("Using passed Device Tree.\n");
1177	} else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
1178		fdt = &__dtb_octeon_68xx_begin;
1179		do_prune = true;
1180		fill_mac = true;
1181	} else {
1182		fdt = &__dtb_octeon_3xxx_begin;
1183		do_prune = true;
1184		fill_mac = true;
1185	}
1186
1187	initial_boot_params = (void *)fdt;
1188
1189	if (do_prune) {
1190		octeon_prune_device_tree();
1191		pr_info("Using internal Device Tree.\n");
1192	}
1193	if (fill_mac)
1194		octeon_fill_mac_addresses();
1195	unflatten_and_copy_device_tree();
1196	init_octeon_system_type();
1197}
1198
1199static int __initdata disable_octeon_edac_p;
1200
1201static int __init disable_octeon_edac(char *str)
1202{
1203	disable_octeon_edac_p = 1;
1204	return 0;
1205}
1206early_param("disable_octeon_edac", disable_octeon_edac);
1207
1208static char *edac_device_names[] = {
1209	"octeon_l2c_edac",
1210	"octeon_pc_edac",
1211};
1212
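/*
 * Register the EDAC platform devices: one each for the L2 cache and CPU
 * checkers, plus one octeon_lmc_edac instance per memory controller.
 */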
1213static int __init edac_devinit(void)
1214{
1215	struct platform_device *dev;
1216	int i, err = 0;
1217	int num_lmc;
1218	char *name;
1219
1220	if (disable_octeon_edac_p)
1221		return 0;
1222
1223	for (i = 0; i < ARRAY_SIZE(edac_device_names); i++) {
1224		name = edac_device_names[i];
1225		dev = platform_device_register_simple(name, -1, NULL, 0);
1226		if (IS_ERR(dev)) {
1227			pr_err("Registration of %s failed!\n", name);
1228			err = PTR_ERR(dev);
1229		}
1230	}
1231
1232	num_lmc = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 :
1233		(OCTEON_IS_MODEL(OCTEON_CN56XX) ? 2 : 1);
1234	for (i = 0; i < num_lmc; i++) {
1235		dev = platform_device_register_simple("octeon_lmc_edac",
1236						      i, NULL, 0);
1237		if (IS_ERR(dev)) {
1238			pr_err("Registration of octeon_lmc_edac %d failed!\n", i);
1239			err = PTR_ERR(dev);
1240		}
1241	}
1242
1243	return err;
1244}
1245device_initcall(edac_devinit);
1246
1247static void __initdata *octeon_dummy_iospace;
1248
1249static int __init octeon_no_pci_init(void)
1250{
1251	/*
1252	 * Initially assume there is no PCI. The PCI/PCIe platform code will
1253	 * later re-initialize these to correct values if they are present.
1254	 */
1255	octeon_dummy_iospace = vzalloc(IO_SPACE_LIMIT);
1256	set_io_port_base((unsigned long)octeon_dummy_iospace);
1257	ioport_resource.start = MAX_RESOURCE;
1258	ioport_resource.end = 0;
1259	return 0;
1260}
1261core_initcall(octeon_no_pci_init);
1262
1263static int __init octeon_no_pci_release(void)
1264{
1265	/*
1266	 * Release the allocated memory if a real IO space is there.
1267	 */
1268	if ((unsigned long)octeon_dummy_iospace != mips_io_port_base)
1269		vfree(octeon_dummy_iospace);
1270	return 0;
1271}
1272late_initcall(octeon_no_pci_release);
v6.2
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 2004-2007 Cavium Networks
   7 * Copyright (C) 2008, 2009 Wind River Systems
   8 *   written by Ralf Baechle <ralf@linux-mips.org>
   9 */
  10#include <linux/compiler.h>
  11#include <linux/vmalloc.h>
  12#include <linux/init.h>
  13#include <linux/kernel.h>
  14#include <linux/console.h>
  15#include <linux/delay.h>
  16#include <linux/export.h>
  17#include <linux/interrupt.h>
  18#include <linux/io.h>
  19#include <linux/memblock.h>
  20#include <linux/serial.h>
  21#include <linux/smp.h>
  22#include <linux/types.h>
  23#include <linux/string.h>	/* for memset */
  24#include <linux/tty.h>
  25#include <linux/time.h>
  26#include <linux/platform_device.h>
  27#include <linux/serial_core.h>
  28#include <linux/serial_8250.h>
  29#include <linux/of_fdt.h>
  30#include <linux/libfdt.h>
  31#include <linux/kexec.h>
  32
  33#include <asm/processor.h>
  34#include <asm/reboot.h>
  35#include <asm/smp-ops.h>
  36#include <asm/irq_cpu.h>
  37#include <asm/mipsregs.h>
  38#include <asm/bootinfo.h>
  39#include <asm/sections.h>
  40#include <asm/fw/fw.h>
  41#include <asm/setup.h>
  42#include <asm/prom.h>
  43#include <asm/time.h>
  44
  45#include <asm/octeon/octeon.h>
  46#include <asm/octeon/pci-octeon.h>
  47#include <asm/octeon/cvmx-rst-defs.h>
  48
  49/*
  50 * TRUE for devices having registers with little-endian byte
  51 * order, FALSE for registers with native-endian byte order.
   52 * PCI mandates little-endian, USB and SATA are configurable,
  53 * but we chose little-endian for these.
  54 */
  55const bool octeon_should_swizzle_table[256] = {
  56	[0x00] = true,	/* bootbus/CF */
  57	[0x1b] = true,	/* PCI mmio window */
  58	[0x1c] = true,	/* PCI mmio window */
  59	[0x1d] = true,	/* PCI mmio window */
  60	[0x1e] = true,	/* PCI mmio window */
  61	[0x68] = true,	/* OCTEON III USB */
  62	[0x69] = true,	/* OCTEON III USB */
  63	[0x6c] = true,	/* OCTEON III SATA */
  64	[0x6f] = true,	/* OCTEON II USB */
  65};
  66EXPORT_SYMBOL(octeon_should_swizzle_table);
  67
  68#ifdef CONFIG_PCI
  69extern void pci_console_init(const char *arg);
  70#endif
  71
  72static unsigned long long max_memory = ULLONG_MAX;
  73static unsigned long long reserve_low_mem;
  74
  75DEFINE_SEMAPHORE(octeon_bootbus_sem);
  76EXPORT_SYMBOL(octeon_bootbus_sem);
  77
  78static struct octeon_boot_descriptor *octeon_boot_desc_ptr;
  79
  80struct cvmx_bootinfo *octeon_bootinfo;
  81EXPORT_SYMBOL(octeon_bootinfo);
  82
  83#ifdef CONFIG_KEXEC
  84#ifdef CONFIG_SMP
  85/*
   86 * Wait until the relocation code is prepared, and send
   87 * secondary CPUs to spin until the kernel is relocated.
  88 */
  89static void octeon_kexec_smp_down(void *ignored)
  90{
  91	int cpu = smp_processor_id();
  92
  93	local_irq_disable();
  94	set_cpu_online(cpu, false);
  95	while (!atomic_read(&kexec_ready_to_reboot))
  96		cpu_relax();
  97
  98	asm volatile (
  99	"	sync						\n"
 100	"	synci	($0)					\n");
 101
 102	kexec_reboot();
 103}
 104#endif
 105
 106#define OCTEON_DDR0_BASE    (0x0ULL)
 107#define OCTEON_DDR0_SIZE    (0x010000000ULL)
 108#define OCTEON_DDR1_BASE    (0x410000000ULL)
 109#define OCTEON_DDR1_SIZE    (0x010000000ULL)
 110#define OCTEON_DDR2_BASE    (0x020000000ULL)
 111#define OCTEON_DDR2_SIZE    (0x3e0000000ULL)
 112#define OCTEON_MAX_PHY_MEM_SIZE (16*1024*1024*1024ULL)
 113
 114static struct kimage *kimage_ptr;
 115
 116static void kexec_bootmem_init(uint64_t mem_size, uint32_t low_reserved_bytes)
 117{
 118	int64_t addr;
 119	struct cvmx_bootmem_desc *bootmem_desc;
 120
 121	bootmem_desc = cvmx_bootmem_get_desc();
 122
 123	if (mem_size > OCTEON_MAX_PHY_MEM_SIZE) {
 124		mem_size = OCTEON_MAX_PHY_MEM_SIZE;
  125		pr_err("Error: requested memory too large, "
  126		       "truncating to maximum size\n");
 127	}
 128
 129	bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER;
 130	bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER;
 131
 132	addr = (OCTEON_DDR0_BASE + reserve_low_mem + low_reserved_bytes);
 133	bootmem_desc->head_addr = 0;
 134
 135	if (mem_size <= OCTEON_DDR0_SIZE) {
 136		__cvmx_bootmem_phy_free(addr,
 137				mem_size - reserve_low_mem -
 138				low_reserved_bytes, 0);
 139		return;
 140	}
 141
 142	__cvmx_bootmem_phy_free(addr,
 143			OCTEON_DDR0_SIZE - reserve_low_mem -
 144			low_reserved_bytes, 0);
 145
 146	mem_size -= OCTEON_DDR0_SIZE;
 147
 148	if (mem_size > OCTEON_DDR1_SIZE) {
 149		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
 150		__cvmx_bootmem_phy_free(OCTEON_DDR2_BASE,
 151				mem_size - OCTEON_DDR1_SIZE, 0);
 152	} else
 153		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
 154}
 155
 156static int octeon_kexec_prepare(struct kimage *image)
 157{
 158	int i;
 159	char *bootloader = "kexec";
 160
 161	octeon_boot_desc_ptr->argc = 0;
 162	for (i = 0; i < image->nr_segments; i++) {
 163		if (!strncmp(bootloader, (char *)image->segment[i].buf,
 164				strlen(bootloader))) {
 165			/*
 166			 * convert command line string to array
 167			 * of parameters (as bootloader does).
 168			 */
 169			int argc = 0, offt;
 170			char *str = (char *)image->segment[i].buf;
 171			char *ptr = strchr(str, ' ');
 172			while (ptr && (OCTEON_ARGV_MAX_ARGS > argc)) {
 173				*ptr = '\0';
 174				if (ptr[1] != ' ') {
 175					offt = (int)(ptr - str + 1);
 176					octeon_boot_desc_ptr->argv[argc] =
 177						image->segment[i].mem + offt;
 178					argc++;
 179				}
 180				ptr = strchr(ptr + 1, ' ');
 181			}
 182			octeon_boot_desc_ptr->argc = argc;
 183			break;
 184		}
 185	}
 186
 187	/*
 188	 * Information about segments will be needed during pre-boot memory
 189	 * initialization.
 190	 */
 191	kimage_ptr = image;
 192	return 0;
 193}
 194
 195static void octeon_generic_shutdown(void)
 196{
 197	int i;
 198#ifdef CONFIG_SMP
 199	int cpu;
 200#endif
 201	struct cvmx_bootmem_desc *bootmem_desc;
 202	void *named_block_array_ptr;
 203
 204	bootmem_desc = cvmx_bootmem_get_desc();
 205	named_block_array_ptr =
 206		cvmx_phys_to_ptr(bootmem_desc->named_block_array_addr);
 207
 208#ifdef CONFIG_SMP
 209	/* disable watchdogs */
 210	for_each_online_cpu(cpu)
 211		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
 212#else
 213	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
 214#endif
 215	if (kimage_ptr != kexec_crash_image) {
 216		memset(named_block_array_ptr,
 217			0x0,
 218			CVMX_BOOTMEM_NUM_NAMED_BLOCKS *
 219			sizeof(struct cvmx_bootmem_named_block_desc));
 220		/*
 221		 * Mark all memory (except low 0x100000 bytes) as free.
 222		 * It is the same thing that bootloader does.
 223		 */
 224		kexec_bootmem_init(octeon_bootinfo->dram_size*1024ULL*1024ULL,
 225				0x100000);
 226		/*
 227		 * Allocate all segments to avoid their corruption during boot.
 228		 */
 229		for (i = 0; i < kimage_ptr->nr_segments; i++)
 230			cvmx_bootmem_alloc_address(
 231				kimage_ptr->segment[i].memsz + 2*PAGE_SIZE,
 232				kimage_ptr->segment[i].mem - PAGE_SIZE,
 233				PAGE_SIZE);
 234	} else {
 235		/*
 236		 * Do not mark all memory as free. Free only named sections
 237		 * leaving the rest of memory unchanged.
 238		 */
 239		struct cvmx_bootmem_named_block_desc *ptr =
 240			(struct cvmx_bootmem_named_block_desc *)
 241			named_block_array_ptr;
 242
 243		for (i = 0; i < bootmem_desc->named_block_num_blocks; i++)
 244			if (ptr[i].size)
 245				cvmx_bootmem_free_named(ptr[i].name);
 246	}
 247	kexec_args[2] = 1UL; /* running on octeon_main_processor */
 248	kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
 249#ifdef CONFIG_SMP
 250	secondary_kexec_args[2] = 0UL; /* running on secondary cpu */
 251	secondary_kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
 252#endif
 253}
 254
 255static void octeon_shutdown(void)
 256{
 257	octeon_generic_shutdown();
 258#ifdef CONFIG_SMP
 259	smp_call_function(octeon_kexec_smp_down, NULL, 0);
 260	smp_wmb();
 261	while (num_online_cpus() > 1) {
 262		cpu_relax();
 263		mdelay(1);
 264	}
 265#endif
 266}
 267
 268static void octeon_crash_shutdown(struct pt_regs *regs)
 269{
 270	octeon_generic_shutdown();
 271	default_machine_crash_shutdown(regs);
 272}
 273
 274#ifdef CONFIG_SMP
 275void octeon_crash_smp_send_stop(void)
 276{
 277	int cpu;
 278
 279	/* disable watchdogs */
 280	for_each_online_cpu(cpu)
 281		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
 282}
 283#endif
 284
 285#endif /* CONFIG_KEXEC */
  286
  287uint64_t octeon_reserve32_memory;
  288EXPORT_SYMBOL(octeon_reserve32_memory);
  289
 290#ifdef CONFIG_KEXEC
  291/* The crashkernel cmdline parameter is parsed _after_ memory setup,
  292 * so we also parse it here (workaround for EHB5200). */
 293static uint64_t crashk_size, crashk_base;
 294#endif
 295
 296static int octeon_uart;
 297
 298extern asmlinkage void handle_int(void);
 299
 300/**
 301 * octeon_is_simulation - Return non-zero if we are currently running
 302 * in the Octeon simulator
 303 *
 304 * Return: non-0 if running in the Octeon simulator, 0 otherwise
 305 */
 306int octeon_is_simulation(void)
 307{
 308	return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
 309}
 310EXPORT_SYMBOL(octeon_is_simulation);
 311
 312/**
 313 * octeon_is_pci_host - Return true if Octeon is in PCI Host mode. This means
 314 * Linux can control the PCI bus.
 315 *
 316 * Return: Non-zero if Octeon is in host mode.
 317 */
 318int octeon_is_pci_host(void)
 319{
 320#ifdef CONFIG_PCI
 321	return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
 322#else
 323	return 0;
 324#endif
 325}
 326
 327/**
 328 * octeon_get_clock_rate - Get the clock rate of Octeon
 329 *
 330 * Return: Clock rate in HZ
 331 */
 332uint64_t octeon_get_clock_rate(void)
 333{
 334	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();
 335
 336	return sysinfo->cpu_clock_hz;
 337}
 338EXPORT_SYMBOL(octeon_get_clock_rate);
 339
 340static u64 octeon_io_clock_rate;
 341
 342u64 octeon_get_io_clock_rate(void)
 343{
 344	return octeon_io_clock_rate;
 345}
 346EXPORT_SYMBOL(octeon_get_io_clock_rate);
 347
 348
 349/**
  350 * octeon_write_lcd - Write to the LCD display connected to the bootbus.
  351 * @s:	    String to write
 352 *
 353 * This display exists on most Cavium evaluation boards. If it doesn't exist,
 354 * then this function doesn't do anything.
 355 */
 356static void octeon_write_lcd(const char *s)
 357{
 358	if (octeon_bootinfo->led_display_base_addr) {
 359		void __iomem *lcd_address =
 360			ioremap(octeon_bootinfo->led_display_base_addr,
 361					8);
 362		int i;
 363		for (i = 0; i < 8; i++, s++) {
 364			if (*s)
 365				iowrite8(*s, lcd_address + i);
 366			else
 367				iowrite8(' ', lcd_address + i);
 368		}
 369		iounmap(lcd_address);
 370	}
 371}
 372
 373/**
 374 * octeon_get_boot_uart - Return the console uart passed by the bootloader
 375 *
 376 * Return: uart number (0 or 1)
 377 */
 378static int octeon_get_boot_uart(void)
 379{
 380	return (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
 381		1 : 0;
 382}
 383
 384/**
 385 * octeon_get_boot_coremask - Get the coremask Linux was booted on.
 386 *
 387 * Return: Core mask
 388 */
 389int octeon_get_boot_coremask(void)
 390{
 391	return octeon_boot_desc_ptr->core_mask;
 392}
 393
 394/**
 395 * octeon_check_cpu_bist - Check the hardware BIST results for a CPU
 396 */
 397void octeon_check_cpu_bist(void)
 398{
 399	const int coreid = cvmx_get_core_num();
 400	unsigned long long mask;
 401	unsigned long long bist_val;
 402
 403	/* Check BIST results for COP0 registers */
 404	mask = 0x1f00000000ull;
 405	bist_val = read_octeon_c0_icacheerr();
 406	if (bist_val & mask)
 407		pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
 408		       coreid, bist_val);
 409
 410	bist_val = read_octeon_c0_dcacheerr();
 411	if (bist_val & 1)
 412		pr_err("Core%d L1 Dcache parity error: "
 413		       "CacheErr(dcache) = 0x%llx\n",
 414		       coreid, bist_val);
 415
 416	mask = 0xfc00000000000000ull;
 417	bist_val = read_c0_cvmmemctl();
 418	if (bist_val & mask)
 419		pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
 420		       coreid, bist_val);
 421
 422	write_octeon_c0_dcacheerr(0);
 423}
 424
 425/**
 426 * octeon_restart - Reboot Octeon
 427 *
 428 * @command: Command to pass to the bootloader. Currently ignored.
 429 */
 430static void octeon_restart(char *command)
 431{
 432	/* Disable all watchdogs before soft reset. They don't get cleared */
 433#ifdef CONFIG_SMP
 434	int cpu;
 435	for_each_online_cpu(cpu)
 436		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
 437#else
 438	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
 439#endif
 440
 441	mb();
 442	while (1)
 443		if (OCTEON_IS_OCTEON3())
 444			cvmx_write_csr(CVMX_RST_SOFT_RST, 1);
 445		else
 446			cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
 447}
 448
 449
 450/**
 451 * octeon_kill_core - Permanently stop a core.
 452 *
 453 * @arg: Ignored.
 454 */
 455static void octeon_kill_core(void *arg)
 456{
 457	if (octeon_is_simulation())
  458		/* A break instruction causes the simulator to stop a core */
 459		asm volatile ("break" ::: "memory");
 460
 461	local_irq_disable();
 462	/* Disable watchdog on this core. */
 463	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
 464	/* Spin in a low power mode. */
 465	while (true)
 466		asm volatile ("wait" ::: "memory");
 467}
 468
 469
 470/**
 471 * octeon_halt - Halt the system
 472 */
 473static void octeon_halt(void)
 474{
 475	smp_call_function(octeon_kill_core, NULL, 0);
 476
 477	switch (octeon_bootinfo->board_type) {
 478	case CVMX_BOARD_TYPE_NAO38:
 479		/* Driving a 1 to GPIO 12 shuts off this board */
 480		cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
 481		cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
 482		break;
 483	default:
 484		octeon_write_lcd("PowerOff");
 485		break;
 486	}
 487
 488	octeon_kill_core(NULL);
 489}
 490
 491static char __read_mostly octeon_system_type[80];
 492
 493static void __init init_octeon_system_type(void)
 494{
 495	char const *board_type;
 496
 497	board_type = cvmx_board_type_to_string(octeon_bootinfo->board_type);
 498	if (board_type == NULL) {
 499		struct device_node *root;
 500		int ret;
 501
 502		root = of_find_node_by_path("/");
 503		ret = of_property_read_string(root, "model", &board_type);
 504		of_node_put(root);
 505		if (ret)
 506			board_type = "Unsupported Board";
 507	}
 508
 509	snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
 510		 board_type, octeon_model_get_string(read_c0_prid()));
 511}
 512
 513/**
 514 * octeon_board_type_string - Return a string representing the system type
 515 *
 516 * Return: system type string
 517 */
 518const char *octeon_board_type_string(void)
 519{
 520	return octeon_system_type;
 521}
 522
 523const char *get_system_type(void)
 524	__attribute__ ((alias("octeon_board_type_string")));
 525
 526void octeon_user_io_init(void)
 527{
 528	union octeon_cvmemctl cvmmemctl;
 529
 530	/* Get the current settings for CP0_CVMMEMCTL_REG */
 531	cvmmemctl.u64 = read_c0_cvmmemctl();
 532	/* R/W If set, marked write-buffer entries time out the same
 533	 * as other entries; if clear, marked write-buffer entries
 534	 * use the maximum timeout. */
 535	cvmmemctl.s.dismarkwblongto = 1;
 536	/* R/W If set, a merged store does not clear the write-buffer
 537	 * entry timeout state. */
 538	cvmmemctl.s.dismrgclrwbto = 0;
 539	/* R/W Two bits that are the MSBs of the resultant CVMSEG LM
 540	 * word location for an IOBDMA. The other 8 bits come from the
 541	 * SCRADDR field of the IOBDMA. */
 542	cvmmemctl.s.iobdmascrmsb = 0;
 543	/* R/W If set, SYNCWS and SYNCS only order marked stores; if
 544	 * clear, SYNCWS and SYNCS only order unmarked
 545	 * stores. SYNCWSMARKED has no effect when DISSYNCWS is
 546	 * set. */
 547	cvmmemctl.s.syncwsmarked = 0;
 548	/* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
 549	cvmmemctl.s.dissyncws = 0;
 550	/* R/W If set, no stall happens on write buffer full. */
 551	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
 552		cvmmemctl.s.diswbfst = 1;
 553	else
 554		cvmmemctl.s.diswbfst = 0;
 555	/* R/W If set (and SX set), supervisor-level loads/stores can
 556	 * use XKPHYS addresses with <48>==0 */
 557	cvmmemctl.s.xkmemenas = 0;
 558
 559	/* R/W If set (and UX set), user-level loads/stores can use
 560	 * XKPHYS addresses with VA<48>==0 */
 561	cvmmemctl.s.xkmemenau = 0;
 562
 563	/* R/W If set (and SX set), supervisor-level loads/stores can
 564	 * use XKPHYS addresses with VA<48>==1 */
 565	cvmmemctl.s.xkioenas = 0;
 566
 567	/* R/W If set (and UX set), user-level loads/stores can use
 568	 * XKPHYS addresses with VA<48>==1 */
 569	cvmmemctl.s.xkioenau = 0;
 570
 571	/* R/W If set, all stores act as SYNCW (NOMERGE must be set
 572	 * when this is set) RW, reset to 0. */
 573	cvmmemctl.s.allsyncw = 0;
 574
 575	/* R/W If set, no stores merge, and all stores reach the
 576	 * coherent bus in order. */
 577	cvmmemctl.s.nomerge = 0;
 578	/* R/W Selects the bit in the counter used for DID time-outs 0
 579	 * = 231, 1 = 230, 2 = 229, 3 = 214. Actual time-out is
 580	 * between 1x and 2x this interval. For example, with
 581	 * DIDTTO=3, expiration interval is between 16K and 32K. */
 582	cvmmemctl.s.didtto = 0;
 583	/* R/W If set, the (mem) CSR clock never turns off. */
 584	cvmmemctl.s.csrckalwys = 0;
 585	/* R/W If set, mclk never turns off. */
 586	cvmmemctl.s.mclkalwys = 0;
 587	/* R/W Selects the bit in the counter used for write buffer
 588	 * flush time-outs; (WBFLT+11) is the bit position in an
 589	 * internal counter used to determine expiration. The write
 590	 * buffer expires between 1x and 2x this interval. For
 591	 * example, with WBFLT = 0, a write buffer expires between 2K
 592	 * and 4K cycles after the write buffer entry is allocated. */
 593	cvmmemctl.s.wbfltime = 0;
 594	/* R/W If set, do not put Istream in the L2 cache. */
 595	cvmmemctl.s.istrnol2 = 0;
 596
 597	/*
 598	 * R/W The write buffer threshold. As per erratum Core-14752
 599	 * for CN63XX, a sc/scd might fail if the write buffer is
 600	 * full.  Lowering WBTHRESH greatly lowers the chances of the
 601	 * write buffer ever being full and triggering the erratum.
 602	 */
 603	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
 604		cvmmemctl.s.wbthresh = 4;
 605	else
 606		cvmmemctl.s.wbthresh = 10;
 607
 608	/* R/W If set, CVMSEG is available for loads/stores in
 609	 * kernel/debug mode. */
 610#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
 611	cvmmemctl.s.cvmsegenak = 1;
 612#else
 613	cvmmemctl.s.cvmsegenak = 0;
 614#endif
 615	/* R/W If set, CVMSEG is available for loads/stores in
 616	 * supervisor mode. */
 617	cvmmemctl.s.cvmsegenas = 0;
 618	/* R/W If set, CVMSEG is available for loads/stores in user
 619	 * mode. */
 620	cvmmemctl.s.cvmsegenau = 0;
 621
 622	write_c0_cvmmemctl(cvmmemctl.u64);
 623
 624	/* Setup of CVMSEG is done in kernel-entry-init.h */
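	/*
	 * Each CVMSEG line is 128 bytes, so a configured size of e.g. 2
	 * lines is reported below as "CVMSEG size: 2 cache lines (256
	 * bytes)".
	 */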
 625	if (smp_processor_id() == 0)
 626		pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
 627			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
 628			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
 629
 630	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
 631		union cvmx_iob_fau_timeout fau_timeout;
 632
 633		/* Set a default for the hardware timeouts */
 634		fau_timeout.u64 = 0;
 635		fau_timeout.s.tout_val = 0xfff;
 636		/* Disable tagwait FAU timeout */
 637		fau_timeout.s.tout_enb = 0;
 638		cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
 639	}
 640
 641	if ((!OCTEON_IS_MODEL(OCTEON_CN68XX) &&
 642	     !OCTEON_IS_MODEL(OCTEON_CN7XXX)) ||
 643	    OCTEON_IS_MODEL(OCTEON_CN70XX)) {
 644		union cvmx_pow_nw_tim nm_tim;
 645
 646		nm_tim.u64 = 0;
 647		/* 4096 cycles */
 648		nm_tim.s.nw_tim = 3;
 649		cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);
 650	}
 651
 652	write_octeon_c0_icacheerr(0);
 653	write_c0_derraddr1(0);
 654}
 655
 656/**
 657 * prom_init - Early entry point for arch setup
 658 */
 659void __init prom_init(void)
 660{
 661	struct cvmx_sysinfo *sysinfo;
 662	const char *arg;
 663	char *p;
 664	int i;
 665	u64 t;
 666	int argc;
 667	/*
 668	 * The bootloader passes a pointer to the boot descriptor in
 669	 * $a3, this is available as fw_arg3.
 670	 */
 671	octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
 672	octeon_bootinfo =
 673		cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
 674	cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));
 675
 676	sysinfo = cvmx_sysinfo_get();
 677	memset(sysinfo, 0, sizeof(*sysinfo));
 678	sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
 679	sysinfo->phy_mem_desc_addr = (u64)phys_to_virt(octeon_bootinfo->phy_mem_desc_addr);
 680
 681	if ((octeon_bootinfo->major_version > 1) ||
 682	    (octeon_bootinfo->major_version == 1 &&
 683	     octeon_bootinfo->minor_version >= 4))
 684		cvmx_coremask_copy(&sysinfo->core_mask,
 685				   &octeon_bootinfo->ext_core_mask);
 686	else
 687		cvmx_coremask_set64(&sysinfo->core_mask,
 688				    octeon_bootinfo->core_mask);
 689
 690	/* Some broken U-Boot versions pass garbage in the upper bits; clear them */
 691	if (!OCTEON_IS_MODEL(OCTEON_CN78XX))
 692		for (i = 512; i < 1024; i++)
 693			cvmx_coremask_clear_core(&sysinfo->core_mask, i);
 694
 695	sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
 696	sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
 697	sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
 698	sysinfo->board_type = octeon_bootinfo->board_type;
 699	sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
 700	sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
 701	memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
 702	       sizeof(sysinfo->mac_addr_base));
 703	sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
 704	memcpy(sysinfo->board_serial_number,
 705	       octeon_bootinfo->board_serial_number,
 706	       sizeof(sysinfo->board_serial_number));
 707	sysinfo->compact_flash_common_base_addr =
 708		octeon_bootinfo->compact_flash_common_base_addr;
 709	sysinfo->compact_flash_attribute_base_addr =
 710		octeon_bootinfo->compact_flash_attribute_base_addr;
 711	sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
 712	sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
 713	sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
 714
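	/*
	 * The I/O clock is derived here as 50 MHz times the PNR_MUL field
	 * from the reset register, so e.g. a PNR_MUL of 12 gives a 600 MHz
	 * I/O clock.
	 */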
 715	if (OCTEON_IS_OCTEON2()) {
 716		/* I/O clock runs at a different rate than the CPU. */
 717		union cvmx_mio_rst_boot rst_boot;
 718		rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
 719		octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
 720	} else if (OCTEON_IS_OCTEON3()) {
 721		/* I/O clock runs at a different rate than the CPU. */
 722		union cvmx_rst_boot rst_boot;
 723		rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
 724		octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
 725	} else {
 726		octeon_io_clock_rate = sysinfo->cpu_clock_hz;
 727	}
 728
 729	t = read_c0_cvmctl();
 730	if ((t & (1ull << 27)) == 0) {
 731		/*
 732		 * Setup the multiplier save/restore code if
 733		 * CvmCtl[NOMUL] clear.
 734		 */
 735		void *save;
 736		void *save_end;
 737		void *restore;
 738		void *restore_end;
 739		int save_len;
 740		int restore_len;
 741		int save_max = (char *)octeon_mult_save_end -
 742			(char *)octeon_mult_save;
 743		int restore_max = (char *)octeon_mult_restore_end -
 744			(char *)octeon_mult_restore;
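		/*
		 * save_max/restore_max are the room reserved at the generic
		 * octeon_mult_save/restore entry points; the model-specific
		 * variants chosen below are copied over them only if they
		 * fit (see the WARN_ON further down).
		 */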
 745		if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) {
 746			save = octeon_mult_save3;
 747			save_end = octeon_mult_save3_end;
 748			restore = octeon_mult_restore3;
 749			restore_end = octeon_mult_restore3_end;
 750		} else {
 751			save = octeon_mult_save2;
 752			save_end = octeon_mult_save2_end;
 753			restore = octeon_mult_restore2;
 754			restore_end = octeon_mult_restore2_end;
 755		}
 756		save_len = (char *)save_end - (char *)save;
 757		restore_len = (char *)restore_end - (char *)restore;
 758		if (!WARN_ON(save_len > save_max ||
 759				restore_len > restore_max)) {
 760			memcpy(octeon_mult_save, save, save_len);
 761			memcpy(octeon_mult_restore, restore, restore_len);
 762		}
 763	}
 764
 765	/*
 766	 * Only enable the LED controller if we're running on a CN38XX, CN58XX,
 767	 * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
 768	 */
 769	if (!octeon_is_simulation() &&
 770	    octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
 771		cvmx_write_csr(CVMX_LED_EN, 0);
 772		cvmx_write_csr(CVMX_LED_PRT, 0);
 773		cvmx_write_csr(CVMX_LED_DBG, 0);
 774		cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
 775		cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
 776		cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
 777		cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
 778		cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
 779		cvmx_write_csr(CVMX_LED_EN, 1);
 780	}
 781
 782	/*
 783	 * We need to temporarily allocate all memory in the reserve32
 784	 * region. This makes sure the kernel doesn't allocate this
 785	 * memory when it is getting memory from the
 786	 * bootloader. Later, after the memory allocations are
 787	 * complete, the reserve32 will be freed.
 788	 *
 789	 * Allocate the RESERVE32 memory aligned on a 2MB boundary, in
 790	 * case we later use hugetlb entries with it.
 791	 */
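	/*
	 * CONFIG_CAVIUM_RESERVE32 is given in megabytes, hence the << 20
	 * below; the 2 << 20 argument is the 2MB alignment.
	 */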
 792	if (CONFIG_CAVIUM_RESERVE32) {
 793		int64_t addr =
 794			cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
 795							   0, 0, 2 << 20,
 796							   "CAVIUM_RESERVE32", 0);
 797		if (addr < 0)
 798			pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
 799		else
 800			octeon_reserve32_memory = addr;
 801	}
 802
 803#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
 804	if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
 805		pr_info("Skipping L2 locking due to reduced L2 cache size\n");
 806	} else {
 807		uint32_t __maybe_unused ebase = read_c0_ebase() & 0x3ffff000;
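		/*
		 * 0x000, 0x180 and 0x200 below are the architectural MIPS
		 * offsets from EBASE for the TLB refill, general exception
		 * and interrupt vectors respectively.
		 */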
 808#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
 809		/* TLB refill */
 810		cvmx_l2c_lock_mem_region(ebase, 0x100);
 811#endif
 812#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
 813		/* General exception */
 814		cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
 815#endif
 816#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
 817		/* Interrupt handler */
 818		cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
 819#endif
 820#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
 821		cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
 822		cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
 823#endif
 824#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
 825		cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
 826#endif
 827	}
 828#endif
 829
 830	octeon_check_cpu_bist();
 831
 832	octeon_uart = octeon_get_boot_uart();
 833
 834#ifdef CONFIG_SMP
 835	octeon_write_lcd("LinuxSMP");
 836#else
 837	octeon_write_lcd("Linux");
 838#endif
 839
 840	octeon_setup_delays();
 841
 842	/*
 843	 * BIST should always be enabled when doing a soft reset. L2
 844	 * cache locking, for instance, is not cleared unless BIST is
 845	 * enabled.  Unfortunately, due to chip erratum G-200 for
 846	 * CN38XX and CN31XX, BIST must be disabled on these parts.
 847	 */
 848	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
 849	    OCTEON_IS_MODEL(OCTEON_CN31XX))
 850		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
 851	else
 852		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);
 853
 854	/* Default to 64MB in the simulator to speed things up */
 855	if (octeon_is_simulation())
 856		max_memory = 64ull << 20;
 857
 858	arg = strstr(arcs_cmdline, "mem=");
 859	if (arg) {
 860		max_memory = memparse(arg + 4, &p);
 861		if (max_memory == 0)
 862			max_memory = 32ull << 30;
 863		if (*p == '@')
 864			reserve_low_mem = memparse(p + 1, &p);
 865	}
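	/*
	 * "mem=<size>[@<base>]": <size> caps the memory given to the kernel
	 * (0 falls back to the 32GB default above) and <base>, when present,
	 * is remembered in reserve_low_mem for the crash-dump case in
	 * plat_mem_setup().
	 */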
 866
 867	arcs_cmdline[0] = 0;
 868	argc = octeon_boot_desc_ptr->argc;
 869	for (i = 0; i < argc; i++) {
 870		const char *arg =
 871			cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
 872		if ((strncmp(arg, "MEM=", 4) == 0) ||
 873		    (strncmp(arg, "mem=", 4) == 0)) {
 874			max_memory = memparse(arg + 4, &p);
 875			if (max_memory == 0)
 876				max_memory = 32ull << 30;
 877			if (*p == '@')
 878				reserve_low_mem = memparse(p + 1, &p);
 879#ifdef CONFIG_KEXEC
 880		} else if (strncmp(arg, "crashkernel=", 12) == 0) {
 881			crashk_size = memparse(arg+12, &p);
 882			if (*p == '@')
 883				crashk_base = memparse(p+1, &p);
 884			strcat(arcs_cmdline, " ");
 885			strcat(arcs_cmdline, arg);
 886			/*
 887			 * To do: switch parsing to new style, something like:
 888			 * parse_crashkernel(arg, sysinfo->system_dram_size,
 889			 *		  &crashk_size, &crashk_base);
 890			 */
 891#endif
 892		} else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
 893			   sizeof(arcs_cmdline) - 1) {
 894			strcat(arcs_cmdline, " ");
 895			strcat(arcs_cmdline, arg);
 896		}
 897	}
 898
 899	if (strstr(arcs_cmdline, "console=") == NULL) {
 900		if (octeon_uart == 1)
 901			strcat(arcs_cmdline, " console=ttyS1,115200");
 902		else
 903			strcat(arcs_cmdline, " console=ttyS0,115200");
 904	}
 905
 906	mips_hpt_frequency = octeon_get_clock_rate();
 907
 908	octeon_init_cvmcount();
 909
 910	_machine_restart = octeon_restart;
 911	_machine_halt = octeon_halt;
 912
 913#ifdef CONFIG_KEXEC
 914	_machine_kexec_shutdown = octeon_shutdown;
 915	_machine_crash_shutdown = octeon_crash_shutdown;
 916	_machine_kexec_prepare = octeon_kexec_prepare;
 917#ifdef CONFIG_SMP
 918	_crash_smp_send_stop = octeon_crash_smp_send_stop;
 919#endif
 920#endif
 921
 922	octeon_user_io_init();
 923	octeon_setup_smp();
 924}
 925
 926/* Exclude a single page from the regions obtained in plat_mem_setup. */
 927#ifndef CONFIG_CRASH_DUMP
 928static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size)
 929{
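	/*
	 * If the excluded address falls inside the region, register the part
	 * below it and move the region start up to that address; the page at
	 * that address is then dropped by the second test, and the caller
	 * registers whatever remains.
	 */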
 930	if (addr > *mem && addr < *mem + *size) {
 931		u64 inc = addr - *mem;
 932		memblock_add(*mem, inc);
 933		*mem += inc;
 934		*size -= inc;
 935	}
 936
 937	if (addr == *mem && *size > PAGE_SIZE) {
 938		*mem += PAGE_SIZE;
 939		*size -= PAGE_SIZE;
 940	}
 941}
 942#endif /* CONFIG_CRASH_DUMP */
 943
 944void __init fw_init_cmdline(void)
 945{
 946	int i;
 947
 948	octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
 949	for (i = 0; i < octeon_boot_desc_ptr->argc; i++) {
 950		const char *arg =
 951			cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
 952		if (strlen(arcs_cmdline) + strlen(arg) + 1 <
 953			   sizeof(arcs_cmdline) - 1) {
 954			strcat(arcs_cmdline, " ");
 955			strcat(arcs_cmdline, arg);
 956		}
 957	}
 958}
 959
 960void __init *plat_get_fdt(void)
 961{
 962	octeon_bootinfo =
 963		cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
 964	return phys_to_virt(octeon_bootinfo->fdt_addr);
 965}
 966
 967void __init plat_mem_setup(void)
 968{
 969	uint64_t mem_alloc_size;
 970	uint64_t total;
 971	uint64_t crashk_end;
 972#ifndef CONFIG_CRASH_DUMP
 973	int64_t memory;
 974#endif
 975
 976	total = 0;
 977	crashk_end = 0;
 978
 979	/*
 980	 * The MIPS memory init uses the first memory location for
 981	 * some memory vectors. When SPARSEMEM is in use, it doesn't
 982	 * verify that the size is big enough for the final
 983	 * vectors. Making the smallest chunk 4MB seems to be enough
 984	 * to work consistently.
 985	 */
 986	mem_alloc_size = 4 << 20;
 987	if (mem_alloc_size > max_memory)
 988		mem_alloc_size = max_memory;
 989
 990/* The crash kernel ignores the bootmem list; it relies on the mem=X@Y option */
 991#ifdef CONFIG_CRASH_DUMP
 992	memblock_add(reserve_low_mem, max_memory);
 993	total += max_memory;
 994#else
 995#ifdef CONFIG_KEXEC
 996	if (crashk_size > 0) {
 997		memblock_add(crashk_base, crashk_size);
 998		crashk_end = crashk_base + crashk_size;
 999	}
1000#endif
1001	/*
1002	 * When allocating memory, we want incrementing addresses,
1003	 * which is handled by memblock
1004	 */
1005	cvmx_bootmem_lock();
1006	while (total < max_memory) {
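		/*
		 * Grab the next chunk from the bootloader's free list:
		 * mem_alloc_size bytes above the kernel image, 1MB
		 * (0x100000) aligned.  We already hold the bootmem lock,
		 * hence CVMX_BOOTMEM_FLAG_NO_LOCKING.
		 */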
1007		memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
1008						__pa_symbol(&_end), -1,
1009						0x100000,
1010						CVMX_BOOTMEM_FLAG_NO_LOCKING);
1011		if (memory >= 0) {
1012			u64 size = mem_alloc_size;
1013#ifdef CONFIG_KEXEC
1014			uint64_t end;
1015#endif
1016
1017			/*
1018			 * exclude a page at the beginning and end of
1019			 * the 256MB PCIe 'hole' so the kernel will not
1020			 * try to allocate multi-page buffers that
1021			 * span the discontinuity.
1022			 */
1023			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE,
1024					    &memory, &size);
1025			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE +
1026					    CVMX_PCIE_BAR1_PHYS_SIZE,
1027					    &memory, &size);
1028#ifdef CONFIG_KEXEC
1029			end = memory + mem_alloc_size;
1030
1031			/*
1032			 * This function automatically merges address regions
1033			 * next to each other if they are received in
1034			 * incrementing order
1035			 */
1036			if (memory < crashk_base && end > crashk_end) {
1037				/* crash region is fully inside this chunk */
1038				memblock_add(memory, crashk_base - memory);
1039				total += crashk_base - memory;
1040				memblock_add(crashk_end, end - crashk_end);
1041				total += end - crashk_end;
1042				continue;
1043			}
1044
1045			if (memory >= crashk_base && end <= crashk_end)
1046				/*
1047				 * Entire memory region is within the new
1048				 *  kernel's memory, ignore it.
1049				 */
1050				continue;
1051
1052			if (memory > crashk_base && memory < crashk_end &&
1053			    end > crashk_end) {
1054				/*
1055				 * The chunk starts inside the crash region;
1056				 * skip the overlapping part at its start.
1057				 */
1058				mem_alloc_size -= crashk_end - memory;
1059				memory = crashk_end;
1060			} else if (memory < crashk_base && end > crashk_base &&
1061				   end < crashk_end)
1062				/*
1063				 * The chunk ends inside the crash region;
1064				 * chop off its end.
1065				 */
1066				mem_alloc_size -= end - crashk_base;
1067#endif
1068			memblock_add(memory, mem_alloc_size);
1069			total += mem_alloc_size;
1070			/* Recovering mem_alloc_size */
1071			mem_alloc_size = 4 << 20;
1072		} else {
1073			break;
1074		}
1075	}
1076	cvmx_bootmem_unlock();
1077#endif /* CONFIG_CRASH_DUMP */
1078
1079	/*
1080	 * Now that we've allocated the kernel memory it is safe to
1081	 * free the reserved region. We free it here so that builtin
1082	 * drivers can use the memory.
1083	 */
1084	if (octeon_reserve32_memory)
1085		cvmx_bootmem_free_named("CAVIUM_RESERVE32");
1086
1087	if (total == 0)
1088		panic("Unable to allocate memory from "
1089		      "cvmx_bootmem_phy_alloc");
1090}
1091
1092/*
1093 * Emit one character to the boot UART. Exported for use by the
1094 * watchdog timer.
1095 */
1096void prom_putchar(char c)
1097{
1098	uint64_t lsrval;
1099
1100	/* Spin until the transmit holding register is empty (LSR bit 0x20) */
1101	do {
1102		lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
1103	} while ((lsrval & 0x20) == 0);
1104
1105	/* Write the byte */
1106	cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
1107}
1108EXPORT_SYMBOL(prom_putchar);
1109
1110void __init prom_free_prom_memory(void)
1111{
1112	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
1113		/* Check for presence of Core-14449 fix.  */
1114		u32 insn;
1115		u32 *foo;
1116
1117		foo = &insn;
1118
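		/*
		 * The bal below leaves the address of label 1 in $31, so the
		 * lw reads the instruction 12 bytes back, i.e. the one just
		 * before the bal: the PREF emitted by prefetch(foo).  Opcode
		 * 0x33 is PREF; bits 20:16 are the prefetch hint, which
		 * should be 28 with the workaround in place.
		 */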
1119		asm volatile("# before" : : : "memory");
1120		prefetch(foo);
1121		asm volatile(
1122			".set push\n\t"
1123			".set noreorder\n\t"
1124			"bal 1f\n\t"
1125			"nop\n"
1126			"1:\tlw %0,-12($31)\n\t"
1127			".set pop\n\t"
1128			: "=r" (insn) : : "$31", "memory");
1129
1130		if ((insn >> 26) != 0x33)
1131			panic("No PREF instruction at Core-14449 probe point.");
1132
1133		if (((insn >> 16) & 0x1f) != 28)
1134			panic("OCTEON II DCache prefetch workaround not in place (%04x).\n"
1135			      "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).",
1136			      insn);
1137	}
1138}
1139
1140void __init octeon_fill_mac_addresses(void);
1141
1142void __init device_tree_init(void)
1143{
1144	const void *fdt;
1145	bool do_prune;
1146	bool fill_mac;
1147
1148#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
1149	if (!fdt_check_header(&__appended_dtb)) {
1150		fdt = &__appended_dtb;
1151		do_prune = false;
1152		fill_mac = true;
1153		pr_info("Using appended Device Tree.\n");
1154	} else
1155#endif
1156	if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
1157		fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
1158		if (fdt_check_header(fdt))
1159			panic("Corrupt Device Tree passed to kernel.");
1160		do_prune = false;
1161		fill_mac = false;
1162		pr_info("Using passed Device Tree.\n");
1163	} else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
1164		fdt = &__dtb_octeon_68xx_begin;
1165		do_prune = true;
1166		fill_mac = true;
1167	} else {
1168		fdt = &__dtb_octeon_3xxx_begin;
1169		do_prune = true;
1170		fill_mac = true;
1171	}
1172
1173	initial_boot_params = (void *)fdt;
1174
1175	if (do_prune) {
1176		octeon_prune_device_tree();
1177		pr_info("Using internal Device Tree.\n");
1178	}
1179	if (fill_mac)
1180		octeon_fill_mac_addresses();
1181	unflatten_and_copy_device_tree();
1182	init_octeon_system_type();
1183}
1184
1185static int __initdata disable_octeon_edac_p;
1186
1187static int __init disable_octeon_edac(char *str)
1188{
1189	disable_octeon_edac_p = 1;
1190	return 0;
1191}
1192early_param("disable_octeon_edac", disable_octeon_edac);
1193
1194static char *edac_device_names[] = {
1195	"octeon_l2c_edac",
1196	"octeon_pc_edac",
1197};
1198
1199static int __init edac_devinit(void)
1200{
1201	struct platform_device *dev;
1202	int i, err = 0;
1203	int num_lmc;
1204	char *name;
1205
1206	if (disable_octeon_edac_p)
1207		return 0;
1208
1209	for (i = 0; i < ARRAY_SIZE(edac_device_names); i++) {
1210		name = edac_device_names[i];
1211		dev = platform_device_register_simple(name, -1, NULL, 0);
1212		if (IS_ERR(dev)) {
1213			pr_err("Registration of %s failed!\n", name);
1214			err = PTR_ERR(dev);
1215		}
1216	}
1217
1218	num_lmc = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 :
1219		(OCTEON_IS_MODEL(OCTEON_CN56XX) ? 2 : 1);
1220	for (i = 0; i < num_lmc; i++) {
1221		dev = platform_device_register_simple("octeon_lmc_edac",
1222						      i, NULL, 0);
1223		if (IS_ERR(dev)) {
1224			pr_err("Registration of octeon_lmc_edac %d failed!\n", i);
1225			err = PTR_ERR(dev);
1226		}
1227	}
1228
1229	return err;
1230}
1231device_initcall(edac_devinit);
1232
1233static void __initdata *octeon_dummy_iospace;
1234
1235static int __init octeon_no_pci_init(void)
1236{
1237	/*
1238	 * Initially assume there is no PCI. The PCI/PCIe platform code will
1239	 * later re-initialize these to correct values if they are present.
1240	 */
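	/*
	 * Point the port I/O base at zeroed memory so stray inb()/outb()
	 * calls are harmless, and leave ioport_resource empty (start > end)
	 * so no port ranges can be claimed until a real I/O window exists.
	 */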
1241	octeon_dummy_iospace = vzalloc(IO_SPACE_LIMIT);
1242	set_io_port_base((unsigned long)octeon_dummy_iospace);
1243	ioport_resource.start = MAX_RESOURCE;
1244	ioport_resource.end = 0;
1245	return 0;
1246}
1247core_initcall(octeon_no_pci_init);
1248
1249static int __init octeon_no_pci_release(void)
1250{
1251	/*
1252	 * Release the allocated memory if a real IO space is there.
1253	 */
1254	if ((unsigned long)octeon_dummy_iospace != mips_io_port_base)
1255		vfree(octeon_dummy_iospace);
1256	return 0;
1257}
1258late_initcall(octeon_no_pci_release);