   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 2004-2007 Cavium Networks
   7 * Copyright (C) 2008, 2009 Wind River Systems
   8 *   written by Ralf Baechle <ralf@linux-mips.org>
   9 */
  10#include <linux/compiler.h>
  11#include <linux/vmalloc.h>
  12#include <linux/init.h>
  13#include <linux/kernel.h>
  14#include <linux/console.h>
  15#include <linux/delay.h>
  16#include <linux/export.h>
  17#include <linux/interrupt.h>
  18#include <linux/io.h>
  19#include <linux/memblock.h>
  20#include <linux/serial.h>
  21#include <linux/smp.h>
  22#include <linux/types.h>
  23#include <linux/string.h>	/* for memset */
  24#include <linux/tty.h>
  25#include <linux/time.h>
  26#include <linux/platform_device.h>
  27#include <linux/serial_core.h>
  28#include <linux/serial_8250.h>
  29#include <linux/of_fdt.h>
  30#include <linux/libfdt.h>
  31#include <linux/kexec.h>
  32
  33#include <asm/processor.h>
  34#include <asm/reboot.h>
  35#include <asm/smp-ops.h>
  36#include <asm/irq_cpu.h>
  37#include <asm/mipsregs.h>
  38#include <asm/bootinfo.h>
  39#include <asm/sections.h>
  40#include <asm/fw/fw.h>
  41#include <asm/setup.h>
  42#include <asm/prom.h>
  43#include <asm/time.h>
  44
  45#include <asm/octeon/octeon.h>
  46#include <asm/octeon/pci-octeon.h>
  47#include <asm/octeon/cvmx-rst-defs.h>
  48
  49/*
  50 * TRUE for devices having registers with little-endian byte
  51 * order, FALSE for registers with native-endian byte order.
   52 * PCI mandates little-endian, USB and SATA are configurable,
   53 * but we chose little-endian for these.
  54 */
  55const bool octeon_should_swizzle_table[256] = {
  56	[0x00] = true,	/* bootbus/CF */
  57	[0x1b] = true,	/* PCI mmio window */
  58	[0x1c] = true,	/* PCI mmio window */
  59	[0x1d] = true,	/* PCI mmio window */
  60	[0x1e] = true,	/* PCI mmio window */
  61	[0x68] = true,	/* OCTEON III USB */
  62	[0x69] = true,	/* OCTEON III USB */
  63	[0x6c] = true,	/* OCTEON III SATA */
  64	[0x6f] = true,	/* OCTEON II USB */
  65};
  66EXPORT_SYMBOL(octeon_should_swizzle_table);
  67
  68#ifdef CONFIG_PCI
  69extern void pci_console_init(const char *arg);
  70#endif
  71
  72static unsigned long long max_memory = ULLONG_MAX;
  73static unsigned long long reserve_low_mem;
  74
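
    /*
     * Serialises access to the shared boot bus among the drivers that
     * use it (hence the export below).
     */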
  75DEFINE_SEMAPHORE(octeon_bootbus_sem, 1);
  76EXPORT_SYMBOL(octeon_bootbus_sem);
  77
  78static struct octeon_boot_descriptor *octeon_boot_desc_ptr;
  79
  80struct cvmx_bootinfo *octeon_bootinfo;
  81EXPORT_SYMBOL(octeon_bootinfo);
  82
  83#ifdef CONFIG_KEXEC
  84#ifdef CONFIG_SMP
  85/*
   86 * Keep the secondary CPUs spinning until the relocation code is
   87 * ready, then reboot them into the relocated kernel.
  88 */
  89static void octeon_kexec_smp_down(void *ignored)
  90{
  91	int cpu = smp_processor_id();
  92
  93	local_irq_disable();
  94	set_cpu_online(cpu, false);
  95	while (!atomic_read(&kexec_ready_to_reboot))
  96		cpu_relax();
  97
  98	asm volatile (
  99	"	sync						\n"
 100	"	synci	($0)					\n");
 101
 102	kexec_reboot();
 103}
 104#endif
 105
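    /*
     * Physical DRAM layout used when rebuilding the bootmem free list
     * for kexec: the first 256 MB at address 0, a further 256 MB window
     * at 0x410000000, and the remainder (up to 16 GB in total) at
     * 0x020000000.
     */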
 106#define OCTEON_DDR0_BASE    (0x0ULL)
 107#define OCTEON_DDR0_SIZE    (0x010000000ULL)
 108#define OCTEON_DDR1_BASE    (0x410000000ULL)
 109#define OCTEON_DDR1_SIZE    (0x010000000ULL)
 110#define OCTEON_DDR2_BASE    (0x020000000ULL)
 111#define OCTEON_DDR2_SIZE    (0x3e0000000ULL)
 112#define OCTEON_MAX_PHY_MEM_SIZE (16*1024*1024*1024ULL)
 113
 114static struct kimage *kimage_ptr;
 115
 116static void kexec_bootmem_init(uint64_t mem_size, uint32_t low_reserved_bytes)
 117{
 118	int64_t addr;
 119	struct cvmx_bootmem_desc *bootmem_desc;
 120
 121	bootmem_desc = cvmx_bootmem_get_desc();
 122
 123	if (mem_size > OCTEON_MAX_PHY_MEM_SIZE) {
 124		mem_size = OCTEON_MAX_PHY_MEM_SIZE;
  125		pr_err("Error: requested memory too large, "
 126		       "truncating to maximum size\n");
 127	}
 128
 129	bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER;
 130	bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER;
 131
 132	addr = (OCTEON_DDR0_BASE + reserve_low_mem + low_reserved_bytes);
 133	bootmem_desc->head_addr = 0;
 134
 135	if (mem_size <= OCTEON_DDR0_SIZE) {
 136		__cvmx_bootmem_phy_free(addr,
 137				mem_size - reserve_low_mem -
 138				low_reserved_bytes, 0);
 139		return;
 140	}
 141
 142	__cvmx_bootmem_phy_free(addr,
 143			OCTEON_DDR0_SIZE - reserve_low_mem -
 144			low_reserved_bytes, 0);
 145
 146	mem_size -= OCTEON_DDR0_SIZE;
 147
 148	if (mem_size > OCTEON_DDR1_SIZE) {
 149		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
 150		__cvmx_bootmem_phy_free(OCTEON_DDR2_BASE,
 151				mem_size - OCTEON_DDR1_SIZE, 0);
 152	} else
 153		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
 154}
 155
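    /*
     * kexec passes the new command line in a segment that begins with
     * the string "kexec".  Split that string in place into argv[]
     * entries of the boot descriptor (pointing at the segment's final
     * load address), mirroring what the bootloader does on a cold boot.
     */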
 156static int octeon_kexec_prepare(struct kimage *image)
 157{
 158	int i;
 159	char *bootloader = "kexec";
 160
 161	octeon_boot_desc_ptr->argc = 0;
 162	for (i = 0; i < image->nr_segments; i++) {
 163		if (!strncmp(bootloader, (char *)image->segment[i].buf,
 164				strlen(bootloader))) {
 165			/*
 166			 * convert command line string to array
 167			 * of parameters (as bootloader does).
 168			 */
 169			int argc = 0, offt;
 170			char *str = (char *)image->segment[i].buf;
 171			char *ptr = strchr(str, ' ');
 172			while (ptr && (OCTEON_ARGV_MAX_ARGS > argc)) {
 173				*ptr = '\0';
 174				if (ptr[1] != ' ') {
 175					offt = (int)(ptr - str + 1);
 176					octeon_boot_desc_ptr->argv[argc] =
 177						image->segment[i].mem + offt;
 178					argc++;
 179				}
 180				ptr = strchr(ptr + 1, ' ');
 181			}
 182			octeon_boot_desc_ptr->argc = argc;
 183			break;
 184		}
 185	}
 186
 187	/*
 188	 * Information about segments will be needed during pre-boot memory
 189	 * initialization.
 190	 */
 191	kimage_ptr = image;
 192	return 0;
 193}
 194
 195static void octeon_generic_shutdown(void)
 196{
 197	int i;
 198#ifdef CONFIG_SMP
 199	int cpu;
 200#endif
 201	struct cvmx_bootmem_desc *bootmem_desc;
 202	void *named_block_array_ptr;
 203
 204	bootmem_desc = cvmx_bootmem_get_desc();
 205	named_block_array_ptr =
 206		cvmx_phys_to_ptr(bootmem_desc->named_block_array_addr);
 207
 208#ifdef CONFIG_SMP
 209	/* disable watchdogs */
 210	for_each_online_cpu(cpu)
 211		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
 212#else
 213	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
 214#endif
 215	if (kimage_ptr != kexec_crash_image) {
 216		memset(named_block_array_ptr,
 217			0x0,
 218			CVMX_BOOTMEM_NUM_NAMED_BLOCKS *
 219			sizeof(struct cvmx_bootmem_named_block_desc));
 220		/*
 221		 * Mark all memory (except low 0x100000 bytes) as free.
  222		 * This is the same thing the bootloader does.
 223		 */
 224		kexec_bootmem_init(octeon_bootinfo->dram_size*1024ULL*1024ULL,
 225				0x100000);
 226		/*
 227		 * Allocate all segments to avoid their corruption during boot.
 228		 */
 229		for (i = 0; i < kimage_ptr->nr_segments; i++)
 230			cvmx_bootmem_alloc_address(
 231				kimage_ptr->segment[i].memsz + 2*PAGE_SIZE,
 232				kimage_ptr->segment[i].mem - PAGE_SIZE,
 233				PAGE_SIZE);
 234	} else {
 235		/*
 236		 * Do not mark all memory as free. Free only named sections
 237		 * leaving the rest of memory unchanged.
 238		 */
 239		struct cvmx_bootmem_named_block_desc *ptr =
 240			(struct cvmx_bootmem_named_block_desc *)
 241			named_block_array_ptr;
 242
 243		for (i = 0; i < bootmem_desc->named_block_num_blocks; i++)
 244			if (ptr[i].size)
 245				cvmx_bootmem_free_named(ptr[i].name);
 246	}
 247	kexec_args[2] = 1UL; /* running on octeon_main_processor */
 248	kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
 249#ifdef CONFIG_SMP
 250	secondary_kexec_args[2] = 0UL; /* running on secondary cpu */
 251	secondary_kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
 252#endif
 253}
 254
 255static void octeon_shutdown(void)
 256{
 257	octeon_generic_shutdown();
 258#ifdef CONFIG_SMP
 259	smp_call_function(octeon_kexec_smp_down, NULL, 0);
 260	smp_wmb();
 261	while (num_online_cpus() > 1) {
 262		cpu_relax();
 263		mdelay(1);
 264	}
 265#endif
 266}
 267
 268static void octeon_crash_shutdown(struct pt_regs *regs)
 269{
 270	octeon_generic_shutdown();
 271	default_machine_crash_shutdown(regs);
 272}
 273
 274#ifdef CONFIG_SMP
 275void octeon_crash_smp_send_stop(void)
 276{
 277	int cpu;
 278
 279	/* disable watchdogs */
 280	for_each_online_cpu(cpu)
 281		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
 282}
 283#endif
 284
 285#endif /* CONFIG_KEXEC */
 286
 287uint64_t octeon_reserve32_memory;
 288EXPORT_SYMBOL(octeon_reserve32_memory);
 289
 290#ifdef CONFIG_KEXEC
  291/* The crashkernel cmdline parameter is parsed _after_ memory setup,
  292 * so we also parse it here (workaround for EHB5200) */
 293static uint64_t crashk_size, crashk_base;
 294#endif
 295
 296static int octeon_uart;
 297
 298extern asmlinkage void handle_int(void);
 299
 300/**
 301 * octeon_is_simulation - Return non-zero if we are currently running
 302 * in the Octeon simulator
 303 *
 304 * Return: non-0 if running in the Octeon simulator, 0 otherwise
 305 */
 306int octeon_is_simulation(void)
 307{
 308	return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
 309}
 310EXPORT_SYMBOL(octeon_is_simulation);
 311
 312/**
 313 * octeon_is_pci_host - Return true if Octeon is in PCI Host mode. This means
 314 * Linux can control the PCI bus.
 315 *
 316 * Return: Non-zero if Octeon is in host mode.
 317 */
 318int octeon_is_pci_host(void)
 319{
 320#ifdef CONFIG_PCI
 321	return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
 322#else
 323	return 0;
 324#endif
 325}
 326
 327/**
 328 * octeon_get_clock_rate - Get the clock rate of Octeon
 329 *
 330 * Return: Clock rate in HZ
 331 */
 332uint64_t octeon_get_clock_rate(void)
 333{
 334	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();
 335
 336	return sysinfo->cpu_clock_hz;
 337}
 338EXPORT_SYMBOL(octeon_get_clock_rate);
 339
 340static u64 octeon_io_clock_rate;
 341
 342u64 octeon_get_io_clock_rate(void)
 343{
 344	return octeon_io_clock_rate;
 345}
 346EXPORT_SYMBOL(octeon_get_io_clock_rate);
 347
 348
 349/**
 350 * octeon_write_lcd - Write to the LCD display connected to the bootbus.
 351 * @s:	    String to write
 352 *
 353 * This display exists on most Cavium evaluation boards. If it doesn't exist,
 354 * then this function doesn't do anything.
 355 */
 356static void octeon_write_lcd(const char *s)
 357{
 358	if (octeon_bootinfo->led_display_base_addr) {
 359		void __iomem *lcd_address =
 360			ioremap(octeon_bootinfo->led_display_base_addr,
 361					8);
 362		int i;
 363		for (i = 0; i < 8; i++, s++) {
 364			if (*s)
 365				iowrite8(*s, lcd_address + i);
 366			else
 367				iowrite8(' ', lcd_address + i);
 368		}
 369		iounmap(lcd_address);
 370	}
 371}
 372
 373/**
 374 * octeon_get_boot_uart - Return the console uart passed by the bootloader
 375 *
 376 * Return: uart number (0 or 1)
 377 */
 378static int octeon_get_boot_uart(void)
 379{
 380	return (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
 381		1 : 0;
 382}
 383
 384/**
 385 * octeon_get_boot_coremask - Get the coremask Linux was booted on.
 386 *
 387 * Return: Core mask
 388 */
 389int octeon_get_boot_coremask(void)
 390{
 391	return octeon_boot_desc_ptr->core_mask;
 392}
 393
 394/**
 395 * octeon_check_cpu_bist - Check the hardware BIST results for a CPU
 396 */
 397void octeon_check_cpu_bist(void)
 398{
 399	const int coreid = cvmx_get_core_num();
 400	unsigned long long mask;
 401	unsigned long long bist_val;
 402
 403	/* Check BIST results for COP0 registers */
 404	mask = 0x1f00000000ull;
 405	bist_val = read_octeon_c0_icacheerr();
 406	if (bist_val & mask)
 407		pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
 408		       coreid, bist_val);
 409
 410	bist_val = read_octeon_c0_dcacheerr();
 411	if (bist_val & 1)
 412		pr_err("Core%d L1 Dcache parity error: "
 413		       "CacheErr(dcache) = 0x%llx\n",
 414		       coreid, bist_val);
 415
 416	mask = 0xfc00000000000000ull;
 417	bist_val = read_c0_cvmmemctl();
 418	if (bist_val & mask)
 419		pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
 420		       coreid, bist_val);
 421
 422	write_octeon_c0_dcacheerr(0);
 423}
 424
 425/**
 426 * octeon_restart - Reboot Octeon
 427 *
 428 * @command: Command to pass to the bootloader. Currently ignored.
 429 */
 430static void octeon_restart(char *command)
 431{
 432	/* Disable all watchdogs before soft reset. They don't get cleared */
 433#ifdef CONFIG_SMP
 434	int cpu;
 435	for_each_online_cpu(cpu)
 436		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
 437#else
 438	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
 439#endif
 440
 441	mb();
 442	while (1)
 443		if (OCTEON_IS_OCTEON3())
 444			cvmx_write_csr(CVMX_RST_SOFT_RST, 1);
 445		else
 446			cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
 447}
 448
 449
 450/**
 451 * octeon_kill_core - Permanently stop a core.
 452 *
 453 * @arg: Ignored.
 454 */
 455static void octeon_kill_core(void *arg)
 456{
 457	if (octeon_is_simulation())
  458		/* A break instruction causes the simulator to stop a core */
 459		asm volatile ("break" ::: "memory");
 460
 461	local_irq_disable();
 462	/* Disable watchdog on this core. */
 463	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
 464	/* Spin in a low power mode. */
 465	while (true)
 466		asm volatile ("wait" ::: "memory");
 467}
 468
 469
 470/**
 471 * octeon_halt - Halt the system
 472 */
 473static void octeon_halt(void)
 474{
 475	smp_call_function(octeon_kill_core, NULL, 0);
 476
 477	switch (octeon_bootinfo->board_type) {
 478	case CVMX_BOARD_TYPE_NAO38:
 479		/* Driving a 1 to GPIO 12 shuts off this board */
 480		cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
 481		cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
 482		break;
 483	default:
 484		octeon_write_lcd("PowerOff");
 485		break;
 486	}
 487
 488	octeon_kill_core(NULL);
 489}
 490
 491static char __read_mostly octeon_system_type[80];
 492
 493static void __init init_octeon_system_type(void)
 494{
 495	char const *board_type;
 496
 497	board_type = cvmx_board_type_to_string(octeon_bootinfo->board_type);
 498	if (board_type == NULL) {
 499		struct device_node *root;
 500		int ret;
 501
 502		root = of_find_node_by_path("/");
 503		ret = of_property_read_string(root, "model", &board_type);
 504		of_node_put(root);
 505		if (ret)
 506			board_type = "Unsupported Board";
 507	}
 508
 509	snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
 510		 board_type, octeon_model_get_string(read_c0_prid()));
 511}
 512
 513/**
 514 * octeon_board_type_string - Return a string representing the system type
 515 *
 516 * Return: system type string
 517 */
 518const char *octeon_board_type_string(void)
 519{
 520	return octeon_system_type;
 521}
 522
 523const char *get_system_type(void)
 524	__attribute__ ((alias("octeon_board_type_string")));
 525
 526void octeon_user_io_init(void)
 527{
 528	union octeon_cvmemctl cvmmemctl;
 529
 530	/* Get the current settings for CP0_CVMMEMCTL_REG */
 531	cvmmemctl.u64 = read_c0_cvmmemctl();
 532	/* R/W If set, marked write-buffer entries time out the same
 533	 * as other entries; if clear, marked write-buffer entries
 534	 * use the maximum timeout. */
 535	cvmmemctl.s.dismarkwblongto = 1;
 536	/* R/W If set, a merged store does not clear the write-buffer
 537	 * entry timeout state. */
 538	cvmmemctl.s.dismrgclrwbto = 0;
 539	/* R/W Two bits that are the MSBs of the resultant CVMSEG LM
 540	 * word location for an IOBDMA. The other 8 bits come from the
 541	 * SCRADDR field of the IOBDMA. */
 542	cvmmemctl.s.iobdmascrmsb = 0;
 543	/* R/W If set, SYNCWS and SYNCS only order marked stores; if
 544	 * clear, SYNCWS and SYNCS only order unmarked
 545	 * stores. SYNCWSMARKED has no effect when DISSYNCWS is
 546	 * set. */
 547	cvmmemctl.s.syncwsmarked = 0;
 548	/* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
 549	cvmmemctl.s.dissyncws = 0;
 550	/* R/W If set, no stall happens on write buffer full. */
 551	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
 552		cvmmemctl.s.diswbfst = 1;
 553	else
 554		cvmmemctl.s.diswbfst = 0;
 555	/* R/W If set (and SX set), supervisor-level loads/stores can
 556	 * use XKPHYS addresses with <48>==0 */
 557	cvmmemctl.s.xkmemenas = 0;
 558
 559	/* R/W If set (and UX set), user-level loads/stores can use
 560	 * XKPHYS addresses with VA<48>==0 */
 561	cvmmemctl.s.xkmemenau = 0;
 562
 563	/* R/W If set (and SX set), supervisor-level loads/stores can
 564	 * use XKPHYS addresses with VA<48>==1 */
 565	cvmmemctl.s.xkioenas = 0;
 566
 567	/* R/W If set (and UX set), user-level loads/stores can use
 568	 * XKPHYS addresses with VA<48>==1 */
 569	cvmmemctl.s.xkioenau = 0;
 570
 571	/* R/W If set, all stores act as SYNCW (NOMERGE must be set
 572	 * when this is set) RW, reset to 0. */
 573	cvmmemctl.s.allsyncw = 0;
 574
 575	/* R/W If set, no stores merge, and all stores reach the
 576	 * coherent bus in order. */
 577	cvmmemctl.s.nomerge = 0;
  578	/* R/W Selects the bit in the counter used for DID time-outs:
  579	 * 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. Actual time-out is
  580	 * between 1x and 2x this interval. For example, with
  581	 * DIDTTO=3, expiration interval is between 16K and 32K. */
 582	cvmmemctl.s.didtto = 0;
 583	/* R/W If set, the (mem) CSR clock never turns off. */
 584	cvmmemctl.s.csrckalwys = 0;
 585	/* R/W If set, mclk never turns off. */
 586	cvmmemctl.s.mclkalwys = 0;
  587	/* R/W Selects the bit in the counter used for write-buffer
  588	 * flush time-outs: bit (WBFLT+11) of an internal counter
  589	 * determines expiration. The write buffer expires between 1x
  590	 * and 2x this interval. For example, with WBFLT = 0, a write
  591	 * buffer expires between 2K and 4K cycles after the write
  592	 * buffer entry is allocated. */
 593	cvmmemctl.s.wbfltime = 0;
 594	/* R/W If set, do not put Istream in the L2 cache. */
 595	cvmmemctl.s.istrnol2 = 0;
 596
 597	/*
 598	 * R/W The write buffer threshold. As per erratum Core-14752
 599	 * for CN63XX, a sc/scd might fail if the write buffer is
 600	 * full.  Lowering WBTHRESH greatly lowers the chances of the
 601	 * write buffer ever being full and triggering the erratum.
 602	 */
 603	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
 604		cvmmemctl.s.wbthresh = 4;
 605	else
 606		cvmmemctl.s.wbthresh = 10;
 607
 608	/* R/W If set, CVMSEG is available for loads/stores in
 609	 * kernel/debug mode. */
 610#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
 611	cvmmemctl.s.cvmsegenak = 1;
 612#else
 613	cvmmemctl.s.cvmsegenak = 0;
 614#endif
 615	/* R/W If set, CVMSEG is available for loads/stores in
 616	 * supervisor mode. */
 617	cvmmemctl.s.cvmsegenas = 0;
 618	/* R/W If set, CVMSEG is available for loads/stores in user
 619	 * mode. */
 620	cvmmemctl.s.cvmsegenau = 0;
 621
 622	write_c0_cvmmemctl(cvmmemctl.u64);
 623
 624	/* Setup of CVMSEG is done in kernel-entry-init.h */
 625	if (smp_processor_id() == 0)
 626		pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
 627			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
 628			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
 629
 630	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
 631		union cvmx_iob_fau_timeout fau_timeout;
 632
 633		/* Set a default for the hardware timeouts */
 634		fau_timeout.u64 = 0;
 635		fau_timeout.s.tout_val = 0xfff;
 636		/* Disable tagwait FAU timeout */
 637		fau_timeout.s.tout_enb = 0;
 638		cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
 639	}
 640
 641	if ((!OCTEON_IS_MODEL(OCTEON_CN68XX) &&
 642	     !OCTEON_IS_MODEL(OCTEON_CN7XXX)) ||
 643	    OCTEON_IS_MODEL(OCTEON_CN70XX)) {
 644		union cvmx_pow_nw_tim nm_tim;
 645
 646		nm_tim.u64 = 0;
 647		/* 4096 cycles */
 648		nm_tim.s.nw_tim = 3;
 649		cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);
 650	}
 651
 652	write_octeon_c0_icacheerr(0);
 653	write_c0_derraddr1(0);
 654}
 655
 656/**
 657 * prom_init - Early entry point for arch setup
 658 */
 659void __init prom_init(void)
 660{
 661	struct cvmx_sysinfo *sysinfo;
 662	const char *arg;
 663	char *p;
 664	int i;
 665	u64 t;
 666	int argc;
 667	/*
 668	 * The bootloader passes a pointer to the boot descriptor in
 669	 * $a3, this is available as fw_arg3.
 670	 */
 671	octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
 672	octeon_bootinfo =
 673		cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
 674	cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));
 675
 676	sysinfo = cvmx_sysinfo_get();
 677	memset(sysinfo, 0, sizeof(*sysinfo));
 678	sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
 679	sysinfo->phy_mem_desc_addr = (u64)phys_to_virt(octeon_bootinfo->phy_mem_desc_addr);
 680
 681	if ((octeon_bootinfo->major_version > 1) ||
 682	    (octeon_bootinfo->major_version == 1 &&
 683	     octeon_bootinfo->minor_version >= 4))
 684		cvmx_coremask_copy(&sysinfo->core_mask,
 685				   &octeon_bootinfo->ext_core_mask);
 686	else
 687		cvmx_coremask_set64(&sysinfo->core_mask,
 688				    octeon_bootinfo->core_mask);
 689
  690	/* Some broken U-Boot versions pass garbage in the upper bits; clear them out */
 691	if (!OCTEON_IS_MODEL(OCTEON_CN78XX))
 692		for (i = 512; i < 1024; i++)
 693			cvmx_coremask_clear_core(&sysinfo->core_mask, i);
 694
 695	sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
 696	sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
 697	sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
 698	sysinfo->board_type = octeon_bootinfo->board_type;
 699	sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
 700	sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
 701	memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
 702	       sizeof(sysinfo->mac_addr_base));
 703	sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
 704	memcpy(sysinfo->board_serial_number,
 705	       octeon_bootinfo->board_serial_number,
 706	       sizeof(sysinfo->board_serial_number));
 707	sysinfo->compact_flash_common_base_addr =
 708		octeon_bootinfo->compact_flash_common_base_addr;
 709	sysinfo->compact_flash_attribute_base_addr =
 710		octeon_bootinfo->compact_flash_attribute_base_addr;
 711	sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
 712	sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
 713	sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
 714
 715	if (OCTEON_IS_OCTEON2()) {
 716		/* I/O clock runs at a different rate than the CPU. */
 717		union cvmx_mio_rst_boot rst_boot;
 718		rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
 719		octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
 720	} else if (OCTEON_IS_OCTEON3()) {
 721		/* I/O clock runs at a different rate than the CPU. */
 722		union cvmx_rst_boot rst_boot;
 723		rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
 724		octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
 725	} else {
 726		octeon_io_clock_rate = sysinfo->cpu_clock_hz;
 727	}
 728
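    	/*
    	 * CvmCtl bit 27 (NOMUL) clear means the extended Octeon multiplier
    	 * state is present.  In that case, patch the model-specific
    	 * save/restore stubs (OCTEON II vs. OCTEON III) over the generic
    	 * octeon_mult_save/restore areas, which the low-level context
    	 * switch code is assumed to call.
    	 */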
 729	t = read_c0_cvmctl();
 730	if ((t & (1ull << 27)) == 0) {
 731		/*
 732		 * Setup the multiplier save/restore code if
 733		 * CvmCtl[NOMUL] clear.
 734		 */
 735		void *save;
 736		void *save_end;
 737		void *restore;
 738		void *restore_end;
 739		int save_len;
 740		int restore_len;
 741		int save_max = (char *)octeon_mult_save_end -
 742			(char *)octeon_mult_save;
 743		int restore_max = (char *)octeon_mult_restore_end -
 744			(char *)octeon_mult_restore;
 745		if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) {
 746			save = octeon_mult_save3;
 747			save_end = octeon_mult_save3_end;
 748			restore = octeon_mult_restore3;
 749			restore_end = octeon_mult_restore3_end;
 750		} else {
 751			save = octeon_mult_save2;
 752			save_end = octeon_mult_save2_end;
 753			restore = octeon_mult_restore2;
 754			restore_end = octeon_mult_restore2_end;
 755		}
 756		save_len = (char *)save_end - (char *)save;
 757		restore_len = (char *)restore_end - (char *)restore;
 758		if (!WARN_ON(save_len > save_max ||
 759				restore_len > restore_max)) {
 760			memcpy(octeon_mult_save, save, save_len);
 761			memcpy(octeon_mult_restore, restore, restore_len);
 762		}
 763	}
 764
 765	/*
 766	 * Only enable the LED controller if we're running on a CN38XX, CN58XX,
 767	 * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
 768	 */
 769	if (!octeon_is_simulation() &&
 770	    octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
 771		cvmx_write_csr(CVMX_LED_EN, 0);
 772		cvmx_write_csr(CVMX_LED_PRT, 0);
 773		cvmx_write_csr(CVMX_LED_DBG, 0);
 774		cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
 775		cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
 776		cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
 777		cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
 778		cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
 779		cvmx_write_csr(CVMX_LED_EN, 1);
 780	}
 781
 782	/*
 783	 * We need to temporarily allocate all memory in the reserve32
 784	 * region. This makes sure the kernel doesn't allocate this
 785	 * memory when it is getting memory from the
 786	 * bootloader. Later, after the memory allocations are
 787	 * complete, the reserve32 will be freed.
 788	 *
 789	 * Allocate memory for RESERVED32 aligned on 2MB boundary. This
 790	 * is in case we later use hugetlb entries with it.
 791	 */
 792	if (CONFIG_CAVIUM_RESERVE32) {
 793		int64_t addr =
 794			cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
 795							   0, 0, 2 << 20,
 796							   "CAVIUM_RESERVE32", 0);
 797		if (addr < 0)
 798			pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
 799		else
 800			octeon_reserve32_memory = addr;
 801	}
 802
 803#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
 804	if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
 805		pr_info("Skipping L2 locking due to reduced L2 cache size\n");
 806	} else {
 807		uint32_t __maybe_unused ebase = read_c0_ebase() & 0x3ffff000;
 808#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
 809		/* TLB refill */
 810		cvmx_l2c_lock_mem_region(ebase, 0x100);
 811#endif
 812#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
 813		/* General exception */
 814		cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
 815#endif
 816#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
 817		/* Interrupt handler */
 818		cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
 819#endif
 820#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
 821		cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
 822		cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
 823#endif
 824#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
 825		cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
 826#endif
 827	}
 828#endif
 829
 830	octeon_check_cpu_bist();
 831
 832	octeon_uart = octeon_get_boot_uart();
 833
 834#ifdef CONFIG_SMP
 835	octeon_write_lcd("LinuxSMP");
 836#else
 837	octeon_write_lcd("Linux");
 838#endif
 839
 840	octeon_setup_delays();
 841
 842	/*
 843	 * BIST should always be enabled when doing a soft reset. L2
 844	 * Cache locking for instance is not cleared unless BIST is
 845	 * enabled.  Unfortunately due to a chip errata G-200 for
 846	 * Cn38XX and CN31XX, BIST must be disabled on these parts.
 847	 */
 848	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
 849	    OCTEON_IS_MODEL(OCTEON_CN31XX))
 850		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
 851	else
 852		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);
 853
 854	/* Default to 64MB in the simulator to speed things up */
 855	if (octeon_is_simulation())
 856		max_memory = 64ull << 20;
 857
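    	/*
    	 * Honour a "mem=SIZE[@OFFSET]" option already present in the
    	 * command line.  For example (illustrative only), "mem=1024M@1M"
    	 * caps usable memory at 1 GB and keeps the first 1 MB reserved.
    	 */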
 858	arg = strstr(arcs_cmdline, "mem=");
 859	if (arg) {
 860		max_memory = memparse(arg + 4, &p);
 861		if (max_memory == 0)
 862			max_memory = 32ull << 30;
 863		if (*p == '@')
 864			reserve_low_mem = memparse(p + 1, &p);
 865	}
 866
 867	arcs_cmdline[0] = 0;
 868	argc = octeon_boot_desc_ptr->argc;
 869	for (i = 0; i < argc; i++) {
 870		const char *arg =
 871			cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
 872		if ((strncmp(arg, "MEM=", 4) == 0) ||
 873		    (strncmp(arg, "mem=", 4) == 0)) {
 874			max_memory = memparse(arg + 4, &p);
 875			if (max_memory == 0)
 876				max_memory = 32ull << 30;
 877			if (*p == '@')
 878				reserve_low_mem = memparse(p + 1, &p);
 879#ifdef CONFIG_KEXEC
 880		} else if (strncmp(arg, "crashkernel=", 12) == 0) {
 881			crashk_size = memparse(arg+12, &p);
 882			if (*p == '@')
 883				crashk_base = memparse(p+1, &p);
 884			strcat(arcs_cmdline, " ");
 885			strcat(arcs_cmdline, arg);
 886			/*
 887			 * To do: switch parsing to new style, something like:
 888			 * parse_crashkernel(arg, sysinfo->system_dram_size,
 889			 *		  &crashk_size, &crashk_base);
 890			 */
 891#endif
 892		} else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
 893			   sizeof(arcs_cmdline) - 1) {
 894			strcat(arcs_cmdline, " ");
 895			strcat(arcs_cmdline, arg);
 896		}
 897	}
 898
 899	if (strstr(arcs_cmdline, "console=") == NULL) {
 900		if (octeon_uart == 1)
 901			strcat(arcs_cmdline, " console=ttyS1,115200");
 902		else
 903			strcat(arcs_cmdline, " console=ttyS0,115200");
 904	}
 905
 906	mips_hpt_frequency = octeon_get_clock_rate();
 907
 908	octeon_init_cvmcount();
 909
 910	_machine_restart = octeon_restart;
 911	_machine_halt = octeon_halt;
 912
 913#ifdef CONFIG_KEXEC
 914	_machine_kexec_shutdown = octeon_shutdown;
 915	_machine_crash_shutdown = octeon_crash_shutdown;
 916	_machine_kexec_prepare = octeon_kexec_prepare;
 917#ifdef CONFIG_SMP
 918	_crash_smp_send_stop = octeon_crash_smp_send_stop;
 919#endif
 920#endif
 921
 922	octeon_user_io_init();
 923	octeon_setup_smp();
 924}
 925
 926/* Exclude a single page from the regions obtained in plat_mem_setup. */
 927#ifndef CONFIG_CRASH_DUMP
 928static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size)
 929{
 930	if (addr > *mem && addr < *mem + *size) {
 931		u64 inc = addr - *mem;
 932		memblock_add(*mem, inc);
 933		*mem += inc;
 934		*size -= inc;
 935	}
 936
 937	if (addr == *mem && *size > PAGE_SIZE) {
 938		*mem += PAGE_SIZE;
 939		*size -= PAGE_SIZE;
 940	}
 941}
 942#endif /* CONFIG_CRASH_DUMP */
 943
 944void __init fw_init_cmdline(void)
 945{
 946	int i;
 947
 948	octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
 949	for (i = 0; i < octeon_boot_desc_ptr->argc; i++) {
 950		const char *arg =
 951			cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
 952		if (strlen(arcs_cmdline) + strlen(arg) + 1 <
 953			   sizeof(arcs_cmdline) - 1) {
 954			strcat(arcs_cmdline, " ");
 955			strcat(arcs_cmdline, arg);
 956		}
 957	}
 958}
 959
 960void __init *plat_get_fdt(void)
 961{
 962	octeon_bootinfo =
 963		cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
 964	return phys_to_virt(octeon_bootinfo->fdt_addr);
 965}
 966
 967void __init plat_mem_setup(void)
 968{
 969	uint64_t mem_alloc_size;
 970	uint64_t total;
 971	uint64_t crashk_end;
 972#ifndef CONFIG_CRASH_DUMP
 973	int64_t memory;
 974#endif
 975
 976	total = 0;
 977	crashk_end = 0;
 978
 979	/*
  980	 * The MIPS memory init uses the first memory location for
  981	 * some memory vectors. When SPARSEMEM is in use, it doesn't
  982	 * verify that the size is big enough for the final
  983	 * vectors. Making the smallest chunk 4MB seems to be enough
 984	 * to consistently work.
 985	 */
 986	mem_alloc_size = 4 << 20;
 987	if (mem_alloc_size > max_memory)
 988		mem_alloc_size = max_memory;
 989
  990/* The crash kernel ignores the bootmem list; it relies on the mem=X@Y option */
 991#ifdef CONFIG_CRASH_DUMP
 992	memblock_add(reserve_low_mem, max_memory);
 993	total += max_memory;
 994#else
 995#ifdef CONFIG_KEXEC
 996	if (crashk_size > 0) {
 997		memblock_add(crashk_base, crashk_size);
 998		crashk_end = crashk_base + crashk_size;
 999	}
1000#endif
1001	/*
1002	 * When allocating memory, we want incrementing addresses,
1003	 * which is handled by memblock
1004	 */
1005	cvmx_bootmem_lock();
1006	while (total < max_memory) {
1007		memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
1008						__pa_symbol(&_end), -1,
1009						0x100000,
1010						CVMX_BOOTMEM_FLAG_NO_LOCKING);
1011		if (memory >= 0) {
1012			u64 size = mem_alloc_size;
1013#ifdef CONFIG_KEXEC
1014			uint64_t end;
1015#endif
1016
1017			/*
1018			 * exclude a page at the beginning and end of
1019			 * the 256MB PCIe 'hole' so the kernel will not
1020			 * try to allocate multi-page buffers that
1021			 * span the discontinuity.
1022			 */
1023			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE,
1024					    &memory, &size);
1025			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE +
1026					    CVMX_PCIE_BAR1_PHYS_SIZE,
1027					    &memory, &size);
1028#ifdef CONFIG_KEXEC
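    			/*
    			 * Keep the allocation clear of the crash kernel
    			 * region: the cases below handle the allocation
    			 * containing the region, being contained by it, or
    			 * overlapping either of its ends.
    			 */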
1029			end = memory + mem_alloc_size;
1030
1031			/*
1032			 * This function automatically merges address regions
1033			 * next to each other if they are received in
1034			 * incrementing order
1035			 */
 1036			if (memory < crashk_base && end > crashk_end) {
 1037				/* The crash region lies entirely inside this allocation. */
1038				memblock_add(memory, crashk_base - memory);
1039				total += crashk_base - memory;
1040				memblock_add(crashk_end, end - crashk_end);
1041				total += end - crashk_end;
1042				continue;
1043			}
1044
1045			if (memory >= crashk_base && end <= crashk_end)
1046				/*
 1047				 * The entire allocation lies within the crash
 1048				 * kernel's region, ignore it.
1049				 */
1050				continue;
1051
1052			if (memory > crashk_base && memory < crashk_end &&
1053			    end > crashk_end) {
 1054				/*
 1055				 * The allocation overlaps the end of the crash
 1056				 * region; trim its start up to crashk_end.
 1057				 */
1058				mem_alloc_size -= crashk_end - memory;
1059				memory = crashk_end;
1060			} else if (memory < crashk_base && end > crashk_base &&
1061				   end < crashk_end)
 1062				/*
 1063				 * The allocation overlaps the start of the crash
 1064				 * region; chop off its end at crashk_base.
 1065				 */
1066				mem_alloc_size -= end - crashk_base;
1067#endif
1068			memblock_add(memory, mem_alloc_size);
1069			total += mem_alloc_size;
 1070			/* Restore the default mem_alloc_size. */
1071			mem_alloc_size = 4 << 20;
1072		} else {
1073			break;
1074		}
1075	}
1076	cvmx_bootmem_unlock();
1077#endif /* CONFIG_CRASH_DUMP */
1078
1079	/*
1080	 * Now that we've allocated the kernel memory it is safe to
1081	 * free the reserved region. We free it here so that builtin
1082	 * drivers can use the memory.
1083	 */
1084	if (octeon_reserve32_memory)
1085		cvmx_bootmem_free_named("CAVIUM_RESERVE32");
1086
1087	if (total == 0)
1088		panic("Unable to allocate memory from "
1089		      "cvmx_bootmem_phy_alloc");
1090}
1091
1092/*
1093 * Emit one character to the boot UART.	 Exported for use by the
1094 * watchdog timer.
1095 */
1096void prom_putchar(char c)
1097{
1098	uint64_t lsrval;
1099
1100	/* Spin until there is room */
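    	/* LSR bit 5 (0x20) is the 16550 transmitter-holding-register-empty flag. */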
1101	do {
1102		lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
1103	} while ((lsrval & 0x20) == 0);
1104
1105	/* Write the byte */
1106	cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
1107}
1108EXPORT_SYMBOL(prom_putchar);
1109
1110void __init prom_free_prom_memory(void)
1111{
1112	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
1113		/* Check for presence of Core-14449 fix.  */
1114		u32 insn;
1115		u32 *foo;
1116
1117		foo = &insn;
1118
1119		asm volatile("# before" : : : "memory");
1120		prefetch(foo);
1121		asm volatile(
1122			".set push\n\t"
1123			".set noreorder\n\t"
1124			"bal 1f\n\t"
1125			"nop\n"
1126			"1:\tlw %0,-12($31)\n\t"
1127			".set pop\n\t"
1128			: "=r" (insn) : : "$31", "memory");
1129
1130		if ((insn >> 26) != 0x33)
1131			panic("No PREF instruction at Core-14449 probe point.");
1132
1133		if (((insn >> 16) & 0x1f) != 28)
1134			panic("OCTEON II DCache prefetch workaround not in place (%04x).\n"
1135			      "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).",
1136			      insn);
1137	}
1138}
1139
1140void __init octeon_fill_mac_addresses(void);
1141
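    /*
     * Pick the device tree to use, in order of preference: a DTB appended
     * to the kernel image, one passed by the bootloader (bootinfo minor
     * version >= 3), or a built-in DTB matching the SoC model, which is
     * then pruned down to the devices actually present.
     */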
1142void __init device_tree_init(void)
1143{
1144	const void *fdt;
1145	bool do_prune;
1146	bool fill_mac;
1147
1148#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
1149	if (!fdt_check_header(&__appended_dtb)) {
1150		fdt = &__appended_dtb;
1151		do_prune = false;
1152		fill_mac = true;
1153		pr_info("Using appended Device Tree.\n");
1154	} else
1155#endif
1156	if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
1157		fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
1158		if (fdt_check_header(fdt))
1159			panic("Corrupt Device Tree passed to kernel.");
1160		do_prune = false;
1161		fill_mac = false;
1162		pr_info("Using passed Device Tree.\n");
1163	} else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
1164		fdt = &__dtb_octeon_68xx_begin;
1165		do_prune = true;
1166		fill_mac = true;
1167	} else {
1168		fdt = &__dtb_octeon_3xxx_begin;
1169		do_prune = true;
1170		fill_mac = true;
1171	}
1172
1173	initial_boot_params = (void *)fdt;
1174
1175	if (do_prune) {
1176		octeon_prune_device_tree();
1177		pr_info("Using internal Device Tree.\n");
1178	}
1179	if (fill_mac)
1180		octeon_fill_mac_addresses();
1181	unflatten_and_copy_device_tree();
1182	init_octeon_system_type();
1183}
1184
1185static int __initdata disable_octeon_edac_p;
1186
1187static int __init disable_octeon_edac(char *str)
1188{
1189	disable_octeon_edac_p = 1;
1190	return 0;
1191}
1192early_param("disable_octeon_edac", disable_octeon_edac);
1193
1194static char *edac_device_names[] = {
1195	"octeon_l2c_edac",
1196	"octeon_pc_edac",
1197};
1198
1199static int __init edac_devinit(void)
1200{
1201	struct platform_device *dev;
1202	int i, err = 0;
1203	int num_lmc;
1204	char *name;
1205
1206	if (disable_octeon_edac_p)
1207		return 0;
1208
1209	for (i = 0; i < ARRAY_SIZE(edac_device_names); i++) {
1210		name = edac_device_names[i];
1211		dev = platform_device_register_simple(name, -1, NULL, 0);
1212		if (IS_ERR(dev)) {
1213			pr_err("Registration of %s failed!\n", name);
1214			err = PTR_ERR(dev);
1215		}
1216	}
1217
1218	num_lmc = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 :
1219		(OCTEON_IS_MODEL(OCTEON_CN56XX) ? 2 : 1);
1220	for (i = 0; i < num_lmc; i++) {
1221		dev = platform_device_register_simple("octeon_lmc_edac",
1222						      i, NULL, 0);
1223		if (IS_ERR(dev)) {
1224			pr_err("Registration of octeon_lmc_edac %d failed!\n", i);
1225			err = PTR_ERR(dev);
1226		}
1227	}
1228
1229	return err;
1230}
1231device_initcall(edac_devinit);
1232
1233static void __initdata *octeon_dummy_iospace;
1234
1235static int __init octeon_no_pci_init(void)
1236{
1237	/*
1238	 * Initially assume there is no PCI. The PCI/PCIe platform code will
1239	 * later re-initialize these to correct values if they are present.
1240	 */
1241	octeon_dummy_iospace = vzalloc(IO_SPACE_LIMIT);
1242	set_io_port_base((unsigned long)octeon_dummy_iospace);
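    	/*
    	 * An inverted range (start > end) leaves the I/O port resource
    	 * empty until real PCI I/O space is registered.
    	 */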
1243	ioport_resource.start = RESOURCE_SIZE_MAX;
1244	ioport_resource.end = 0;
1245	return 0;
1246}
1247core_initcall(octeon_no_pci_init);
1248
1249static int __init octeon_no_pci_release(void)
1250{
1251	/*
 1252	 * Free the dummy I/O space if real PCI I/O space has replaced it.
1253	 */
1254	if ((unsigned long)octeon_dummy_iospace != mips_io_port_base)
1255		vfree(octeon_dummy_iospace);
1256	return 0;
1257}
1258late_initcall(octeon_no_pci_release);