   1/*
   2 * PCI / PCI-X / PCI-Express support for 4xx parts
   3 *
   4 * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
   5 *
   6 * Most PCI Express code is coming from Stefan Roese implementation for
   7 * arch/ppc in the Denx tree, slightly reworked by me.
   8 *
   9 * Copyright 2007 DENX Software Engineering, Stefan Roese <sr@denx.de>
  10 *
  11 * Some of that comes itself from a previous implementation for 440SPE only
  12 * by Roland Dreier:
  13 *
  14 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
  15 * Roland Dreier <rolandd@cisco.com>
  16 *
  17 */
  18
  19#undef DEBUG
  20
  21#include <linux/kernel.h>
  22#include <linux/pci.h>
  23#include <linux/init.h>
  24#include <linux/of.h>
  25#include <linux/bootmem.h>
  26#include <linux/delay.h>
  27#include <linux/slab.h>
  28
  29#include <asm/io.h>
  30#include <asm/pci-bridge.h>
  31#include <asm/machdep.h>
  32#include <asm/dcr.h>
  33#include <asm/dcr-regs.h>
  34#include <mm/mmu_decl.h>
  35
  36#include "ppc4xx_pci.h"
  37
  38static int dma_offset_set;
  39
  40#define U64_TO_U32_LOW(val)	((u32)((val) & 0x00000000ffffffffULL))
  41#define U64_TO_U32_HIGH(val)	((u32)((val) >> 32))
  42
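     /* The RES_TO_U32 helpers below split a resource_size_t into the 32-bit
      * halves the mapping registers expect; on 32-bit-only resource builds
      * the high half is simply 0 and the low half is the value itself.
      */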
  43#define RES_TO_U32_LOW(val)	\
  44	((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_LOW(val) : (val))
  45#define RES_TO_U32_HIGH(val)	\
  46	((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_HIGH(val) : (0))
  47
  48static inline int ppc440spe_revA(void)
  49{
  50	/* Catch both 440SPe variants, with and without RAID6 support */
   51	if ((mfspr(SPRN_PVR) & 0xffefffff) == 0x53421890)
   52		return 1;
   53	else
   54		return 0;
  55}
  56
  57static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev)
  58{
  59	struct pci_controller *hose;
  60	int i;
  61
  62	if (dev->devfn != 0 || dev->bus->self != NULL)
  63		return;
  64
  65	hose = pci_bus_to_host(dev->bus);
  66	if (hose == NULL)
  67		return;
  68
  69	if (!of_device_is_compatible(hose->dn, "ibm,plb-pciex") &&
  70	    !of_device_is_compatible(hose->dn, "ibm,plb-pcix") &&
  71	    !of_device_is_compatible(hose->dn, "ibm,plb-pci"))
  72		return;
  73
  74	if (of_device_is_compatible(hose->dn, "ibm,plb440epx-pci") ||
  75		of_device_is_compatible(hose->dn, "ibm,plb440grx-pci")) {
  76		hose->indirect_type |= PPC_INDIRECT_TYPE_BROKEN_MRM;
  77	}
  78
  79	/* Hide the PCI host BARs from the kernel as their content doesn't
  80	 * fit well in the resource management
  81	 */
  82	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
  83		dev->resource[i].start = dev->resource[i].end = 0;
  84		dev->resource[i].flags = 0;
  85	}
  86
  87	printk(KERN_INFO "PCI: Hiding 4xx host bridge resources %s\n",
  88	       pci_name(dev));
  89}
  90DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, fixup_ppc4xx_pci_bridge);
  91
  92static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
  93					  void __iomem *reg,
  94					  struct resource *res)
  95{
  96	u64 size;
  97	const u32 *ranges;
  98	int rlen;
  99	int pna = of_n_addr_cells(hose->dn);
 100	int np = pna + 5;
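     	/* A dma-ranges entry is <pci-space:1> <pci-addr:2> <parent-addr:pna>
     	 * <size:2> cells, hence the np = pna + 5 cells walked per entry below.
     	 */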
 101
 102	/* Default */
 103	res->start = 0;
 104	size = 0x80000000;
 105	res->end = size - 1;
 106	res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
 107
 108	/* Get dma-ranges property */
 109	ranges = of_get_property(hose->dn, "dma-ranges", &rlen);
 110	if (ranges == NULL)
 111		goto out;
 112
 113	/* Walk it */
 114	while ((rlen -= np * 4) >= 0) {
 115		u32 pci_space = ranges[0];
 116		u64 pci_addr = of_read_number(ranges + 1, 2);
 117		u64 cpu_addr = of_translate_dma_address(hose->dn, ranges + 3);
 118		size = of_read_number(ranges + pna + 3, 2);
 119		ranges += np;
 120		if (cpu_addr == OF_BAD_ADDR || size == 0)
 121			continue;
 122
 123		/* We only care about memory */
 124		if ((pci_space & 0x03000000) != 0x02000000)
 125			continue;
 126
 127		/* We currently only support memory at 0, and pci_addr
 128		 * within 32 bits space
 129		 */
 130		if (cpu_addr != 0 || pci_addr > 0xffffffff) {
 131			printk(KERN_WARNING "%s: Ignored unsupported dma range"
 132			       " 0x%016llx...0x%016llx -> 0x%016llx\n",
 133			       hose->dn->full_name,
 134			       pci_addr, pci_addr + size - 1, cpu_addr);
 135			continue;
 136		}
 137
 138		/* Check if not prefetchable */
 139		if (!(pci_space & 0x40000000))
 140			res->flags &= ~IORESOURCE_PREFETCH;
 141
 142
 143		/* Use that */
 144		res->start = pci_addr;
 145		/* Beware of 32 bits resources */
 146		if (sizeof(resource_size_t) == sizeof(u32) &&
 147		    (pci_addr + size) > 0x100000000ull)
 148			res->end = 0xffffffff;
 149		else
 150			res->end = res->start + size - 1;
 151		break;
 152	}
 153
 154	/* We only support one global DMA offset */
 155	if (dma_offset_set && pci_dram_offset != res->start) {
 156		printk(KERN_ERR "%s: dma-ranges(s) mismatch\n",
 157		       hose->dn->full_name);
 158		return -ENXIO;
 159	}
 160
 161	/* Check that we can fit all of memory as we don't support
 162	 * DMA bounce buffers
 163	 */
 164	if (size < total_memory) {
 165		printk(KERN_ERR "%s: dma-ranges too small "
 166		       "(size=%llx total_memory=%llx)\n",
 167		       hose->dn->full_name, size, (u64)total_memory);
 168		return -ENXIO;
 169	}
 170
 171	/* Check we are a power of 2 size and that base is a multiple of size*/
 172	if ((size & (size - 1)) != 0  ||
 173	    (res->start & (size - 1)) != 0) {
 174		printk(KERN_ERR "%s: dma-ranges unaligned\n",
 175		       hose->dn->full_name);
 176		return -ENXIO;
 177	}
 178
 179	/* Check that we are fully contained within 32 bits space */
 180	if (res->end > 0xffffffff) {
 181		printk(KERN_ERR "%s: dma-ranges outside of 32 bits space\n",
 182		       hose->dn->full_name);
 183		return -ENXIO;
 184	}
 185 out:
 186	dma_offset_set = 1;
 187	pci_dram_offset = res->start;
 188
 189	printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n",
 190	       pci_dram_offset);
 191	return 0;
 192}
 193
 194/*
 195 * 4xx PCI 2.x part
 196 */
 197
 198static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller	*hose,
 199					   void __iomem			*reg,
 200					   u64				plb_addr,
 201					   u64				pci_addr,
 202					   u64				size,
 203					   unsigned int			flags,
 204					   int				index)
 205{
 206	u32 ma, pcila, pciha;
 207
  208	/* Hack warning ! The "old" PCI 2.x cell only lets us configure the low
 209	 * 32-bit of incoming PLB addresses. The top 4 bits of the 36-bit
 210	 * address are actually hard wired to a value that appears to depend
 211	 * on the specific SoC. For example, it's 0 on 440EP and 1 on 440EPx.
 212	 *
 213	 * The trick here is we just crop those top bits and ignore them when
 214	 * programming the chip. That means the device-tree has to be right
 215	 * for the specific part used (we don't print a warning if it's wrong
 216	 * but on the other hand, you'll crash quickly enough), but at least
 217	 * this code should work whatever the hard coded value is
 218	 */
 219	plb_addr &= 0xffffffffull;
 220
 221	/* Note: Due to the above hack, the test below doesn't actually test
  222	 * if your address is above 4G, but it tests that address and
 223	 * (address + size) are both contained in the same 4G
 224	 */
 225	if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
 226	    size < 0x1000 || (plb_addr & (size - 1)) != 0) {
 227		printk(KERN_WARNING "%s: Resource out of range\n",
 228		       hose->dn->full_name);
 229		return -1;
 230	}
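     	/* PMMnMA takes a size mask in the upper bits plus an enable bit (0x1)
     	 * and a prefetchable bit (0x2); a 256MB window, for example, encodes
     	 * as (0xffffffff << 28) | 1 = 0xf0000001.
     	 */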
 231	ma = (0xffffffffu << ilog2(size)) | 1;
 232	if (flags & IORESOURCE_PREFETCH)
 233		ma |= 2;
 234
 235	pciha = RES_TO_U32_HIGH(pci_addr);
 236	pcila = RES_TO_U32_LOW(pci_addr);
 237
 238	writel(plb_addr, reg + PCIL0_PMM0LA + (0x10 * index));
 239	writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * index));
 240	writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * index));
 241	writel(ma, reg + PCIL0_PMM0MA + (0x10 * index));
 242
 243	return 0;
 244}
 245
 246static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
 247					     void __iomem *reg)
 248{
 249	int i, j, found_isa_hole = 0;
 250
 251	/* Setup outbound memory windows */
 252	for (i = j = 0; i < 3; i++) {
 253		struct resource *res = &hose->mem_resources[i];
 254
 255		/* we only care about memory windows */
 256		if (!(res->flags & IORESOURCE_MEM))
 257			continue;
 258		if (j > 2) {
 259			printk(KERN_WARNING "%s: Too many ranges\n",
 260			       hose->dn->full_name);
 261			break;
 262		}
 263
 264		/* Configure the resource */
 265		if (ppc4xx_setup_one_pci_PMM(hose, reg,
 266					     res->start,
 267					     res->start - hose->pci_mem_offset,
 268					     resource_size(res),
 269					     res->flags,
 270					     j) == 0) {
 271			j++;
 272
 273			/* If the resource PCI address is 0 then we have our
 274			 * ISA memory hole
 275			 */
 276			if (res->start == hose->pci_mem_offset)
 277				found_isa_hole = 1;
 278		}
 279	}
 280
 281	/* Handle ISA memory hole if not already covered */
 282	if (j <= 2 && !found_isa_hole && hose->isa_mem_size)
 283		if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0,
 284					     hose->isa_mem_size, 0, j) == 0)
 285			printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
 286			       hose->dn->full_name);
 287}
 288
 289static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
 290					     void __iomem *reg,
 291					     const struct resource *res)
 292{
 293	resource_size_t size = resource_size(res);
 294	u32 sa;
 295
 296	/* Calculate window size */
 297	sa = (0xffffffffu << ilog2(size)) | 1;
 298	sa |= 0x1;
 299
 300	/* RAM is always at 0 local for now */
 301	writel(0, reg + PCIL0_PTM1LA);
 302	writel(sa, reg + PCIL0_PTM1MS);
 303
 304	/* Map on PCI side */
 305	early_write_config_dword(hose, hose->first_busno, 0,
 306				 PCI_BASE_ADDRESS_1, res->start);
 307	early_write_config_dword(hose, hose->first_busno, 0,
 308				 PCI_BASE_ADDRESS_2, 0x00000000);
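     	/* 0x0006 = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER: enable memory
     	 * decoding and bus mastering on the host bridge itself.
     	 */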
 309	early_write_config_word(hose, hose->first_busno, 0,
 310				PCI_COMMAND, 0x0006);
 311}
 312
 313static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
 314{
 315	/* NYI */
 316	struct resource rsrc_cfg;
 317	struct resource rsrc_reg;
 318	struct resource dma_window;
 319	struct pci_controller *hose = NULL;
 320	void __iomem *reg = NULL;
 321	const int *bus_range;
 322	int primary = 0;
 323
 324	/* Check if device is enabled */
 325	if (!of_device_is_available(np)) {
 326		printk(KERN_INFO "%s: Port disabled via device-tree\n",
 327		       np->full_name);
 328		return;
 329	}
 330
 331	/* Fetch config space registers address */
 332	if (of_address_to_resource(np, 0, &rsrc_cfg)) {
 333		printk(KERN_ERR "%s: Can't get PCI config register base !",
 334		       np->full_name);
 335		return;
 336	}
 337	/* Fetch host bridge internal registers address */
 338	if (of_address_to_resource(np, 3, &rsrc_reg)) {
 339		printk(KERN_ERR "%s: Can't get PCI internal register base !",
 340		       np->full_name);
 341		return;
 342	}
 343
 344	/* Check if primary bridge */
 345	if (of_get_property(np, "primary", NULL))
 346		primary = 1;
 347
 348	/* Get bus range if any */
 349	bus_range = of_get_property(np, "bus-range", NULL);
 350
 351	/* Map registers */
 352	reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
 353	if (reg == NULL) {
 354		printk(KERN_ERR "%s: Can't map registers !", np->full_name);
 355		goto fail;
 356	}
 357
 358	/* Allocate the host controller data structure */
 359	hose = pcibios_alloc_controller(np);
 360	if (!hose)
 361		goto fail;
 362
 363	hose->first_busno = bus_range ? bus_range[0] : 0x0;
 364	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 365
 366	/* Setup config space */
 367	setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0);
 368
 369	/* Disable all windows */
 370	writel(0, reg + PCIL0_PMM0MA);
 371	writel(0, reg + PCIL0_PMM1MA);
 372	writel(0, reg + PCIL0_PMM2MA);
 373	writel(0, reg + PCIL0_PTM1MS);
 374	writel(0, reg + PCIL0_PTM2MS);
 375
 376	/* Parse outbound mapping resources */
 377	pci_process_bridge_OF_ranges(hose, np, primary);
 378
 379	/* Parse inbound mapping resources */
 380	if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
 381		goto fail;
 382
 383	/* Configure outbound ranges POMs */
 384	ppc4xx_configure_pci_PMMs(hose, reg);
 385
 386	/* Configure inbound ranges PIMs */
 387	ppc4xx_configure_pci_PTMs(hose, reg, &dma_window);
 388
 389	/* We don't need the registers anymore */
 390	iounmap(reg);
 391	return;
 392
 393 fail:
 394	if (hose)
 395		pcibios_free_controller(hose);
 396	if (reg)
 397		iounmap(reg);
 398}
 399
 400/*
 401 * 4xx PCI-X part
 402 */
 403
 404static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller	*hose,
 405					    void __iomem		*reg,
 406					    u64				plb_addr,
 407					    u64				pci_addr,
 408					    u64				size,
 409					    unsigned int		flags,
 410					    int				index)
 411{
 412	u32 lah, lal, pciah, pcial, sa;
 413
 414	if (!is_power_of_2(size) || size < 0x1000 ||
 415	    (plb_addr & (size - 1)) != 0) {
 416		printk(KERN_WARNING "%s: Resource out of range\n",
 417		       hose->dn->full_name);
 418		return -1;
 419	}
 420
 421	/* Calculate register values */
 422	lah = RES_TO_U32_HIGH(plb_addr);
 423	lal = RES_TO_U32_LOW(plb_addr);
 424	pciah = RES_TO_U32_HIGH(pci_addr);
 425	pcial = RES_TO_U32_LOW(pci_addr);
 426	sa = (0xffffffffu << ilog2(size)) | 0x1;
 427
 428	/* Program register values */
 429	if (index == 0) {
 430		writel(lah, reg + PCIX0_POM0LAH);
 431		writel(lal, reg + PCIX0_POM0LAL);
 432		writel(pciah, reg + PCIX0_POM0PCIAH);
 433		writel(pcial, reg + PCIX0_POM0PCIAL);
 434		writel(sa, reg + PCIX0_POM0SA);
 435	} else {
 436		writel(lah, reg + PCIX0_POM1LAH);
 437		writel(lal, reg + PCIX0_POM1LAL);
 438		writel(pciah, reg + PCIX0_POM1PCIAH);
 439		writel(pcial, reg + PCIX0_POM1PCIAL);
 440		writel(sa, reg + PCIX0_POM1SA);
 441	}
 442
 443	return 0;
 444}
 445
 446static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
 447					      void __iomem *reg)
 448{
 449	int i, j, found_isa_hole = 0;
 450
 451	/* Setup outbound memory windows */
 452	for (i = j = 0; i < 3; i++) {
 453		struct resource *res = &hose->mem_resources[i];
 454
 455		/* we only care about memory windows */
 456		if (!(res->flags & IORESOURCE_MEM))
 457			continue;
 458		if (j > 1) {
 459			printk(KERN_WARNING "%s: Too many ranges\n",
 460			       hose->dn->full_name);
 461			break;
 462		}
 463
 464		/* Configure the resource */
 465		if (ppc4xx_setup_one_pcix_POM(hose, reg,
 466					      res->start,
 467					      res->start - hose->pci_mem_offset,
 468					      resource_size(res),
 469					      res->flags,
 470					      j) == 0) {
 471			j++;
 472
 473			/* If the resource PCI address is 0 then we have our
 474			 * ISA memory hole
 475			 */
 476			if (res->start == hose->pci_mem_offset)
 477				found_isa_hole = 1;
 478		}
 479	}
 480
 481	/* Handle ISA memory hole if not already covered */
 482	if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
 483		if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0,
 484					      hose->isa_mem_size, 0, j) == 0)
 485			printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
 486			       hose->dn->full_name);
 487}
 488
 489static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
 490					      void __iomem *reg,
 491					      const struct resource *res,
 492					      int big_pim,
 493					      int enable_msi_hole)
 494{
 495	resource_size_t size = resource_size(res);
 496	u32 sa;
 497
 498	/* RAM is always at 0 */
 499	writel(0x00000000, reg + PCIX0_PIM0LAH);
 500	writel(0x00000000, reg + PCIX0_PIM0LAL);
 501
 502	/* Calculate window size */
 503	sa = (0xffffffffu << ilog2(size)) | 1;
 504	sa |= 0x1;
 505	if (res->flags & IORESOURCE_PREFETCH)
 506		sa |= 0x2;
 507	if (enable_msi_hole)
 508		sa |= 0x4;
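     	/* As with the 4xx PTM registers, PIM0SA appears to hold a size mask
     	 * plus control bits: bit 0 enables the window, bit 1 marks it
     	 * prefetchable and bit 2 opens the MSI inbound hole.
     	 */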
 509	writel(sa, reg + PCIX0_PIM0SA);
 510	if (big_pim)
 511		writel(0xffffffff, reg + PCIX0_PIM0SAH);
 512
 513	/* Map on PCI side */
 514	writel(0x00000000, reg + PCIX0_BAR0H);
 515	writel(res->start, reg + PCIX0_BAR0L);
 516	writew(0x0006, reg + PCIX0_COMMAND);
 517}
 518
 519static void __init ppc4xx_probe_pcix_bridge(struct device_node *np)
 520{
 521	struct resource rsrc_cfg;
 522	struct resource rsrc_reg;
 523	struct resource dma_window;
 524	struct pci_controller *hose = NULL;
 525	void __iomem *reg = NULL;
 526	const int *bus_range;
 527	int big_pim = 0, msi = 0, primary = 0;
 528
 529	/* Fetch config space registers address */
 530	if (of_address_to_resource(np, 0, &rsrc_cfg)) {
  531		printk(KERN_ERR "%s: Can't get PCI-X config register base !",
 532		       np->full_name);
 533		return;
 534	}
 535	/* Fetch host bridge internal registers address */
 536	if (of_address_to_resource(np, 3, &rsrc_reg)) {
 537		printk(KERN_ERR "%s: Can't get PCI-X internal register base !",
 538		       np->full_name);
 539		return;
 540	}
 541
 542	/* Check if it supports large PIMs (440GX) */
 543	if (of_get_property(np, "large-inbound-windows", NULL))
 544		big_pim = 1;
 545
 546	/* Check if we should enable MSIs inbound hole */
 547	if (of_get_property(np, "enable-msi-hole", NULL))
 548		msi = 1;
 549
 550	/* Check if primary bridge */
 551	if (of_get_property(np, "primary", NULL))
 552		primary = 1;
 553
 554	/* Get bus range if any */
 555	bus_range = of_get_property(np, "bus-range", NULL);
 556
 557	/* Map registers */
 558	reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
 559	if (reg == NULL) {
 560		printk(KERN_ERR "%s: Can't map registers !", np->full_name);
 561		goto fail;
 562	}
 563
 564	/* Allocate the host controller data structure */
 565	hose = pcibios_alloc_controller(np);
 566	if (!hose)
 567		goto fail;
 568
 569	hose->first_busno = bus_range ? bus_range[0] : 0x0;
 570	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 571
 572	/* Setup config space */
 573	setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4,
 574					PPC_INDIRECT_TYPE_SET_CFG_TYPE);
 575
 576	/* Disable all windows */
 577	writel(0, reg + PCIX0_POM0SA);
 578	writel(0, reg + PCIX0_POM1SA);
 579	writel(0, reg + PCIX0_POM2SA);
 580	writel(0, reg + PCIX0_PIM0SA);
 581	writel(0, reg + PCIX0_PIM1SA);
 582	writel(0, reg + PCIX0_PIM2SA);
 583	if (big_pim) {
 584		writel(0, reg + PCIX0_PIM0SAH);
 585		writel(0, reg + PCIX0_PIM2SAH);
 586	}
 587
 588	/* Parse outbound mapping resources */
 589	pci_process_bridge_OF_ranges(hose, np, primary);
 590
 591	/* Parse inbound mapping resources */
 592	if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
 593		goto fail;
 594
 595	/* Configure outbound ranges POMs */
 596	ppc4xx_configure_pcix_POMs(hose, reg);
 597
 598	/* Configure inbound ranges PIMs */
 599	ppc4xx_configure_pcix_PIMs(hose, reg, &dma_window, big_pim, msi);
 600
 601	/* We don't need the registers anymore */
 602	iounmap(reg);
 603	return;
 604
 605 fail:
 606	if (hose)
 607		pcibios_free_controller(hose);
 608	if (reg)
 609		iounmap(reg);
 610}
 611
 612#ifdef CONFIG_PPC4xx_PCI_EXPRESS
 613
 614/*
 615 * 4xx PCI-Express part
 616 *
 617 * We support 3 parts currently based on the compatible property:
 618 *
 619 * ibm,plb-pciex-440spe
 620 * ibm,plb-pciex-405ex
 621 * ibm,plb-pciex-460ex
 622 *
 623 * Anything else will be rejected for now as they are all subtly
 624 * different unfortunately.
 625 *
 626 */
 627
 628#define MAX_PCIE_BUS_MAPPED	0x40
 629
 630struct ppc4xx_pciex_port
 631{
 632	struct pci_controller	*hose;
 633	struct device_node	*node;
 634	unsigned int		index;
 635	int			endpoint;
 636	int			link;
 637	int			has_ibpre;
 638	unsigned int		sdr_base;
 639	dcr_host_t		dcrs;
 640	struct resource		cfg_space;
 641	struct resource		utl_regs;
 642	void __iomem		*utl_base;
 643};
 644
 645static struct ppc4xx_pciex_port *ppc4xx_pciex_ports;
 646static unsigned int ppc4xx_pciex_port_count;
 647
 648struct ppc4xx_pciex_hwops
 649{
 650	int (*core_init)(struct device_node *np);
 651	int (*port_init_hw)(struct ppc4xx_pciex_port *port);
 652	int (*setup_utl)(struct ppc4xx_pciex_port *port);
 653	void (*check_link)(struct ppc4xx_pciex_port *port);
 654};
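     /* Per-SoC hooks: core_init() runs once and returns the number of ports,
      * while the remaining callbacks handle SDR/PHY bring-up, UTL buffer
      * setup and link detection for each individual port.
      */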
 655
 656static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops;
 657
 658static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
 659					   unsigned int sdr_offset,
 660					   unsigned int mask,
 661					   unsigned int value,
 662					   int timeout_ms)
 663{
 664	u32 val;
 665
 666	while(timeout_ms--) {
 667		val = mfdcri(SDR0, port->sdr_base + sdr_offset);
 668		if ((val & mask) == value) {
 669			pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n",
 670				 port->index, sdr_offset, timeout_ms, val);
 671			return 0;
 672		}
 673		msleep(1);
 674	}
 675	return -1;
 676}
 677
 678static int __init ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port *port)
 679{
 680	/* Wait for reset to complete */
 681	if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) {
 682		printk(KERN_WARNING "PCIE%d: PGRST failed\n",
 683		       port->index);
 684		return -1;
 685	}
 686	return 0;
 687}
 688
 689
 690static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port)
 691{
 692	printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
 693
  694	/* Check for card presence detect if supported; if not, just wait for
 695	 * link unconditionally.
 696	 *
 697	 * note that we don't fail if there is no link, we just filter out
 698	 * config space accesses. That way, it will be easier to implement
 699	 * hotplug later on.
 700	 */
 701	if (!port->has_ibpre ||
 702	    !ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
 703				      1 << 28, 1 << 28, 100)) {
 704		printk(KERN_INFO
 705		       "PCIE%d: Device detected, waiting for link...\n",
 706		       port->index);
 707		if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
 708					     0x1000, 0x1000, 2000))
 709			printk(KERN_WARNING
 710			       "PCIE%d: Link up failed\n", port->index);
 711		else {
 712			printk(KERN_INFO
 713			       "PCIE%d: link is up !\n", port->index);
 714			port->link = 1;
 715		}
 716	} else
 717		printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
 718}
 719
 720#ifdef CONFIG_44x
 721
 722/* Check various reset bits of the 440SPe PCIe core */
 723static int __init ppc440spe_pciex_check_reset(struct device_node *np)
 724{
 725	u32 valPE0, valPE1, valPE2;
 726	int err = 0;
 727
 728	/* SDR0_PEGPLLLCT1 reset */
 729	if (!(mfdcri(SDR0, PESDR0_PLLLCT1) & 0x01000000)) {
 730		/*
 731		 * the PCIe core was probably already initialised
 732		 * by firmware - let's re-reset RCSSET regs
 733		 *
 734		 * -- Shouldn't we also re-reset the whole thing ? -- BenH
 735		 */
 736		pr_debug("PCIE: SDR0_PLLLCT1 already reset.\n");
 737		mtdcri(SDR0, PESDR0_440SPE_RCSSET, 0x01010000);
 738		mtdcri(SDR0, PESDR1_440SPE_RCSSET, 0x01010000);
 739		mtdcri(SDR0, PESDR2_440SPE_RCSSET, 0x01010000);
 740	}
 741
 742	valPE0 = mfdcri(SDR0, PESDR0_440SPE_RCSSET);
 743	valPE1 = mfdcri(SDR0, PESDR1_440SPE_RCSSET);
 744	valPE2 = mfdcri(SDR0, PESDR2_440SPE_RCSSET);
 745
 746	/* SDR0_PExRCSSET rstgu */
 747	if (!(valPE0 & 0x01000000) ||
 748	    !(valPE1 & 0x01000000) ||
 749	    !(valPE2 & 0x01000000)) {
 750		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstgu error\n");
 751		err = -1;
 752	}
 753
 754	/* SDR0_PExRCSSET rstdl */
 755	if (!(valPE0 & 0x00010000) ||
 756	    !(valPE1 & 0x00010000) ||
 757	    !(valPE2 & 0x00010000)) {
 758		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstdl error\n");
 759		err = -1;
 760	}
 761
 762	/* SDR0_PExRCSSET rstpyn */
 763	if ((valPE0 & 0x00001000) ||
 764	    (valPE1 & 0x00001000) ||
 765	    (valPE2 & 0x00001000)) {
 766		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstpyn error\n");
 767		err = -1;
 768	}
 769
 770	/* SDR0_PExRCSSET hldplb */
 771	if ((valPE0 & 0x10000000) ||
 772	    (valPE1 & 0x10000000) ||
 773	    (valPE2 & 0x10000000)) {
 774		printk(KERN_INFO "PCIE: SDR0_PExRCSSET hldplb error\n");
 775		err = -1;
 776	}
 777
 778	/* SDR0_PExRCSSET rdy */
 779	if ((valPE0 & 0x00100000) ||
 780	    (valPE1 & 0x00100000) ||
 781	    (valPE2 & 0x00100000)) {
 782		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rdy error\n");
 783		err = -1;
 784	}
 785
 786	/* SDR0_PExRCSSET shutdown */
 787	if ((valPE0 & 0x00000100) ||
 788	    (valPE1 & 0x00000100) ||
 789	    (valPE2 & 0x00000100)) {
 790		printk(KERN_INFO "PCIE: SDR0_PExRCSSET shutdown error\n");
 791		err = -1;
 792	}
 793
 794	return err;
 795}
 796
 797/* Global PCIe core initializations for 440SPe core */
 798static int __init ppc440spe_pciex_core_init(struct device_node *np)
 799{
 800	int time_out = 20;
 801
 802	/* Set PLL clock receiver to LVPECL */
 803	dcri_clrset(SDR0, PESDR0_PLLLCT1, 0, 1 << 28);
 804
 805	/* Shouldn't we do all the calibration stuff etc... here ? */
 806	if (ppc440spe_pciex_check_reset(np))
 807		return -ENXIO;
 808
 809	if (!(mfdcri(SDR0, PESDR0_PLLLCT2) & 0x10000)) {
 810		printk(KERN_INFO "PCIE: PESDR_PLLCT2 resistance calibration "
 811		       "failed (0x%08x)\n",
 812		       mfdcri(SDR0, PESDR0_PLLLCT2));
 813		return -1;
 814	}
 815
 816	/* De-assert reset of PCIe PLL, wait for lock */
 817	dcri_clrset(SDR0, PESDR0_PLLLCT1, 1 << 24, 0);
 818	udelay(3);
 819
 820	while (time_out) {
 821		if (!(mfdcri(SDR0, PESDR0_PLLLCT3) & 0x10000000)) {
 822			time_out--;
 823			udelay(1);
 824		} else
 825			break;
 826	}
 827	if (!time_out) {
 828		printk(KERN_INFO "PCIE: VCO output not locked\n");
 829		return -1;
 830	}
 831
 832	pr_debug("PCIE initialization OK\n");
 833
 834	return 3;
 835}
 836
 837static int ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
 838{
 839	u32 val = 1 << 24;
 840
 841	if (port->endpoint)
 842		val = PTYPE_LEGACY_ENDPOINT << 20;
 843	else
 844		val = PTYPE_ROOT_PORT << 20;
 845
 846	if (port->index == 0)
 847		val |= LNKW_X8 << 12;
 848	else
 849		val |= LNKW_X4 << 12;
 850
 851	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
 852	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x20222222);
 853	if (ppc440spe_revA())
 854		mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x11000000);
 855	mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL0SET1, 0x35000000);
 856	mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL1SET1, 0x35000000);
 857	mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL2SET1, 0x35000000);
 858	mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL3SET1, 0x35000000);
 859	if (port->index == 0) {
 860		mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL4SET1,
 861		       0x35000000);
 862		mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL5SET1,
 863		       0x35000000);
 864		mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL6SET1,
 865		       0x35000000);
 866		mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL7SET1,
 867		       0x35000000);
 868	}
 869	dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
 870			(1 << 24) | (1 << 16), 1 << 12);
 871
 872	return ppc4xx_pciex_port_reset_sdr(port);
 873}
 874
 875static int ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
 876{
 877	return ppc440spe_pciex_init_port_hw(port);
 878}
 879
 880static int ppc440speB_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
 881{
 882	int rc = ppc440spe_pciex_init_port_hw(port);
 883
 884	port->has_ibpre = 1;
 885
 886	return rc;
 887}
 888
 889static int ppc440speA_pciex_init_utl(struct ppc4xx_pciex_port *port)
 890{
 891	/* XXX Check what that value means... I hate magic */
 892	dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x68782800);
 893
 894	/*
 895	 * Set buffer allocations and then assert VRB and TXE.
 896	 */
 897	out_be32(port->utl_base + PEUTL_OUTTR,   0x08000000);
 898	out_be32(port->utl_base + PEUTL_INTR,    0x02000000);
 899	out_be32(port->utl_base + PEUTL_OPDBSZ,  0x10000000);
 900	out_be32(port->utl_base + PEUTL_PBBSZ,   0x53000000);
 901	out_be32(port->utl_base + PEUTL_IPHBSZ,  0x08000000);
 902	out_be32(port->utl_base + PEUTL_IPDBSZ,  0x10000000);
 903	out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
 904	out_be32(port->utl_base + PEUTL_PCTL,    0x80800066);
 905
 906	return 0;
 907}
 908
 909static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port)
 910{
 911	/* Report CRS to the operating system */
 912	out_be32(port->utl_base + PEUTL_PBCTL,    0x08000000);
 913
 914	return 0;
 915}
 916
 917static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
 918{
 919	.core_init	= ppc440spe_pciex_core_init,
 920	.port_init_hw	= ppc440speA_pciex_init_port_hw,
 921	.setup_utl	= ppc440speA_pciex_init_utl,
 922	.check_link	= ppc4xx_pciex_check_link_sdr,
 923};
 924
 925static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
 926{
 927	.core_init	= ppc440spe_pciex_core_init,
 928	.port_init_hw	= ppc440speB_pciex_init_port_hw,
 929	.setup_utl	= ppc440speB_pciex_init_utl,
 930	.check_link	= ppc4xx_pciex_check_link_sdr,
 931};
 932
 933static int __init ppc460ex_pciex_core_init(struct device_node *np)
 934{
 935	/* Nothing to do, return 2 ports */
 936	return 2;
 937}
 938
 939static int ppc460ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
 940{
 941	u32 val;
 942	u32 utlset1;
 943
 944	if (port->endpoint)
 945		val = PTYPE_LEGACY_ENDPOINT << 20;
 946	else
 947		val = PTYPE_ROOT_PORT << 20;
 948
 949	if (port->index == 0) {
 950		val |= LNKW_X1 << 12;
 951		utlset1 = 0x20000000;
 952	} else {
 953		val |= LNKW_X4 << 12;
 954		utlset1 = 0x20101101;
 955	}
 956
 957	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
 958	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, utlset1);
 959	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01210000);
 960
 961	switch (port->index) {
 962	case 0:
 963		mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
 964		mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
 965		mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);
 966
 967		mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST,0x10000000);
 968		break;
 969
 970	case 1:
 971		mtdcri(SDR0, PESDR1_460EX_L0CDRCTL, 0x00003230);
 972		mtdcri(SDR0, PESDR1_460EX_L1CDRCTL, 0x00003230);
 973		mtdcri(SDR0, PESDR1_460EX_L2CDRCTL, 0x00003230);
 974		mtdcri(SDR0, PESDR1_460EX_L3CDRCTL, 0x00003230);
 975		mtdcri(SDR0, PESDR1_460EX_L0DRV, 0x00000130);
 976		mtdcri(SDR0, PESDR1_460EX_L1DRV, 0x00000130);
 977		mtdcri(SDR0, PESDR1_460EX_L2DRV, 0x00000130);
 978		mtdcri(SDR0, PESDR1_460EX_L3DRV, 0x00000130);
 979		mtdcri(SDR0, PESDR1_460EX_L0CLK, 0x00000006);
 980		mtdcri(SDR0, PESDR1_460EX_L1CLK, 0x00000006);
 981		mtdcri(SDR0, PESDR1_460EX_L2CLK, 0x00000006);
 982		mtdcri(SDR0, PESDR1_460EX_L3CLK, 0x00000006);
 983
 984		mtdcri(SDR0, PESDR1_460EX_PHY_CTL_RST,0x10000000);
 985		break;
 986	}
 987
 988	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
 989	       mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
 990	       (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));
 991
 992	/* Poll for PHY reset */
 993	/* XXX FIXME add timeout */
 994	switch (port->index) {
 995	case 0:
 996		while (!(mfdcri(SDR0, PESDR0_460EX_RSTSTA) & 0x1))
 997			udelay(10);
 998		break;
 999	case 1:
1000		while (!(mfdcri(SDR0, PESDR1_460EX_RSTSTA) & 0x1))
1001			udelay(10);
1002		break;
1003	}
1004
1005	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1006	       (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
1007		~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
1008	       PESDRx_RCSSET_RSTPYN);
1009
1010	port->has_ibpre = 1;
1011
1012	return ppc4xx_pciex_port_reset_sdr(port);
1013}
1014
1015static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
1016{
1017	dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
1018
1019	/*
1020	 * Set buffer allocations and then assert VRB and TXE.
1021	 */
1022	out_be32(port->utl_base + PEUTL_PBCTL,	0x0800000c);
1023	out_be32(port->utl_base + PEUTL_OUTTR,	0x08000000);
1024	out_be32(port->utl_base + PEUTL_INTR,	0x02000000);
1025	out_be32(port->utl_base + PEUTL_OPDBSZ,	0x04000000);
1026	out_be32(port->utl_base + PEUTL_PBBSZ,	0x00000000);
1027	out_be32(port->utl_base + PEUTL_IPHBSZ,	0x02000000);
1028	out_be32(port->utl_base + PEUTL_IPDBSZ,	0x04000000);
1029	out_be32(port->utl_base + PEUTL_RCIRQEN,0x00f00000);
1030	out_be32(port->utl_base + PEUTL_PCTL,	0x80800066);
1031
1032	return 0;
1033}
1034
1035static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
1036{
1037	.core_init	= ppc460ex_pciex_core_init,
1038	.port_init_hw	= ppc460ex_pciex_init_port_hw,
1039	.setup_utl	= ppc460ex_pciex_init_utl,
1040	.check_link	= ppc4xx_pciex_check_link_sdr,
1041};
1042
1043static int __init ppc460sx_pciex_core_init(struct device_node *np)
1044{
1045	/* HSS drive amplitude */
1046	mtdcri(SDR0, PESDR0_460SX_HSSL0DAMP, 0xB9843211);
1047	mtdcri(SDR0, PESDR0_460SX_HSSL1DAMP, 0xB9843211);
1048	mtdcri(SDR0, PESDR0_460SX_HSSL2DAMP, 0xB9843211);
1049	mtdcri(SDR0, PESDR0_460SX_HSSL3DAMP, 0xB9843211);
1050	mtdcri(SDR0, PESDR0_460SX_HSSL4DAMP, 0xB9843211);
1051	mtdcri(SDR0, PESDR0_460SX_HSSL5DAMP, 0xB9843211);
1052	mtdcri(SDR0, PESDR0_460SX_HSSL6DAMP, 0xB9843211);
1053	mtdcri(SDR0, PESDR0_460SX_HSSL7DAMP, 0xB9843211);
1054
1055	mtdcri(SDR0, PESDR1_460SX_HSSL0DAMP, 0xB9843211);
1056	mtdcri(SDR0, PESDR1_460SX_HSSL1DAMP, 0xB9843211);
1057	mtdcri(SDR0, PESDR1_460SX_HSSL2DAMP, 0xB9843211);
1058	mtdcri(SDR0, PESDR1_460SX_HSSL3DAMP, 0xB9843211);
1059
1060	mtdcri(SDR0, PESDR2_460SX_HSSL0DAMP, 0xB9843211);
1061	mtdcri(SDR0, PESDR2_460SX_HSSL1DAMP, 0xB9843211);
1062	mtdcri(SDR0, PESDR2_460SX_HSSL2DAMP, 0xB9843211);
1063	mtdcri(SDR0, PESDR2_460SX_HSSL3DAMP, 0xB9843211);
1064
1065	/* HSS TX pre-emphasis */
1066	mtdcri(SDR0, PESDR0_460SX_HSSL0COEFA, 0xDCB98987);
1067	mtdcri(SDR0, PESDR0_460SX_HSSL1COEFA, 0xDCB98987);
1068	mtdcri(SDR0, PESDR0_460SX_HSSL2COEFA, 0xDCB98987);
1069	mtdcri(SDR0, PESDR0_460SX_HSSL3COEFA, 0xDCB98987);
1070	mtdcri(SDR0, PESDR0_460SX_HSSL4COEFA, 0xDCB98987);
1071	mtdcri(SDR0, PESDR0_460SX_HSSL5COEFA, 0xDCB98987);
1072	mtdcri(SDR0, PESDR0_460SX_HSSL6COEFA, 0xDCB98987);
1073	mtdcri(SDR0, PESDR0_460SX_HSSL7COEFA, 0xDCB98987);
1074
1075	mtdcri(SDR0, PESDR1_460SX_HSSL0COEFA, 0xDCB98987);
1076	mtdcri(SDR0, PESDR1_460SX_HSSL1COEFA, 0xDCB98987);
1077	mtdcri(SDR0, PESDR1_460SX_HSSL2COEFA, 0xDCB98987);
1078	mtdcri(SDR0, PESDR1_460SX_HSSL3COEFA, 0xDCB98987);
1079
1080	mtdcri(SDR0, PESDR2_460SX_HSSL0COEFA, 0xDCB98987);
1081	mtdcri(SDR0, PESDR2_460SX_HSSL1COEFA, 0xDCB98987);
1082	mtdcri(SDR0, PESDR2_460SX_HSSL2COEFA, 0xDCB98987);
1083	mtdcri(SDR0, PESDR2_460SX_HSSL3COEFA, 0xDCB98987);
1084
1085	/* HSS TX calibration control */
1086	mtdcri(SDR0, PESDR0_460SX_HSSL1CALDRV, 0x22222222);
1087	mtdcri(SDR0, PESDR1_460SX_HSSL1CALDRV, 0x22220000);
1088	mtdcri(SDR0, PESDR2_460SX_HSSL1CALDRV, 0x22220000);
1089
1090	/* HSS TX slew control */
1091	mtdcri(SDR0, PESDR0_460SX_HSSSLEW, 0xFFFFFFFF);
1092	mtdcri(SDR0, PESDR1_460SX_HSSSLEW, 0xFFFF0000);
1093	mtdcri(SDR0, PESDR2_460SX_HSSSLEW, 0xFFFF0000);
1094
1095	udelay(100);
1096
1097	/* De-assert PLLRESET */
1098	dcri_clrset(SDR0, PESDR0_PLLLCT2, 0x00000100, 0);
1099
1100	/* Reset DL, UTL, GPL before configuration */
1101	mtdcri(SDR0, PESDR0_460SX_RCSSET,
1102			PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1103	mtdcri(SDR0, PESDR1_460SX_RCSSET,
1104			PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1105	mtdcri(SDR0, PESDR2_460SX_RCSSET,
1106			PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1107
1108	udelay(100);
1109
1110	/*
1111	 * If bifurcation is not enabled, u-boot would have disabled the
1112	 * third PCIe port
1113	 */
1114	if (((mfdcri(SDR0, PESDR1_460SX_HSSCTLSET) & 0x00000001) ==
1115				0x00000001)) {
 1116		printk(KERN_INFO "PCI: PCIE bifurcation set up successfully.\n");
1117		printk(KERN_INFO "PCI: Total 3 PCIE ports are present\n");
1118		return 3;
1119	}
1120
1121	printk(KERN_INFO "PCI: Total 2 PCIE ports are present\n");
1122	return 2;
1123}
1124
1125static int ppc460sx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1126{
1127
1128	if (port->endpoint)
1129		dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
1130				0x01000000, 0);
1131	else
1132		dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
1133				0, 0x01000000);
1134
1135	/*Gen-1*/
1136	mtdcri(SDR0, port->sdr_base + PESDRn_460SX_RCEI, 0x08000000);
1137
1138	dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
1139			(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL),
1140			PESDRx_RCSSET_RSTPYN);
1141
1142	port->has_ibpre = 1;
1143
1144	return ppc4xx_pciex_port_reset_sdr(port);
1145}
1146
1147static int ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port *port)
1148{
1149	/* Max 128 Bytes */
1150	out_be32 (port->utl_base + PEUTL_PBBSZ,   0x00000000);
1151	return 0;
1152}
1153
1154static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
1155	.core_init	= ppc460sx_pciex_core_init,
1156	.port_init_hw	= ppc460sx_pciex_init_port_hw,
1157	.setup_utl	= ppc460sx_pciex_init_utl,
1158	.check_link	= ppc4xx_pciex_check_link_sdr,
1159};
1160
1161#endif /* CONFIG_44x */
1162
1163#ifdef CONFIG_40x
1164
1165static int __init ppc405ex_pciex_core_init(struct device_node *np)
1166{
1167	/* Nothing to do, return 2 ports */
1168	return 2;
1169}
1170
1171static void ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port)
1172{
1173	/* Assert the PE0_PHY reset */
1174	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000);
1175	msleep(1);
1176
1177	/* deassert the PE0_hotreset */
1178	if (port->endpoint)
1179		mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01111000);
1180	else
1181		mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01101000);
1182
1183	/* poll for phy !reset */
1184	/* XXX FIXME add timeout */
1185	while (!(mfdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSTA) & 0x00001000))
1186		;
1187
1188	/* deassert the PE0_gpl_utl_reset */
1189	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x00101000);
1190}
1191
1192static int ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1193{
1194	u32 val;
1195
1196	if (port->endpoint)
1197		val = PTYPE_LEGACY_ENDPOINT;
1198	else
1199		val = PTYPE_ROOT_PORT;
1200
1201	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET,
1202	       1 << 24 | val << 20 | LNKW_X1 << 12);
1203
1204	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
1205	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
1206	mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET1, 0x720F0000);
1207	mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET2, 0x70600003);
1208
1209	/*
1210	 * Only reset the PHY when no link is currently established.
1211	 * This is for the Atheros PCIe board which has problems to establish
1212	 * the link (again) after this PHY reset. All other currently tested
1213	 * PCIe boards don't show this problem.
1214	 * This has to be re-tested and fixed in a later release!
1215	 */
1216	val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP);
1217	if (!(val & 0x00001000))
1218		ppc405ex_pcie_phy_reset(port);
1219
1220	dcr_write(port->dcrs, DCRO_PEGPL_CFG, 0x10000000);  /* guarded on */
1221
1222	port->has_ibpre = 1;
1223
1224	return ppc4xx_pciex_port_reset_sdr(port);
1225}
1226
1227static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
1228{
1229	dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
1230
1231	/*
1232	 * Set buffer allocations and then assert VRB and TXE.
1233	 */
1234	out_be32(port->utl_base + PEUTL_OUTTR,   0x02000000);
1235	out_be32(port->utl_base + PEUTL_INTR,    0x02000000);
1236	out_be32(port->utl_base + PEUTL_OPDBSZ,  0x04000000);
1237	out_be32(port->utl_base + PEUTL_PBBSZ,   0x21000000);
1238	out_be32(port->utl_base + PEUTL_IPHBSZ,  0x02000000);
1239	out_be32(port->utl_base + PEUTL_IPDBSZ,  0x04000000);
1240	out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
1241	out_be32(port->utl_base + PEUTL_PCTL,    0x80800066);
1242
1243	out_be32(port->utl_base + PEUTL_PBCTL,   0x08000000);
1244
1245	return 0;
1246}
1247
1248static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
1249{
1250	.core_init	= ppc405ex_pciex_core_init,
1251	.port_init_hw	= ppc405ex_pciex_init_port_hw,
1252	.setup_utl	= ppc405ex_pciex_init_utl,
1253	.check_link	= ppc4xx_pciex_check_link_sdr,
1254};
1255
1256#endif /* CONFIG_40x */
1257
 1258/* Check that the core has been initialized and if not, do it */
1259static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
1260{
1261	static int core_init;
1262	int count = -ENODEV;
1263
1264	if (core_init++)
1265		return 0;
1266
1267#ifdef CONFIG_44x
1268	if (of_device_is_compatible(np, "ibm,plb-pciex-440spe")) {
1269		if (ppc440spe_revA())
1270			ppc4xx_pciex_hwops = &ppc440speA_pcie_hwops;
1271		else
1272			ppc4xx_pciex_hwops = &ppc440speB_pcie_hwops;
1273	}
1274	if (of_device_is_compatible(np, "ibm,plb-pciex-460ex"))
1275		ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops;
1276	if (of_device_is_compatible(np, "ibm,plb-pciex-460sx"))
1277		ppc4xx_pciex_hwops = &ppc460sx_pcie_hwops;
1278#endif /* CONFIG_44x    */
1279#ifdef CONFIG_40x
1280	if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
1281		ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops;
1282#endif
1283	if (ppc4xx_pciex_hwops == NULL) {
1284		printk(KERN_WARNING "PCIE: unknown host type %s\n",
1285		       np->full_name);
1286		return -ENODEV;
1287	}
1288
1289	count = ppc4xx_pciex_hwops->core_init(np);
1290	if (count > 0) {
1291		ppc4xx_pciex_ports =
1292		       kzalloc(count * sizeof(struct ppc4xx_pciex_port),
1293			       GFP_KERNEL);
1294		if (ppc4xx_pciex_ports) {
1295			ppc4xx_pciex_port_count = count;
1296			return 0;
1297		}
1298		printk(KERN_WARNING "PCIE: failed to allocate ports array\n");
1299		return -ENOMEM;
1300	}
1301	return -ENODEV;
1302}
1303
1304static void __init ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port *port)
1305{
1306	/* We map PCI Express configuration based on the reg property */
1307	dcr_write(port->dcrs, DCRO_PEGPL_CFGBAH,
1308		  RES_TO_U32_HIGH(port->cfg_space.start));
1309	dcr_write(port->dcrs, DCRO_PEGPL_CFGBAL,
1310		  RES_TO_U32_LOW(port->cfg_space.start));
1311
1312	/* XXX FIXME: Use size from reg property. For now, map 512M */
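     	/* 0xe0000001 follows the usual mask-plus-enable encoding:
     	 * (0xffffffff << ilog2(0x20000000)) | 1, i.e. a 512MB window.
     	 */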
1313	dcr_write(port->dcrs, DCRO_PEGPL_CFGMSK, 0xe0000001);
1314
1315	/* We map UTL registers based on the reg property */
1316	dcr_write(port->dcrs, DCRO_PEGPL_REGBAH,
1317		  RES_TO_U32_HIGH(port->utl_regs.start));
1318	dcr_write(port->dcrs, DCRO_PEGPL_REGBAL,
1319		  RES_TO_U32_LOW(port->utl_regs.start));
1320
1321	/* XXX FIXME: Use size from reg property */
1322	dcr_write(port->dcrs, DCRO_PEGPL_REGMSK, 0x00007001);
1323
1324	/* Disable all other outbound windows */
1325	dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, 0);
1326	dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, 0);
1327	dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0);
1328	dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0);
1329}
1330
1331static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
1332{
1333	int rc = 0;
1334
1335	/* Init HW */
1336	if (ppc4xx_pciex_hwops->port_init_hw)
1337		rc = ppc4xx_pciex_hwops->port_init_hw(port);
1338	if (rc != 0)
1339		return rc;
1340
1341	if (ppc4xx_pciex_hwops->check_link)
1342		ppc4xx_pciex_hwops->check_link(port);
1343
1344	/*
1345	 * Initialize mapping: disable all regions and configure
1346	 * CFG and REG regions based on resources in the device tree
1347	 */
1348	ppc4xx_pciex_port_init_mapping(port);
1349
1350	/*
1351	 * Map UTL
1352	 */
1353	port->utl_base = ioremap(port->utl_regs.start, 0x100);
1354	BUG_ON(port->utl_base == NULL);
1355
1356	/*
1357	 * Setup UTL registers --BenH.
1358	 */
1359	if (ppc4xx_pciex_hwops->setup_utl)
1360		ppc4xx_pciex_hwops->setup_utl(port);
1361
1362	/*
1363	 * Check for VC0 active and assert RDY.
1364	 */
1365	if (port->sdr_base) {
1366		if (port->link &&
1367		    ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS,
1368					     1 << 16, 1 << 16, 5000)) {
1369			printk(KERN_INFO "PCIE%d: VC0 not active\n", port->index);
1370			port->link = 0;
1371		}
1372
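     		/* The 1 << 20 bit set here matches the "rdy" bit checked in
     		 * ppc440spe_pciex_check_reset(), presumably signalling that
     		 * the port is ready once VC0 has been verified.
     		 */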
1373		dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20);
1374	}
1375
1376	msleep(100);
1377
1378	return 0;
1379}
1380
1381static int ppc4xx_pciex_validate_bdf(struct ppc4xx_pciex_port *port,
1382				     struct pci_bus *bus,
1383				     unsigned int devfn)
1384{
1385	static int message;
1386
 1387	/* Endpoints cannot generate upstream (remote) config cycles */
1388	if (port->endpoint && bus->number != port->hose->first_busno)
1389		return PCIBIOS_DEVICE_NOT_FOUND;
1390
1391	/* Check we are within the mapped range */
1392	if (bus->number > port->hose->last_busno) {
1393		if (!message) {
1394			printk(KERN_WARNING "Warning! Probing bus %u"
1395			       " out of range !\n", bus->number);
1396			message++;
1397		}
1398		return PCIBIOS_DEVICE_NOT_FOUND;
1399	}
1400
1401	/* The root complex has only one device / function */
1402	if (bus->number == port->hose->first_busno && devfn != 0)
1403		return PCIBIOS_DEVICE_NOT_FOUND;
1404
1405	/* The other side of the RC has only one device as well */
1406	if (bus->number == (port->hose->first_busno + 1) &&
1407	    PCI_SLOT(devfn) != 0)
1408		return PCIBIOS_DEVICE_NOT_FOUND;
1409
1410	/* Check if we have a link */
1411	if ((bus->number != port->hose->first_busno) && !port->link)
1412		return PCIBIOS_DEVICE_NOT_FOUND;
1413
1414	return 0;
1415}
1416
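     /* Root port (internal, 4K) config space is reached via hose->cfg_addr;
      * downstream buses go through the external window at hose->cfg_data,
      * 1MB per bus and 4KB per devfn, hence (relbus << 20) | (devfn << 12).
      */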
1417static void __iomem *ppc4xx_pciex_get_config_base(struct ppc4xx_pciex_port *port,
1418						  struct pci_bus *bus,
1419						  unsigned int devfn)
1420{
1421	int relbus;
1422
1423	/* Remove the casts when we finally remove the stupid volatile
1424	 * in struct pci_controller
1425	 */
1426	if (bus->number == port->hose->first_busno)
1427		return (void __iomem *)port->hose->cfg_addr;
1428
1429	relbus = bus->number - (port->hose->first_busno + 1);
1430	return (void __iomem *)port->hose->cfg_data +
1431		((relbus  << 20) | (devfn << 12));
1432}
1433
1434static int ppc4xx_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
1435				    int offset, int len, u32 *val)
1436{
1437	struct pci_controller *hose = pci_bus_to_host(bus);
1438	struct ppc4xx_pciex_port *port =
1439		&ppc4xx_pciex_ports[hose->indirect_type];
1440	void __iomem *addr;
1441	u32 gpl_cfg;
1442
1443	BUG_ON(hose != port->hose);
1444
1445	if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
1446		return PCIBIOS_DEVICE_NOT_FOUND;
1447
1448	addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
1449
1450	/*
1451	 * Reading from configuration space of non-existing device can
1452	 * generate transaction errors. For the read duration we suppress
1453	 * assertion of machine check exceptions to avoid those.
1454	 */
1455	gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
1456	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
1457
1458	/* Make sure no CRS is recorded */
1459	out_be32(port->utl_base + PEUTL_RCSTA, 0x00040000);
1460
1461	switch (len) {
1462	case 1:
1463		*val = in_8((u8 *)(addr + offset));
1464		break;
1465	case 2:
1466		*val = in_le16((u16 *)(addr + offset));
1467		break;
1468	default:
1469		*val = in_le32((u32 *)(addr + offset));
1470		break;
1471	}
1472
1473	pr_debug("pcie-config-read: bus=%3d [%3d..%3d] devfn=0x%04x"
1474		 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
1475		 bus->number, hose->first_busno, hose->last_busno,
1476		 devfn, offset, len, addr + offset, *val);
1477
1478	/* Check for CRS (440SPe rev B does that for us but heh ..) */
1479	if (in_be32(port->utl_base + PEUTL_RCSTA) & 0x00040000) {
1480		pr_debug("Got CRS !\n");
1481		if (len != 4 || offset != 0)
1482			return PCIBIOS_DEVICE_NOT_FOUND;
1483		*val = 0xffff0001;
1484	}
1485
1486	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
1487
1488	return PCIBIOS_SUCCESSFUL;
1489}
1490
1491static int ppc4xx_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
1492				     int offset, int len, u32 val)
1493{
1494	struct pci_controller *hose = pci_bus_to_host(bus);
1495	struct ppc4xx_pciex_port *port =
1496		&ppc4xx_pciex_ports[hose->indirect_type];
1497	void __iomem *addr;
1498	u32 gpl_cfg;
1499
1500	if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
1501		return PCIBIOS_DEVICE_NOT_FOUND;
1502
1503	addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
1504
1505	/*
1506	 * Reading from configuration space of non-existing device can
1507	 * generate transaction errors. For the read duration we suppress
1508	 * assertion of machine check exceptions to avoid those.
1509	 */
1510	gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
1511	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
1512
1513	pr_debug("pcie-config-write: bus=%3d [%3d..%3d] devfn=0x%04x"
1514		 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
1515		 bus->number, hose->first_busno, hose->last_busno,
1516		 devfn, offset, len, addr + offset, val);
1517
1518	switch (len) {
1519	case 1:
1520		out_8((u8 *)(addr + offset), val);
1521		break;
1522	case 2:
1523		out_le16((u16 *)(addr + offset), val);
1524		break;
1525	default:
1526		out_le32((u32 *)(addr + offset), val);
1527		break;
1528	}
1529
1530	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
1531
1532	return PCIBIOS_SUCCESSFUL;
1533}
1534
1535static struct pci_ops ppc4xx_pciex_pci_ops =
1536{
1537	.read  = ppc4xx_pciex_read_config,
1538	.write = ppc4xx_pciex_write_config,
1539};
1540
1541static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port	*port,
1542					     struct pci_controller	*hose,
1543					     void __iomem		*mbase,
1544					     u64			plb_addr,
1545					     u64			pci_addr,
1546					     u64			size,
1547					     unsigned int		flags,
1548					     int			index)
1549{
1550	u32 lah, lal, pciah, pcial, sa;
1551
1552	if (!is_power_of_2(size) ||
1553	    (index < 2 && size < 0x100000) ||
1554	    (index == 2 && size < 0x100) ||
1555	    (plb_addr & (size - 1)) != 0) {
1556		printk(KERN_WARNING "%s: Resource out of range\n",
1557		       hose->dn->full_name);
1558		return -1;
1559	}
1560
1561	/* Calculate register values */
1562	lah = RES_TO_U32_HIGH(plb_addr);
1563	lal = RES_TO_U32_LOW(plb_addr);
1564	pciah = RES_TO_U32_HIGH(pci_addr);
1565	pcial = RES_TO_U32_LOW(pci_addr);
1566	sa = (0xffffffffu << ilog2(size)) | 0x1;
1567
1568	/* Program register values */
1569	switch (index) {
1570	case 0:
1571		out_le32(mbase + PECFG_POM0LAH, pciah);
1572		out_le32(mbase + PECFG_POM0LAL, pcial);
1573		dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
1574		dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
1575		dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
1576		/* Note that 3 here means enabled | single region */
1577		dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, sa | 3);
1578		break;
1579	case 1:
1580		out_le32(mbase + PECFG_POM1LAH, pciah);
1581		out_le32(mbase + PECFG_POM1LAL, pcial);
1582		dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
1583		dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
1584		dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
1585		/* Note that 3 here means enabled | single region */
1586		dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, sa | 3);
1587		break;
1588	case 2:
1589		out_le32(mbase + PECFG_POM2LAH, pciah);
1590		out_le32(mbase + PECFG_POM2LAL, pcial);
1591		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
1592		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
1593		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
1594		/* Note that 3 here means enabled | IO space !!! */
1595		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, sa | 3);
1596		break;
1597	}
1598
1599	return 0;
1600}
1601
1602static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
1603					       struct pci_controller *hose,
1604					       void __iomem *mbase)
1605{
1606	int i, j, found_isa_hole = 0;
1607
1608	/* Setup outbound memory windows */
1609	for (i = j = 0; i < 3; i++) {
1610		struct resource *res = &hose->mem_resources[i];
1611
1612		/* we only care about memory windows */
1613		if (!(res->flags & IORESOURCE_MEM))
1614			continue;
1615		if (j > 1) {
1616			printk(KERN_WARNING "%s: Too many ranges\n",
1617			       port->node->full_name);
1618			break;
1619		}
1620
1621		/* Configure the resource */
1622		if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1623					       res->start,
1624					       res->start - hose->pci_mem_offset,
1625					       resource_size(res),
1626					       res->flags,
1627					       j) == 0) {
1628			j++;
1629
1630			/* If the resource PCI address is 0 then we have our
1631			 * ISA memory hole
1632			 */
1633			if (res->start == hose->pci_mem_offset)
1634				found_isa_hole = 1;
1635		}
1636	}
1637
1638	/* Handle ISA memory hole if not already covered */
1639	if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
1640		if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1641					       hose->isa_mem_phys, 0,
1642					       hose->isa_mem_size, 0, j) == 0)
1643			printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
1644			       hose->dn->full_name);
1645
1646	/* Configure IO, always 64K starting at 0. We hard wire it to 64K !
1647	 * Note also that it -has- to be region index 2 on this HW
1648	 */
1649	if (hose->io_resource.flags & IORESOURCE_IO)
1650		ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1651					   hose->io_base_phys, 0,
1652					   0x10000, IORESOURCE_IO, 2);
1653}
1654
1655static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
1656					       struct pci_controller *hose,
1657					       void __iomem *mbase,
1658					       struct resource *res)
1659{
1660	resource_size_t size = resource_size(res);
1661	u64 sa;
1662
1663	if (port->endpoint) {
1664		resource_size_t ep_addr = 0;
1665		resource_size_t ep_size = 32 << 20;
1666
 1667		/* Currently we map a fixed 32 MByte window (ep_size) to PLB address
1668		 * 0 (SDRAM). This should probably be configurable via a dts
1669		 * property.
1670		 */
1671
1672		/* Calculate window size */
1673		sa = (0xffffffffffffffffull << ilog2(ep_size));
1674
1675		/* Setup BAR0 */
1676		out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
1677		out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) |
1678			 PCI_BASE_ADDRESS_MEM_TYPE_64);
1679
1680		/* Disable BAR1 & BAR2 */
1681		out_le32(mbase + PECFG_BAR1MPA, 0);
1682		out_le32(mbase + PECFG_BAR2HMPA, 0);
1683		out_le32(mbase + PECFG_BAR2LMPA, 0);
1684
1685		out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa));
1686		out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa));
1687
1688		out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr));
1689		out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr));
1690	} else {
1691		/* Calculate window size */
1692		sa = (0xffffffffffffffffull << ilog2(size));
1693		if (res->flags & IORESOURCE_PREFETCH)
1694			sa |= 0x8;
1695
1696		out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
1697		out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa));
1698
1699		/* The setup of the split looks weird to me ... let's see
1700		 * if it works
1701		 */
1702		out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
1703		out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
1704		out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
1705		out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
1706		out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
1707		out_le32(mbase + PECFG_PIM01SAL, 0x00000000);
1708
1709		out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start));
1710		out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start));
1711	}
1712
1713	/* Enable inbound mapping */
1714	out_le32(mbase + PECFG_PIMEN, 0x1);
1715
1716	/* Enable I/O, Mem, and Busmaster cycles */
1717	out_le16(mbase + PCI_COMMAND,
1718		 in_le16(mbase + PCI_COMMAND) |
1719		 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1720}
1721
1722static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
1723{
1724	struct resource dma_window;
1725	struct pci_controller *hose = NULL;
1726	const int *bus_range;
1727	int primary = 0, busses;
1728	void __iomem *mbase = NULL, *cfg_data = NULL;
1729	const u32 *pval;
1730	u32 val;
1731
1732	/* Check if primary bridge */
1733	if (of_get_property(port->node, "primary", NULL))
1734		primary = 1;
1735
1736	/* Get bus range if any */
1737	bus_range = of_get_property(port->node, "bus-range", NULL);
1738
1739	/* Allocate the host controller data structure */
1740	hose = pcibios_alloc_controller(port->node);
1741	if (!hose)
1742		goto fail;
1743
1744	/* We stick the port number in "indirect_type" so the config space
1745	 * ops can retrieve the port data structure easily
1746	 */
1747	hose->indirect_type = port->index;
1748
1749	/* Get bus range */
1750	hose->first_busno = bus_range ? bus_range[0] : 0x0;
1751	hose->last_busno = bus_range ? bus_range[1] : 0xff;
1752
 1753	/* Because mapping the config space is so large (1M per bus), we
1754	 * limit how many busses we support. In the long run, we could replace
1755	 * that with something akin to kmap_atomic instead. We set aside 1 bus
1756	 * for the host itself too.
1757	 */
1758	busses = hose->last_busno - hose->first_busno; /* This is off by 1 */
1759	if (busses > MAX_PCIE_BUS_MAPPED) {
1760		busses = MAX_PCIE_BUS_MAPPED;
1761		hose->last_busno = hose->first_busno + busses;
1762	}
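     	/* With MAX_PCIE_BUS_MAPPED = 0x40 and 1MB of config space per bus,
     	 * the external config space ioremap below is capped at 64MB.
     	 */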
1763
1764	if (!port->endpoint) {
1765		/* Only map the external config space in cfg_data for
1766		 * PCIe root-complexes. External space is 1M per bus
1767		 */
1768		cfg_data = ioremap(port->cfg_space.start +
1769				   (hose->first_busno + 1) * 0x100000,
1770				   busses * 0x100000);
1771		if (cfg_data == NULL) {
1772			printk(KERN_ERR "%s: Can't map external config space !",
1773			       port->node->full_name);
1774			goto fail;
1775		}
1776		hose->cfg_data = cfg_data;
1777	}
1778
1779	/* Always map the host config space in cfg_addr.
1780	 * Internal space is 4K
1781	 */
1782	mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
1783	if (mbase == NULL) {
1784		printk(KERN_ERR "%s: Can't map internal config space !",
1785		       port->node->full_name);
1786		goto fail;
1787	}
1788	hose->cfg_addr = mbase;
1789
1790	pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name,
1791		 hose->first_busno, hose->last_busno);
1792	pr_debug("     config space mapped at: root @0x%p, other @0x%p\n",
1793		 hose->cfg_addr, hose->cfg_data);
1794
1795	/* Setup config space */
1796	hose->ops = &ppc4xx_pciex_pci_ops;
1797	port->hose = hose;
1798	mbase = (void __iomem *)hose->cfg_addr;
1799
1800	if (!port->endpoint) {
1801		/*
1802		 * Set bus numbers on our root port
1803		 */
1804		out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno);
1805		out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1);
1806		out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno);
1807	}
1808
1809	/*
1810	 * OMRs are already reset, also disable PIMs
1811	 */
1812	out_le32(mbase + PECFG_PIMEN, 0);
1813
1814	/* Parse outbound mapping resources */
1815	pci_process_bridge_OF_ranges(hose, port->node, primary);
1816
1817	/* Parse inbound mapping resources */
1818	if (ppc4xx_parse_dma_ranges(hose, mbase, &dma_window) != 0)
1819		goto fail;
1820
1821	/* Configure outbound ranges POMs */
1822	ppc4xx_configure_pciex_POMs(port, hose, mbase);
1823
1824	/* Configure inbound ranges PIMs */
1825	ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window);
1826
1827	/* The root complex doesn't show up if we don't set some vendor
1828	 * and device IDs into it. The defaults below are the same bogus
1829	 * one that the initial code in arch/ppc had. This can be
1830	 * overwritten by setting the "vendor-id/device-id" properties
1831	 * in the pciex node.
1832	 */
1833
1834	/* Get the (optional) vendor-/device-id from the device-tree */
1835	pval = of_get_property(port->node, "vendor-id", NULL);
1836	if (pval) {
1837		val = *pval;
1838	} else {
1839		if (!port->endpoint)
1840			val = 0xaaa0 + port->index;
1841		else
1842			val = 0xeee0 + port->index;
1843	}
1844	out_le16(mbase + 0x200, val);
1845
1846	pval = of_get_property(port->node, "device-id", NULL);
1847	if (pval) {
1848		val = *pval;
1849	} else {
1850		if (!port->endpoint)
1851			val = 0xbed0 + port->index;
1852		else
1853			val = 0xfed0 + port->index;
1854	}
1855	out_le16(mbase + 0x202, val);
1856
1857	if (!port->endpoint) {
1858		/* Set Class Code to PCI-PCI bridge and Revision Id to 1 */
1859		out_le32(mbase + 0x208, 0x06040001);
1860
1861		printk(KERN_INFO "PCIE%d: successfully set as root-complex\n",
1862		       port->index);
1863	} else {
1864		/* Set Class Code to Processor/PPC */
1865		out_le32(mbase + 0x208, 0x0b200001);
1866
1867		printk(KERN_INFO "PCIE%d: successfully set as endpoint\n",
1868		       port->index);
1869	}
1870
1871	return;
1872 fail:
1873	if (hose)
1874		pcibios_free_controller(hose);
1875	if (cfg_data)
1876		iounmap(cfg_data);
1877	if (mbase)
1878		iounmap(mbase);
1879}
1880
1881static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
1882{
1883	struct ppc4xx_pciex_port *port;
1884	const u32 *pval;
1885	int portno;
1886	unsigned int dcrs;
1887	const char *val;
1888
1889	/* First, proceed to core initialization as we assume there's
1890	 * only one PCIe core in the system
1891	 */
1892	if (ppc4xx_pciex_check_core_init(np))
1893		return;
1894
1895	/* Get the port number from the device-tree */
1896	pval = of_get_property(np, "port", NULL);
1897	if (pval == NULL) {
1898		printk(KERN_ERR "PCIE: Can't find port number for %s\n",
1899		       np->full_name);
1900		return;
1901	}
1902	portno = *pval;
1903	if (portno >= ppc4xx_pciex_port_count) {
1904		printk(KERN_ERR "PCIE: port number out of range for %s\n",
1905		       np->full_name);
1906		return;
1907	}
1908	port = &ppc4xx_pciex_ports[portno];
1909	port->index = portno;
1910
1911	/*
1912	 * Check if device is enabled
1913	 */
1914	if (!of_device_is_available(np)) {
1915		printk(KERN_INFO "PCIE%d: Port disabled via device-tree\n", port->index);
1916		return;
1917	}
1918
1919	port->node = of_node_get(np);
1920	pval = of_get_property(np, "sdr-base", NULL);
1921	if (pval == NULL) {
1922		printk(KERN_ERR "PCIE: missing sdr-base for %s\n",
1923		       np->full_name);
1924		return;
1925	}
1926	port->sdr_base = *pval;
1927
1928	/* Check if device_type property is set to "pci" or "pci-endpoint".
 1929	 * Depending on its value, this PCIe port will be configured
 1930	 * as a root complex or as an endpoint.
1931	 */
1932	val = of_get_property(port->node, "device_type", NULL);
 1933	if (val && !strcmp(val, "pci-endpoint")) {
 1934		port->endpoint = 1;
 1935	} else if (val && !strcmp(val, "pci")) {
1936		port->endpoint = 0;
1937	} else {
1938		printk(KERN_ERR "PCIE: missing or incorrect device_type for %s\n",
1939		       np->full_name);
1940		return;
1941	}
1942
1943	/* Fetch config space registers address */
1944	if (of_address_to_resource(np, 0, &port->cfg_space)) {
1945		printk(KERN_ERR "%s: Can't get PCI-E config space !",
1946		       np->full_name);
1947		return;
1948	}
1949	/* Fetch host bridge internal registers address */
1950	if (of_address_to_resource(np, 1, &port->utl_regs)) {
1951		printk(KERN_ERR "%s: Can't get UTL register base !",
1952		       np->full_name);
1953		return;
1954	}
1955
1956	/* Map DCRs */
1957	dcrs = dcr_resource_start(np, 0);
1958	if (dcrs == 0) {
1959		printk(KERN_ERR "%s: Can't get DCR register base !",
1960		       np->full_name);
1961		return;
1962	}
1963	port->dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
1964
1965	/* Initialize the port specific registers */
1966	if (ppc4xx_pciex_port_init(port)) {
1967		printk(KERN_WARNING "PCIE%d: Port init failed\n", port->index);
1968		return;
1969	}
1970
1971	/* Setup the linux hose data structure */
1972	ppc4xx_pciex_port_setup_hose(port);
1973}
1974
1975#endif /* CONFIG_PPC4xx_PCI_EXPRESS */
1976
1977static int __init ppc4xx_pci_find_bridges(void)
1978{
1979	struct device_node *np;
1980
1981	pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
1982
1983#ifdef CONFIG_PPC4xx_PCI_EXPRESS
1984	for_each_compatible_node(np, NULL, "ibm,plb-pciex")
1985		ppc4xx_probe_pciex_bridge(np);
1986#endif
1987	for_each_compatible_node(np, NULL, "ibm,plb-pcix")
1988		ppc4xx_probe_pcix_bridge(np);
1989	for_each_compatible_node(np, NULL, "ibm,plb-pci")
1990		ppc4xx_probe_pci_bridge(np);
1991
1992	return 0;
1993}
1994arch_initcall(ppc4xx_pci_find_bridges);
1995