v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * MPC83xx/85xx/86xx PCI/PCIE support routing.
   4 *
   5 * Copyright 2007-2012 Freescale Semiconductor, Inc.
   6 * Copyright 2008-2009 MontaVista Software, Inc.
   7 *
   8 * Initial author: Xianghua Xiao <x.xiao@freescale.com>
   9 * Recode: ZHANG WEI <wei.zhang@freescale.com>
   10 * Rewrite the routing for Freescale PCI and PCI Express
  11 * 	Roy Zang <tie-fei.zang@freescale.com>
  12 * MPC83xx PCI-Express support:
  13 * 	Tony Li <tony.li@freescale.com>
  14 * 	Anton Vorontsov <avorontsov@ru.mvista.com>
  15 */
  16#include <linux/kernel.h>
  17#include <linux/pci.h>
  18#include <linux/delay.h>
  19#include <linux/string.h>
  20#include <linux/fsl/edac.h>
  21#include <linux/init.h>
  22#include <linux/interrupt.h>
  23#include <linux/memblock.h>
  24#include <linux/log2.h>
  25#include <linux/platform_device.h>
  26#include <linux/slab.h>
  27#include <linux/suspend.h>
  28#include <linux/syscore_ops.h>
  29#include <linux/uaccess.h>
  30
  31#include <asm/io.h>
  32#include <asm/prom.h>
  33#include <asm/pci-bridge.h>
  34#include <asm/ppc-pci.h>
  35#include <asm/machdep.h>
  36#include <asm/mpc85xx.h>
  37#include <asm/disassemble.h>
  38#include <asm/ppc-opcode.h>
  39#include <asm/swiotlb.h>
  40#include <sysdev/fsl_soc.h>
  41#include <sysdev/fsl_pci.h>
  42
  43static int fsl_pcie_bus_fixup, is_mpc83xx_pci;
  44
  45static void quirk_fsl_pcie_early(struct pci_dev *dev)
  46{
  47	u8 hdr_type;
  48
  49	/* if we aren't a PCIe don't bother */
  50	if (!pci_is_pcie(dev))
  51		return;
  52
  53	/* if we aren't in host mode don't bother */
  54	pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
  55	if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
  56		return;
  57
  58	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
  59	fsl_pcie_bus_fixup = 1;
  60	return;
  61}
  62
  63static int fsl_indirect_read_config(struct pci_bus *, unsigned int,
  64				    int, int, u32 *);
  65
  66static int fsl_pcie_check_link(struct pci_controller *hose)
  67{
  68	u32 val = 0;
  69
  70	if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
  71		if (hose->ops->read == fsl_indirect_read_config)
  72			__indirect_read_config(hose, hose->first_busno, 0,
  73					       PCIE_LTSSM, 4, &val);
  74		else
  75			early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
  76		if (val < PCIE_LTSSM_L0)
  77			return 1;
  78	} else {
  79		struct ccsr_pci __iomem *pci = hose->private_data;
  80		/* for PCIe IP rev 3.0 or greater use CSR0 for link state */
  81		val = (in_be32(&pci->pex_csr0) & PEX_CSR0_LTSSM_MASK)
  82				>> PEX_CSR0_LTSSM_SHIFT;
  83		if (val != PEX_CSR0_LTSSM_L0)
  84			return 1;
  85	}
  86
  87	return 0;
  88}
  89
  90static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
  91				    int offset, int len, u32 *val)
  92{
  93	struct pci_controller *hose = pci_bus_to_host(bus);
  94
  95	if (fsl_pcie_check_link(hose))
  96		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
  97	else
  98		hose->indirect_type &= ~PPC_INDIRECT_TYPE_NO_PCIE_LINK;
  99
 100	return indirect_read_config(bus, devfn, offset, len, val);
 101}
 102
 103#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 104
 105static struct pci_ops fsl_indirect_pcie_ops =
 106{
 107	.read = fsl_indirect_read_config,
 108	.write = indirect_write_config,
 109};
 110
 111static u64 pci64_dma_offset;
 112
 113#ifdef CONFIG_SWIOTLB
 114static void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
 115{
 116	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
 117
 118	pdev->dev.bus_dma_mask =
 119		hose->dma_window_base_cur + hose->dma_window_size;
 120}
 121
 122static void setup_swiotlb_ops(struct pci_controller *hose)
 123{
 124	if (ppc_swiotlb_enable)
 125		hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
 126}
 127#else
 128static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
 129#endif
 130
 131static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
 132{
 133	/*
 134	 * Fix up PCI devices that are able to DMA to the large inbound
 135	 * mapping that allows addressing any RAM address from across PCI.
 136	 */
 137	if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
 138		dev->bus_dma_mask = 0;
 139		dev->archdata.dma_offset = pci64_dma_offset;
 140	}
 141}
 142
 143static int setup_one_atmu(struct ccsr_pci __iomem *pci,
 144	unsigned int index, const struct resource *res,
 145	resource_size_t offset)
 146{
 147	resource_size_t pci_addr = res->start - offset;
 148	resource_size_t phys_addr = res->start;
 149	resource_size_t size = resource_size(res);
 150	u32 flags = 0x80044000; /* enable & mem R/W */
 151	unsigned int i;
 152
 153	pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
 154		(u64)res->start, (u64)size);
 155
 156	if (res->flags & IORESOURCE_PREFETCH)
 157		flags |= 0x10000000; /* enable relaxed ordering */
 158
 159	for (i = 0; size > 0; i++) {
 160		unsigned int bits = min_t(u32, ilog2(size),
 161					__ffs(pci_addr | phys_addr));
 162
 163		if (index + i >= 5)
 164			return -1;
 165
 166		out_be32(&pci->pow[index + i].potar, pci_addr >> 12);
 167		out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44);
 168		out_be32(&pci->pow[index + i].powbar, phys_addr >> 12);
 169		out_be32(&pci->pow[index + i].powar, flags | (bits - 1));
 170
 171		pci_addr += (resource_size_t)1U << bits;
 172		phys_addr += (resource_size_t)1U << bits;
 173		size -= (resource_size_t)1U << bits;
 174	}
 175
 176	return i;
 177}
 178
 179static bool is_kdump(void)
 180{
 181	struct device_node *node;
 182
 183	node = of_find_node_by_type(NULL, "memory");
 184	if (!node) {
 185		WARN_ON_ONCE(1);
 186		return false;
 187	}
 188
 189	return of_property_read_bool(node, "linux,usable-memory");
 190}
 191
 192/* atmu setup for fsl pci/pcie controller */
 193static void setup_pci_atmu(struct pci_controller *hose)
 194{
 195	struct ccsr_pci __iomem *pci = hose->private_data;
 196	int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
 197	u64 mem, sz, paddr_hi = 0;
 198	u64 offset = 0, paddr_lo = ULLONG_MAX;
 199	u32 pcicsrbar = 0, pcicsrbar_sz;
 200	u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
 201			PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
 202	const u64 *reg;
 203	int len;
 204	bool setup_inbound;
 205
 206	/*
 207	 * If this is kdump, we don't want to trigger a bunch of PCI
 208	 * errors by closing the window on in-flight DMA.
 209	 *
 210	 * We still run most of the function's logic so that things like
 211	 * hose->dma_window_size still get set.
 212	 */
 213	setup_inbound = !is_kdump();
 214
 215	if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
 216		/*
 217		 * BSC9132 Rev1.0 has an issue where all the PEX inbound
 218		 * windows have implemented the default target value as 0xf
  219		 * for CCSR space. In all Freescale legacy devices the target
 220		 * of 0xf is reserved for local memory space. 9132 Rev1.0
  221		 * now has local memory space mapped to target 0x0 instead of
 222		 * 0xf. Hence adding a workaround to remove the target 0xf
 223		 * defined for memory space from Inbound window attributes.
 224		 */
 225		piwar &= ~PIWAR_TGI_LOCAL;
 226	}
 227
 228	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 229		if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
 230			win_idx = 2;
 231			start_idx = 0;
 232			end_idx = 3;
 233		}
 234	}
 235
 236	/* Disable all windows (except powar0 since it's ignored) */
 237	for(i = 1; i < 5; i++)
 238		out_be32(&pci->pow[i].powar, 0);
 239
 240	if (setup_inbound) {
 241		for (i = start_idx; i < end_idx; i++)
 242			out_be32(&pci->piw[i].piwar, 0);
 243	}
 244
 245	/* Setup outbound MEM window */
 246	for(i = 0, j = 1; i < 3; i++) {
 247		if (!(hose->mem_resources[i].flags & IORESOURCE_MEM))
 248			continue;
 249
 250		paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start);
 251		paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end);
 252
 253		/* We assume all memory resources have the same offset */
 254		offset = hose->mem_offset[i];
 255		n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset);
 256
 257		if (n < 0 || j >= 5) {
 258			pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
 259			hose->mem_resources[i].flags |= IORESOURCE_DISABLED;
 260		} else
 261			j += n;
 262	}
 263
 264	/* Setup outbound IO window */
 265	if (hose->io_resource.flags & IORESOURCE_IO) {
 266		if (j >= 5) {
 267			pr_err("Ran out of outbound PCI ATMUs for IO resource\n");
 268		} else {
 269			pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
 270				 "phy base 0x%016llx.\n",
 271				 (u64)hose->io_resource.start,
 272				 (u64)resource_size(&hose->io_resource),
 273				 (u64)hose->io_base_phys);
 274			out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
 275			out_be32(&pci->pow[j].potear, 0);
 276			out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
 277			/* Enable, IO R/W */
 278			out_be32(&pci->pow[j].powar, 0x80088000
 279				| (ilog2(hose->io_resource.end
 280				- hose->io_resource.start + 1) - 1));
 281		}
 282	}
 283
 284	/* convert to pci address space */
 285	paddr_hi -= offset;
 286	paddr_lo -= offset;
 287
 288	if (paddr_hi == paddr_lo) {
 289		pr_err("%pOF: No outbound window space\n", hose->dn);
 290		return;
 291	}
 292
 293	if (paddr_lo == 0) {
 294		pr_err("%pOF: No space for inbound window\n", hose->dn);
 295		return;
 296	}
 297
 298	/* setup PCSRBAR/PEXCSRBAR */
 299	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
 300	early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz);
 301	pcicsrbar_sz = ~pcicsrbar_sz + 1;
 302
 303	if (paddr_hi < (0x100000000ull - pcicsrbar_sz) ||
 304		(paddr_lo > 0x100000000ull))
 305		pcicsrbar = 0x100000000ull - pcicsrbar_sz;
 306	else
 307		pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz;
 308	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar);
 309
 310	paddr_lo = min(paddr_lo, (u64)pcicsrbar);
 311
 312	pr_info("%pOF: PCICSRBAR @ 0x%x\n", hose->dn, pcicsrbar);
 313
 314	/* Setup inbound mem window */
 315	mem = memblock_end_of_DRAM();
 316	pr_info("%s: end of DRAM %llx\n", __func__, mem);
 317
 318	/*
 319	 * The msi-address-64 property, if it exists, indicates the physical
 320	 * address of the MSIIR register.  Normally, this register is located
 321	 * inside CCSR, so the ATMU that covers all of CCSR is used. But if
 322	 * this property exists, then we normally need to create a new ATMU
 323	 * for it.  For now, however, we cheat.  The only entity that creates
 324	 * this property is the Freescale hypervisor, and the address is
 325	 * specified in the partition configuration.  Typically, the address
 326	 * is located in the page immediately after the end of DDR.  If so, we
 327	 * can avoid allocating a new ATMU by extending the DDR ATMU by one
 328	 * page.
 329	 */
 330	reg = of_get_property(hose->dn, "msi-address-64", &len);
 331	if (reg && (len == sizeof(u64))) {
 332		u64 address = be64_to_cpup(reg);
 333
 334		if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
 335			pr_info("%pOF: extending DDR ATMU to cover MSIIR", hose->dn);
 336			mem += PAGE_SIZE;
 337		} else {
 338			/* TODO: Create a new ATMU for MSIIR */
 339			pr_warn("%pOF: msi-address-64 address of %llx is "
 340				"unsupported\n", hose->dn, address);
 341		}
 342	}
 343
 344	sz = min(mem, paddr_lo);
 345	mem_log = ilog2(sz);
 346
 347	/* PCIe can overmap inbound & outbound since RX & TX are separated */
 348	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 349		/* Size window to exact size if power-of-two or one size up */
 350		if ((1ull << mem_log) != mem) {
 351			mem_log++;
 352			if ((1ull << mem_log) > mem)
 353				pr_info("%pOF: Setting PCI inbound window "
 354					"greater than memory size\n", hose->dn);
 355		}
 356
 357		piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);
 358
 359		if (setup_inbound) {
 360			/* Setup inbound memory window */
 361			out_be32(&pci->piw[win_idx].pitar,  0x00000000);
 362			out_be32(&pci->piw[win_idx].piwbar, 0x00000000);
 363			out_be32(&pci->piw[win_idx].piwar,  piwar);
 364		}
 365
 366		win_idx--;
 367		hose->dma_window_base_cur = 0x00000000;
 368		hose->dma_window_size = (resource_size_t)sz;
 369
 370		/*
 371		 * if we have >4G of memory setup second PCI inbound window to
 372		 * let devices that are 64-bit address capable to work w/o
 373		 * SWIOTLB and access the full range of memory
 374		 */
 375		if (sz != mem) {
 376			mem_log = ilog2(mem);
 377
  378			/* Size window up if we don't fit in exact power-of-2 */
 379			if ((1ull << mem_log) != mem)
 380				mem_log++;
 381
 382			piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
 383			pci64_dma_offset = 1ULL << mem_log;
 384
 385			if (setup_inbound) {
 386				/* Setup inbound memory window */
 387				out_be32(&pci->piw[win_idx].pitar,  0x00000000);
 388				out_be32(&pci->piw[win_idx].piwbear,
 389						pci64_dma_offset >> 44);
 390				out_be32(&pci->piw[win_idx].piwbar,
 391						pci64_dma_offset >> 12);
 392				out_be32(&pci->piw[win_idx].piwar,  piwar);
 393			}
 394
 395			/*
 396			 * install our own dma_set_mask handler to fixup dma_ops
 397			 * and dma_offset
 398			 */
 399			ppc_md.dma_set_mask = fsl_pci_dma_set_mask;
 400
 401			pr_info("%pOF: Setup 64-bit PCI DMA window\n", hose->dn);
 402		}
 403	} else {
 404		u64 paddr = 0;
 405
 406		if (setup_inbound) {
 407			/* Setup inbound memory window */
 408			out_be32(&pci->piw[win_idx].pitar,  paddr >> 12);
 409			out_be32(&pci->piw[win_idx].piwbar, paddr >> 12);
 410			out_be32(&pci->piw[win_idx].piwar,
 411				 (piwar | (mem_log - 1)));
 412		}
 413
 414		win_idx--;
 415		paddr += 1ull << mem_log;
 416		sz -= 1ull << mem_log;
 417
 418		if (sz) {
 419			mem_log = ilog2(sz);
 420			piwar |= (mem_log - 1);
 421
 422			if (setup_inbound) {
 423				out_be32(&pci->piw[win_idx].pitar,
 424					 paddr >> 12);
 425				out_be32(&pci->piw[win_idx].piwbar,
 426					 paddr >> 12);
 427				out_be32(&pci->piw[win_idx].piwar, piwar);
 428			}
 429
 430			win_idx--;
 431			paddr += 1ull << mem_log;
 432		}
 433
 434		hose->dma_window_base_cur = 0x00000000;
 435		hose->dma_window_size = (resource_size_t)paddr;
 436	}
 437
 438	if (hose->dma_window_size < mem) {
 439#ifdef CONFIG_SWIOTLB
 440		ppc_swiotlb_enable = 1;
 441#else
 442		pr_err("%pOF: ERROR: Memory size exceeds PCI ATMU ability to "
 443			"map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
 444			 hose->dn);
 445#endif
 446		/* adjusting outbound windows could reclaim space in mem map */
 447		if (paddr_hi < 0xffffffffull)
 448			pr_warn("%pOF: WARNING: Outbound window cfg leaves "
 449				"gaps in memory map. Adjusting the memory map "
 450				"could reduce unnecessary bounce buffering.\n",
 451				hose->dn);
 452
 453		pr_info("%pOF: DMA window size is 0x%llx\n", hose->dn,
 454			(u64)hose->dma_window_size);
 455	}
 456}
 457
 458static void __init setup_pci_cmd(struct pci_controller *hose)
 459{
 460	u16 cmd;
 461	int cap_x;
 462
 463	early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
 464	cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
 465		| PCI_COMMAND_IO;
 466	early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);
 467
 468	cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX);
 469	if (cap_x) {
 470		int pci_x_cmd = cap_x + PCI_X_CMD;
 471		cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
 472			| PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
 473		early_write_config_word(hose, 0, 0, pci_x_cmd, cmd);
 474	} else {
 475		early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
 476	}
 477}
 478
 479void fsl_pcibios_fixup_bus(struct pci_bus *bus)
 480{
 481	struct pci_controller *hose = pci_bus_to_host(bus);
 482	int i, is_pcie = 0, no_link;
 483
 484	/* The root complex bridge comes up with bogus resources,
 485	 * we copy the PHB ones in.
 486	 *
 487	 * With the current generic PCI code, the PHB bus no longer
 488	 * has bus->resource[0..4] set, so things are a bit more
 489	 * tricky.
 490	 */
 491
 492	if (fsl_pcie_bus_fixup)
 493		is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
 494	no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);
 495
 496	if (bus->parent == hose->bus && (is_pcie || no_link)) {
 497		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
 498			struct resource *res = bus->resource[i];
 499			struct resource *par;
 500
 501			if (!res)
 502				continue;
 503			if (i == 0)
 504				par = &hose->io_resource;
 505			else if (i < 4)
 506				par = &hose->mem_resources[i-1];
 507			else par = NULL;
 508
 509			res->start = par ? par->start : 0;
 510			res->end   = par ? par->end   : 0;
 511			res->flags = par ? par->flags : 0;
 512		}
 513	}
 514}
 515
 516int fsl_add_bridge(struct platform_device *pdev, int is_primary)
 517{
 518	int len;
 519	struct pci_controller *hose;
 520	struct resource rsrc;
 521	const int *bus_range;
 522	u8 hdr_type, progif;
 523	struct device_node *dev;
 524	struct ccsr_pci __iomem *pci;
 525	u16 temp;
 526	u32 svr = mfspr(SPRN_SVR);
 527
 528	dev = pdev->dev.of_node;
 529
 530	if (!of_device_is_available(dev)) {
 531		pr_warn("%pOF: disabled\n", dev);
 532		return -ENODEV;
 533	}
 534
 535	pr_debug("Adding PCI host bridge %pOF\n", dev);
 536
 537	/* Fetch host bridge registers address */
 538	if (of_address_to_resource(dev, 0, &rsrc)) {
 539		printk(KERN_WARNING "Can't get pci register base!");
 540		return -ENOMEM;
 541	}
 542
 543	/* Get bus range if any */
 544	bus_range = of_get_property(dev, "bus-range", &len);
 545	if (bus_range == NULL || len < 2 * sizeof(int))
 546		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
 547			" bus 0\n", dev);
 548
 549	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 550	hose = pcibios_alloc_controller(dev);
 551	if (!hose)
 552		return -ENOMEM;
 553
 554	/* set platform device as the parent */
 555	hose->parent = &pdev->dev;
 556	hose->first_busno = bus_range ? bus_range[0] : 0x0;
 557	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 558
 559	pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
 560		 (u64)rsrc.start, (u64)resource_size(&rsrc));
 561
 562	pci = hose->private_data = ioremap(rsrc.start, resource_size(&rsrc));
 563	if (!hose->private_data)
 564		goto no_bridge;
 565
 566	setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
 567			   PPC_INDIRECT_TYPE_BIG_ENDIAN);
 568
 569	if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
 570		hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 571
 572	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 573		/* use fsl_indirect_read_config for PCIe */
 574		hose->ops = &fsl_indirect_pcie_ops;
 575		/* For PCIE read HEADER_TYPE to identify controller mode */
 576		early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
 577		if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
 578			goto no_bridge;
 579
 580	} else {
 581		/* For PCI read PROG to identify controller mode */
 582		early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
 583		if ((progif & 1) &&
 584		    !of_property_read_bool(dev, "fsl,pci-agent-force-enum"))
 585			goto no_bridge;
 586	}
 587
 588	setup_pci_cmd(hose);
 589
 590	/* check PCI express link status */
 591	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 592		hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG |
 593			PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
 594		if (fsl_pcie_check_link(hose))
 595			hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 596	} else {
 597		/*
 598		 * Set PBFR(PCI Bus Function Register)[10] = 1 to
 599		 * disable the combining of crossing cacheline
 600		 * boundary requests into one burst transaction.
 601		 * PCI-X operation is not affected.
 602		 * Fix erratum PCI 5 on MPC8548
 603		 */
 604#define PCI_BUS_FUNCTION 0x44
 605#define PCI_BUS_FUNCTION_MDS 0x400	/* Master disable streaming */
 606		if (((SVR_SOC_VER(svr) == SVR_8543) ||
 607		     (SVR_SOC_VER(svr) == SVR_8545) ||
 608		     (SVR_SOC_VER(svr) == SVR_8547) ||
 609		     (SVR_SOC_VER(svr) == SVR_8548)) &&
 610		    !early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX)) {
 611			early_read_config_word(hose, 0, 0,
 612					PCI_BUS_FUNCTION, &temp);
 613			temp |= PCI_BUS_FUNCTION_MDS;
 614			early_write_config_word(hose, 0, 0,
 615					PCI_BUS_FUNCTION, temp);
 616		}
 617	}
 618
 619	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
 620		"Firmware bus number: %d->%d\n",
 621		(unsigned long long)rsrc.start, hose->first_busno,
 622		hose->last_busno);
 623
 624	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
 625		hose, hose->cfg_addr, hose->cfg_data);
 626
 627	/* Interpret the "ranges" property */
 628	/* This also maps the I/O region and sets isa_io/mem_base */
 629	pci_process_bridge_OF_ranges(hose, dev, is_primary);
 630
 631	/* Setup PEX window registers */
 632	setup_pci_atmu(hose);
 633
 634	/* Set up controller operations */
 635	setup_swiotlb_ops(hose);
 636
 637	return 0;
 638
 639no_bridge:
 640	iounmap(hose->private_data);
 641	/* unmap cfg_data & cfg_addr separately if not on same page */
 642	if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
 643	    ((unsigned long)hose->cfg_addr & PAGE_MASK))
 644		iounmap(hose->cfg_data);
 645	iounmap(hose->cfg_addr);
 646	pcibios_free_controller(hose);
 647	return -ENODEV;
 648}
 649#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
 650
 651DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID,
 652			quirk_fsl_pcie_early);
 653
 654#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
 655struct mpc83xx_pcie_priv {
 656	void __iomem *cfg_type0;
 657	void __iomem *cfg_type1;
 658	u32 dev_base;
 659};
 660
 661struct pex_inbound_window {
 662	u32 ar;
 663	u32 tar;
 664	u32 barl;
 665	u32 barh;
 666};
 667
 668/*
 669 * With the convention of u-boot, the PCIE outbound window 0 serves
 670 * as configuration transactions outbound.
 671 */
 672#define PEX_OUTWIN0_BAR		0xCA4
 673#define PEX_OUTWIN0_TAL		0xCA8
 674#define PEX_OUTWIN0_TAH		0xCAC
 675#define PEX_RC_INWIN_BASE	0xE60
 676#define PEX_RCIWARn_EN		0x1
 677
 678static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn)
 679{
 680	struct pci_controller *hose = pci_bus_to_host(bus);
 681
 682	if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK)
 683		return PCIBIOS_DEVICE_NOT_FOUND;
 684	/*
 685	 * Workaround for the HW bug: for Type 0 configure transactions the
 686	 * PCI-E controller does not check the device number bits and just
 687	 * assumes that the device number bits are 0.
 688	 */
 689	if (bus->number == hose->first_busno ||
 690			bus->primary == hose->first_busno) {
 691		if (devfn & 0xf8)
 692			return PCIBIOS_DEVICE_NOT_FOUND;
 693	}
 694
 695	if (ppc_md.pci_exclude_device) {
 696		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
 697			return PCIBIOS_DEVICE_NOT_FOUND;
 698	}
 699
 700	return PCIBIOS_SUCCESSFUL;
 701}
 702
 703static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus,
 704					    unsigned int devfn, int offset)
 705{
 706	struct pci_controller *hose = pci_bus_to_host(bus);
 707	struct mpc83xx_pcie_priv *pcie = hose->dn->data;
 708	u32 dev_base = bus->number << 24 | devfn << 16;
 709	int ret;
 710
 711	ret = mpc83xx_pcie_exclude_device(bus, devfn);
 712	if (ret)
 713		return NULL;
 714
 715	offset &= 0xfff;
 716
 717	/* Type 0 */
 718	if (bus->number == hose->first_busno)
 719		return pcie->cfg_type0 + offset;
 720
 721	if (pcie->dev_base == dev_base)
 722		goto mapped;
 723
 724	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, dev_base);
 725
 726	pcie->dev_base = dev_base;
 727mapped:
 728	return pcie->cfg_type1 + offset;
 729}
 730
 731static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
 732				     int offset, int len, u32 val)
 733{
 734	struct pci_controller *hose = pci_bus_to_host(bus);
 735
 736	/* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
 737	if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
 738		val &= 0xffffff00;
 739
 740	return pci_generic_config_write(bus, devfn, offset, len, val);
 741}
 742
 743static struct pci_ops mpc83xx_pcie_ops = {
 744	.map_bus = mpc83xx_pcie_remap_cfg,
 745	.read = pci_generic_config_read,
 746	.write = mpc83xx_pcie_write_config,
 747};
 748
 749static int __init mpc83xx_pcie_setup(struct pci_controller *hose,
 750				     struct resource *reg)
 751{
 752	struct mpc83xx_pcie_priv *pcie;
 753	u32 cfg_bar;
 754	int ret = -ENOMEM;
 755
 756	pcie = zalloc_maybe_bootmem(sizeof(*pcie), GFP_KERNEL);
 757	if (!pcie)
 758		return ret;
 759
 760	pcie->cfg_type0 = ioremap(reg->start, resource_size(reg));
 761	if (!pcie->cfg_type0)
 762		goto err0;
 763
 764	cfg_bar = in_le32(pcie->cfg_type0 + PEX_OUTWIN0_BAR);
 765	if (!cfg_bar) {
 766		/* PCI-E isn't configured. */
 767		ret = -ENODEV;
 768		goto err1;
 769	}
 770
 771	pcie->cfg_type1 = ioremap(cfg_bar, 0x1000);
 772	if (!pcie->cfg_type1)
 773		goto err1;
 774
 775	WARN_ON(hose->dn->data);
 776	hose->dn->data = pcie;
 777	hose->ops = &mpc83xx_pcie_ops;
 778	hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 779
 780	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0);
 781	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0);
 782
 783	if (fsl_pcie_check_link(hose))
 784		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 785
 786	return 0;
 787err1:
 788	iounmap(pcie->cfg_type0);
 789err0:
 790	kfree(pcie);
 791	return ret;
 792
 793}
 794
 795int __init mpc83xx_add_bridge(struct device_node *dev)
 796{
 797	int ret;
 798	int len;
 799	struct pci_controller *hose;
 800	struct resource rsrc_reg;
 801	struct resource rsrc_cfg;
 802	const int *bus_range;
 803	int primary;
 804
 805	is_mpc83xx_pci = 1;
 806
 807	if (!of_device_is_available(dev)) {
 808		pr_warn("%pOF: disabled by the firmware.\n",
 809			dev);
 810		return -ENODEV;
 811	}
 812	pr_debug("Adding PCI host bridge %pOF\n", dev);
 813
 814	/* Fetch host bridge registers address */
 815	if (of_address_to_resource(dev, 0, &rsrc_reg)) {
 816		printk(KERN_WARNING "Can't get pci register base!\n");
 817		return -ENOMEM;
 818	}
 819
 820	memset(&rsrc_cfg, 0, sizeof(rsrc_cfg));
 821
 822	if (of_address_to_resource(dev, 1, &rsrc_cfg)) {
 823		printk(KERN_WARNING
 824			"No pci config register base in dev tree, "
 825			"using default\n");
 826		/*
 827		 * MPC83xx supports up to two host controllers
 828		 * 	one at 0x8500 has config space registers at 0x8300
 829		 * 	one at 0x8600 has config space registers at 0x8380
 830		 */
 831		if ((rsrc_reg.start & 0xfffff) == 0x8500)
 832			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300;
 833		else if ((rsrc_reg.start & 0xfffff) == 0x8600)
 834			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380;
 835	}
 836	/*
 837	 * Controller at offset 0x8500 is primary
 838	 */
 839	if ((rsrc_reg.start & 0xfffff) == 0x8500)
 840		primary = 1;
 841	else
 842		primary = 0;
 843
 844	/* Get bus range if any */
 845	bus_range = of_get_property(dev, "bus-range", &len);
 846	if (bus_range == NULL || len < 2 * sizeof(int)) {
 847		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
 848		       " bus 0\n", dev);
 849	}
 850
 851	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 852	hose = pcibios_alloc_controller(dev);
 853	if (!hose)
 854		return -ENOMEM;
 855
 856	hose->first_busno = bus_range ? bus_range[0] : 0;
 857	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 858
 859	if (of_device_is_compatible(dev, "fsl,mpc8314-pcie")) {
 860		ret = mpc83xx_pcie_setup(hose, &rsrc_reg);
 861		if (ret)
 862			goto err0;
 863	} else {
 864		setup_indirect_pci(hose, rsrc_cfg.start,
 865				   rsrc_cfg.start + 4, 0);
 866	}
 867
 868	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
 869	       "Firmware bus number: %d->%d\n",
 870	       (unsigned long long)rsrc_reg.start, hose->first_busno,
 871	       hose->last_busno);
 872
 873	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
 874	    hose, hose->cfg_addr, hose->cfg_data);
 875
 876	/* Interpret the "ranges" property */
 877	/* This also maps the I/O region and sets isa_io/mem_base */
 878	pci_process_bridge_OF_ranges(hose, dev, primary);
 879
 880	return 0;
 881err0:
 882	pcibios_free_controller(hose);
 883	return ret;
 884}
 885#endif /* CONFIG_PPC_83xx */
 886
 887u64 fsl_pci_immrbar_base(struct pci_controller *hose)
 888{
 889#ifdef CONFIG_PPC_83xx
 890	if (is_mpc83xx_pci) {
 891		struct mpc83xx_pcie_priv *pcie = hose->dn->data;
 892		struct pex_inbound_window *in;
 893		int i;
 894
 895		/* Walk the Root Complex Inbound windows to match IMMR base */
 896		in = pcie->cfg_type0 + PEX_RC_INWIN_BASE;
 897		for (i = 0; i < 4; i++) {
 898			/* not enabled, skip */
 899			if (!(in_le32(&in[i].ar) & PEX_RCIWARn_EN))
 900				continue;
 901
 902			if (get_immrbase() == in_le32(&in[i].tar))
 903				return (u64)in_le32(&in[i].barh) << 32 |
 904					    in_le32(&in[i].barl);
 905		}
 906
 907		printk(KERN_WARNING "could not find PCI BAR matching IMMR\n");
 908	}
 909#endif
 910
 911#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 912	if (!is_mpc83xx_pci) {
 913		u32 base;
 914
 915		pci_bus_read_config_dword(hose->bus,
 916			PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base);
 917
 918		/*
 919		 * For PEXCSRBAR, bit 3-0 indicate prefetchable and
 920		 * address type. So when getting base address, these
 921		 * bits should be masked
 922		 */
 923		base &= PCI_BASE_ADDRESS_MEM_MASK;
 924
 925		return base;
 926	}
 927#endif
 928
 929	return 0;
 930}
 931
 932#ifdef CONFIG_E500
 933static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
 934{
 935	unsigned int rd, ra, rb, d;
 936
 937	rd = get_rt(inst);
 938	ra = get_ra(inst);
 939	rb = get_rb(inst);
 940	d = get_d(inst);
 941
 942	switch (get_op(inst)) {
 943	case 31:
 944		switch (get_xop(inst)) {
 945		case OP_31_XOP_LWZX:
 946		case OP_31_XOP_LWBRX:
 947			regs->gpr[rd] = 0xffffffff;
 948			break;
 949
 950		case OP_31_XOP_LWZUX:
 951			regs->gpr[rd] = 0xffffffff;
 952			regs->gpr[ra] += regs->gpr[rb];
 953			break;
 954
 955		case OP_31_XOP_LBZX:
 956			regs->gpr[rd] = 0xff;
 957			break;
 958
 959		case OP_31_XOP_LBZUX:
 960			regs->gpr[rd] = 0xff;
 961			regs->gpr[ra] += regs->gpr[rb];
 962			break;
 963
 964		case OP_31_XOP_LHZX:
 965		case OP_31_XOP_LHBRX:
 966			regs->gpr[rd] = 0xffff;
 967			break;
 968
 969		case OP_31_XOP_LHZUX:
 970			regs->gpr[rd] = 0xffff;
 971			regs->gpr[ra] += regs->gpr[rb];
 972			break;
 973
 974		case OP_31_XOP_LHAX:
 975			regs->gpr[rd] = ~0UL;
 976			break;
 977
 978		case OP_31_XOP_LHAUX:
 979			regs->gpr[rd] = ~0UL;
 980			regs->gpr[ra] += regs->gpr[rb];
 981			break;
 982
 983		default:
 984			return 0;
 985		}
 986		break;
 987
 988	case OP_LWZ:
 989		regs->gpr[rd] = 0xffffffff;
 990		break;
 991
 992	case OP_LWZU:
 993		regs->gpr[rd] = 0xffffffff;
 994		regs->gpr[ra] += (s16)d;
 995		break;
 996
 997	case OP_LBZ:
 998		regs->gpr[rd] = 0xff;
 999		break;
1000
1001	case OP_LBZU:
1002		regs->gpr[rd] = 0xff;
1003		regs->gpr[ra] += (s16)d;
1004		break;
1005
1006	case OP_LHZ:
1007		regs->gpr[rd] = 0xffff;
1008		break;
1009
1010	case OP_LHZU:
1011		regs->gpr[rd] = 0xffff;
1012		regs->gpr[ra] += (s16)d;
1013		break;
1014
1015	case OP_LHA:
1016		regs->gpr[rd] = ~0UL;
1017		break;
1018
1019	case OP_LHAU:
1020		regs->gpr[rd] = ~0UL;
1021		regs->gpr[ra] += (s16)d;
1022		break;
1023
1024	default:
1025		return 0;
1026	}
1027
1028	return 1;
1029}
1030
1031static int is_in_pci_mem_space(phys_addr_t addr)
1032{
1033	struct pci_controller *hose;
1034	struct resource *res;
1035	int i;
1036
1037	list_for_each_entry(hose, &hose_list, list_node) {
1038		if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG))
1039			continue;
1040
1041		for (i = 0; i < 3; i++) {
1042			res = &hose->mem_resources[i];
1043			if ((res->flags & IORESOURCE_MEM) &&
1044				addr >= res->start && addr <= res->end)
1045				return 1;
1046		}
1047	}
1048	return 0;
1049}
1050
1051int fsl_pci_mcheck_exception(struct pt_regs *regs)
1052{
1053	u32 inst;
1054	int ret;
1055	phys_addr_t addr = 0;
1056
1057	/* Let KVM/QEMU deal with the exception */
1058	if (regs->msr & MSR_GS)
1059		return 0;
1060
1061#ifdef CONFIG_PHYS_64BIT
1062	addr = mfspr(SPRN_MCARU);
1063	addr <<= 32;
1064#endif
1065	addr += mfspr(SPRN_MCAR);
1066
1067	if (is_in_pci_mem_space(addr)) {
1068		if (user_mode(regs)) {
1069			pagefault_disable();
1070			ret = get_user(inst, (__u32 __user *)regs->nip);
1071			pagefault_enable();
1072		} else {
1073			ret = probe_kernel_address((void *)regs->nip, inst);
1074		}
1075
1076		if (!ret && mcheck_handle_load(regs, inst)) {
1077			regs->nip += 4;
1078			return 1;
1079		}
1080	}
1081
1082	return 0;
1083}
1084#endif
1085
1086#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
1087static const struct of_device_id pci_ids[] = {
1088	{ .compatible = "fsl,mpc8540-pci", },
1089	{ .compatible = "fsl,mpc8548-pcie", },
1090	{ .compatible = "fsl,mpc8610-pci", },
1091	{ .compatible = "fsl,mpc8641-pcie", },
1092	{ .compatible = "fsl,qoriq-pcie", },
1093	{ .compatible = "fsl,qoriq-pcie-v2.1", },
1094	{ .compatible = "fsl,qoriq-pcie-v2.2", },
1095	{ .compatible = "fsl,qoriq-pcie-v2.3", },
1096	{ .compatible = "fsl,qoriq-pcie-v2.4", },
1097	{ .compatible = "fsl,qoriq-pcie-v3.0", },
1098
1099	/*
1100	 * The following entries are for compatibility with older device
1101	 * trees.
1102	 */
1103	{ .compatible = "fsl,p1022-pcie", },
1104	{ .compatible = "fsl,p4080-pcie", },
1105
1106	{},
1107};
1108
1109struct device_node *fsl_pci_primary;
1110
1111void fsl_pci_assign_primary(void)
1112{
1113	struct device_node *np;
1114
1115	/* Callers can specify the primary bus using other means. */
1116	if (fsl_pci_primary)
1117		return;
1118
1119	/* If a PCI host bridge contains an ISA node, it's primary. */
1120	np = of_find_node_by_type(NULL, "isa");
1121	while ((fsl_pci_primary = of_get_parent(np))) {
1122		of_node_put(np);
1123		np = fsl_pci_primary;
1124
1125		if (of_match_node(pci_ids, np) && of_device_is_available(np))
1126			return;
1127	}
1128
1129	/*
1130	 * If there's no PCI host bridge with ISA, arbitrarily
1131	 * designate one as primary.  This can go away once
1132	 * various bugs with primary-less systems are fixed.
1133	 */
1134	for_each_matching_node(np, pci_ids) {
1135		if (of_device_is_available(np)) {
1136			fsl_pci_primary = np;
1137			of_node_put(np);
1138			return;
1139		}
1140	}
1141}
1142
1143#ifdef CONFIG_PM_SLEEP
1144static irqreturn_t fsl_pci_pme_handle(int irq, void *dev_id)
1145{
1146	struct pci_controller *hose = dev_id;
1147	struct ccsr_pci __iomem *pci = hose->private_data;
1148	u32 dr;
1149
1150	dr = in_be32(&pci->pex_pme_mes_dr);
1151	if (!dr)
1152		return IRQ_NONE;
1153
1154	out_be32(&pci->pex_pme_mes_dr, dr);
1155
1156	return IRQ_HANDLED;
1157}
1158
1159static int fsl_pci_pme_probe(struct pci_controller *hose)
1160{
1161	struct ccsr_pci __iomem *pci;
1162	struct pci_dev *dev;
1163	int pme_irq;
1164	int res;
1165	u16 pms;
1166
1167	/* Get hose's pci_dev */
1168	dev = list_first_entry(&hose->bus->devices, typeof(*dev), bus_list);
1169
1170	/* PME Disable */
1171	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1172	pms &= ~PCI_PM_CTRL_PME_ENABLE;
1173	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1174
1175	pme_irq = irq_of_parse_and_map(hose->dn, 0);
1176	if (!pme_irq) {
1177		dev_err(&dev->dev, "Failed to map PME interrupt.\n");
1178
1179		return -ENXIO;
1180	}
1181
1182	res = devm_request_irq(hose->parent, pme_irq,
1183			fsl_pci_pme_handle,
1184			IRQF_SHARED,
1185			"[PCI] PME", hose);
1186	if (res < 0) {
1187		dev_err(&dev->dev, "Unable to request irq %d for PME\n", pme_irq);
1188		irq_dispose_mapping(pme_irq);
1189
1190		return -ENODEV;
1191	}
1192
1193	pci = hose->private_data;
1194
1195	/* Enable PTOD, ENL23D & EXL23D */
1196	clrbits32(&pci->pex_pme_mes_disr,
1197		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1198
1199	out_be32(&pci->pex_pme_mes_ier, 0);
1200	setbits32(&pci->pex_pme_mes_ier,
1201		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1202
1203	/* PME Enable */
1204	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1205	pms |= PCI_PM_CTRL_PME_ENABLE;
1206	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1207
1208	return 0;
1209}
1210
1211static void send_pme_turnoff_message(struct pci_controller *hose)
1212{
1213	struct ccsr_pci __iomem *pci = hose->private_data;
1214	u32 dr;
1215	int i;
1216
1217	/* Send PME_Turn_Off Message Request */
1218	setbits32(&pci->pex_pmcr, PEX_PMCR_PTOMR);
1219
1220	/* Wait trun off done */
1221	for (i = 0; i < 150; i++) {
1222		dr = in_be32(&pci->pex_pme_mes_dr);
1223		if (dr) {
1224			out_be32(&pci->pex_pme_mes_dr, dr);
1225			break;
1226		}
1227
1228		udelay(1000);
1229	}
1230}
1231
1232static void fsl_pci_syscore_do_suspend(struct pci_controller *hose)
1233{
1234	send_pme_turnoff_message(hose);
1235}
1236
1237static int fsl_pci_syscore_suspend(void)
1238{
1239	struct pci_controller *hose, *tmp;
1240
1241	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1242		fsl_pci_syscore_do_suspend(hose);
1243
1244	return 0;
1245}
1246
1247static void fsl_pci_syscore_do_resume(struct pci_controller *hose)
1248{
1249	struct ccsr_pci __iomem *pci = hose->private_data;
1250	u32 dr;
1251	int i;
1252
1253	/* Send Exit L2 State Message */
1254	setbits32(&pci->pex_pmcr, PEX_PMCR_EXL2S);
1255
 1256	/* Wait for exit to complete */
1257	for (i = 0; i < 150; i++) {
1258		dr = in_be32(&pci->pex_pme_mes_dr);
1259		if (dr) {
1260			out_be32(&pci->pex_pme_mes_dr, dr);
1261			break;
1262		}
1263
1264		udelay(1000);
1265	}
1266
1267	setup_pci_atmu(hose);
1268}
1269
1270static void fsl_pci_syscore_resume(void)
1271{
1272	struct pci_controller *hose, *tmp;
1273
1274	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1275		fsl_pci_syscore_do_resume(hose);
1276}
1277
1278static struct syscore_ops pci_syscore_pm_ops = {
1279	.suspend = fsl_pci_syscore_suspend,
1280	.resume = fsl_pci_syscore_resume,
1281};
1282#endif
1283
1284void fsl_pcibios_fixup_phb(struct pci_controller *phb)
1285{
1286#ifdef CONFIG_PM_SLEEP
1287	fsl_pci_pme_probe(phb);
1288#endif
1289}
1290
1291static int add_err_dev(struct platform_device *pdev)
1292{
1293	struct platform_device *errdev;
1294	struct mpc85xx_edac_pci_plat_data pd = {
1295		.of_node = pdev->dev.of_node
1296	};
1297
1298	errdev = platform_device_register_resndata(&pdev->dev,
1299						   "mpc85xx-pci-edac",
1300						   PLATFORM_DEVID_AUTO,
1301						   pdev->resource,
1302						   pdev->num_resources,
1303						   &pd, sizeof(pd));
1304
1305	return PTR_ERR_OR_ZERO(errdev);
1306}
1307
1308static int fsl_pci_probe(struct platform_device *pdev)
1309{
1310	struct device_node *node;
1311	int ret;
1312
1313	node = pdev->dev.of_node;
1314	ret = fsl_add_bridge(pdev, fsl_pci_primary == node);
1315	if (ret)
1316		return ret;
1317
1318	ret = add_err_dev(pdev);
1319	if (ret)
1320		dev_err(&pdev->dev, "couldn't register error device: %d\n",
1321			ret);
1322
1323	return 0;
1324}
1325
1326static struct platform_driver fsl_pci_driver = {
1327	.driver = {
1328		.name = "fsl-pci",
1329		.of_match_table = pci_ids,
1330	},
1331	.probe = fsl_pci_probe,
1332};
1333
1334static int __init fsl_pci_init(void)
1335{
1336#ifdef CONFIG_PM_SLEEP
1337	register_syscore_ops(&pci_syscore_pm_ops);
1338#endif
1339	return platform_driver_register(&fsl_pci_driver);
1340}
1341arch_initcall(fsl_pci_init);
1342#endif
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * MPC83xx/85xx/86xx PCI/PCIE support routing.
   4 *
   5 * Copyright 2007-2012 Freescale Semiconductor, Inc.
   6 * Copyright 2008-2009 MontaVista Software, Inc.
   7 *
   8 * Initial author: Xianghua Xiao <x.xiao@freescale.com>
   9 * Recode: ZHANG WEI <wei.zhang@freescale.com>
  10 * Rewrite the routing for Frescale PCI and PCI Express
  11 * 	Roy Zang <tie-fei.zang@freescale.com>
  12 * MPC83xx PCI-Express support:
  13 * 	Tony Li <tony.li@freescale.com>
  14 * 	Anton Vorontsov <avorontsov@ru.mvista.com>
  15 */
  16#include <linux/kernel.h>
  17#include <linux/pci.h>
  18#include <linux/delay.h>
  19#include <linux/string.h>
  20#include <linux/fsl/edac.h>
  21#include <linux/init.h>
  22#include <linux/interrupt.h>
  23#include <linux/memblock.h>
  24#include <linux/log2.h>
  25#include <linux/platform_device.h>
  26#include <linux/slab.h>
  27#include <linux/suspend.h>
  28#include <linux/syscore_ops.h>
  29#include <linux/uaccess.h>
  30
  31#include <asm/io.h>
  32#include <asm/prom.h>
  33#include <asm/pci-bridge.h>
  34#include <asm/ppc-pci.h>
  35#include <asm/machdep.h>
  36#include <asm/mpc85xx.h>
  37#include <asm/disassemble.h>
  38#include <asm/ppc-opcode.h>
  39#include <asm/swiotlb.h>
  40#include <sysdev/fsl_soc.h>
  41#include <sysdev/fsl_pci.h>
  42
  43static int fsl_pcie_bus_fixup, is_mpc83xx_pci;
  44
  45static void quirk_fsl_pcie_early(struct pci_dev *dev)
  46{
  47	u8 hdr_type;
  48
  49	/* if we aren't a PCIe don't bother */
  50	if (!pci_is_pcie(dev))
  51		return;
  52
  53	/* if we aren't in host mode don't bother */
  54	pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
  55	if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
  56		return;
  57
  58	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
  59	fsl_pcie_bus_fixup = 1;
  60	return;
  61}
  62
  63static int fsl_indirect_read_config(struct pci_bus *, unsigned int,
  64				    int, int, u32 *);
  65
  66static int fsl_pcie_check_link(struct pci_controller *hose)
  67{
  68	u32 val = 0;
  69
  70	if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
  71		if (hose->ops->read == fsl_indirect_read_config)
  72			__indirect_read_config(hose, hose->first_busno, 0,
  73					       PCIE_LTSSM, 4, &val);
  74		else
  75			early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
  76		if (val < PCIE_LTSSM_L0)
  77			return 1;
  78	} else {
  79		struct ccsr_pci __iomem *pci = hose->private_data;
  80		/* for PCIe IP rev 3.0 or greater use CSR0 for link state */
  81		val = (in_be32(&pci->pex_csr0) & PEX_CSR0_LTSSM_MASK)
  82				>> PEX_CSR0_LTSSM_SHIFT;
  83		if (val != PEX_CSR0_LTSSM_L0)
  84			return 1;
  85	}
  86
  87	return 0;
  88}
  89
  90static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
  91				    int offset, int len, u32 *val)
  92{
  93	struct pci_controller *hose = pci_bus_to_host(bus);
  94
  95	if (fsl_pcie_check_link(hose))
  96		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
  97	else
  98		hose->indirect_type &= ~PPC_INDIRECT_TYPE_NO_PCIE_LINK;
  99
 100	return indirect_read_config(bus, devfn, offset, len, val);
 101}
 102
 103#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 104
 105static struct pci_ops fsl_indirect_pcie_ops =
 106{
 107	.read = fsl_indirect_read_config,
 108	.write = indirect_write_config,
 109};
 110
 111static u64 pci64_dma_offset;
 112
 113#ifdef CONFIG_SWIOTLB
 114static void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
 115{
 116	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
 117
 118	pdev->dev.bus_dma_limit =
 119		hose->dma_window_base_cur + hose->dma_window_size - 1;
 120}
 121
 122static void setup_swiotlb_ops(struct pci_controller *hose)
 123{
 124	if (ppc_swiotlb_enable)
 125		hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
 126}
 127#else
 128static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
 129#endif
 130
 131static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
 132{
 133	/*
 134	 * Fix up PCI devices that are able to DMA to the large inbound
 135	 * mapping that allows addressing any RAM address from across PCI.
 136	 */
 137	if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
 138		dev->bus_dma_limit = 0;
 139		dev->archdata.dma_offset = pci64_dma_offset;
 140	}
 141}
 142
 143static int setup_one_atmu(struct ccsr_pci __iomem *pci,
 144	unsigned int index, const struct resource *res,
 145	resource_size_t offset)
 146{
 147	resource_size_t pci_addr = res->start - offset;
 148	resource_size_t phys_addr = res->start;
 149	resource_size_t size = resource_size(res);
 150	u32 flags = 0x80044000; /* enable & mem R/W */
 151	unsigned int i;
 152
 153	pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
 154		(u64)res->start, (u64)size);
 155
 156	if (res->flags & IORESOURCE_PREFETCH)
 157		flags |= 0x10000000; /* enable relaxed ordering */
 158
 159	for (i = 0; size > 0; i++) {
 160		unsigned int bits = min_t(u32, ilog2(size),
 161					__ffs(pci_addr | phys_addr));
 162
 163		if (index + i >= 5)
 164			return -1;
 165
 166		out_be32(&pci->pow[index + i].potar, pci_addr >> 12);
 167		out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44);
 168		out_be32(&pci->pow[index + i].powbar, phys_addr >> 12);
 169		out_be32(&pci->pow[index + i].powar, flags | (bits - 1));
 170
 171		pci_addr += (resource_size_t)1U << bits;
 172		phys_addr += (resource_size_t)1U << bits;
 173		size -= (resource_size_t)1U << bits;
 174	}
 175
 176	return i;
 177}
 178
 179static bool is_kdump(void)
 180{
 181	struct device_node *node;
 182
 183	node = of_find_node_by_type(NULL, "memory");
 184	if (!node) {
 185		WARN_ON_ONCE(1);
 186		return false;
 187	}
 188
 189	return of_property_read_bool(node, "linux,usable-memory");
 190}
 191
 192/* atmu setup for fsl pci/pcie controller */
 193static void setup_pci_atmu(struct pci_controller *hose)
 194{
 195	struct ccsr_pci __iomem *pci = hose->private_data;
 196	int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
 197	u64 mem, sz, paddr_hi = 0;
 198	u64 offset = 0, paddr_lo = ULLONG_MAX;
 199	u32 pcicsrbar = 0, pcicsrbar_sz;
 200	u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
 201			PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
 202	const u64 *reg;
 203	int len;
 204	bool setup_inbound;
 205
 206	/*
 207	 * If this is kdump, we don't want to trigger a bunch of PCI
 208	 * errors by closing the window on in-flight DMA.
 209	 *
 210	 * We still run most of the function's logic so that things like
 211	 * hose->dma_window_size still get set.
 212	 */
 213	setup_inbound = !is_kdump();
 214
 215	if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
 216		/*
 217		 * BSC9132 Rev1.0 has an issue where all the PEX inbound
 218		 * windows have implemented the default target value as 0xf
 219		 * for CCSR space.In all Freescale legacy devices the target
 220		 * of 0xf is reserved for local memory space. 9132 Rev1.0
 221		 * now has local mempry space mapped to target 0x0 instead of
 222		 * 0xf. Hence adding a workaround to remove the target 0xf
 223		 * defined for memory space from Inbound window attributes.
 224		 */
 225		piwar &= ~PIWAR_TGI_LOCAL;
 226	}
 227
 228	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 229		if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
 230			win_idx = 2;
 231			start_idx = 0;
 232			end_idx = 3;
 233		}
 234	}
 235
 236	/* Disable all windows (except powar0 since it's ignored) */
 237	for(i = 1; i < 5; i++)
 238		out_be32(&pci->pow[i].powar, 0);
 239
 240	if (setup_inbound) {
 241		for (i = start_idx; i < end_idx; i++)
 242			out_be32(&pci->piw[i].piwar, 0);
 243	}
 244
 245	/* Setup outbound MEM window */
 246	for(i = 0, j = 1; i < 3; i++) {
 247		if (!(hose->mem_resources[i].flags & IORESOURCE_MEM))
 248			continue;
 249
 250		paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start);
 251		paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end);
 252
 253		/* We assume all memory resources have the same offset */
 254		offset = hose->mem_offset[i];
 255		n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset);
 256
 257		if (n < 0 || j >= 5) {
 258			pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
 259			hose->mem_resources[i].flags |= IORESOURCE_DISABLED;
 260		} else
 261			j += n;
 262	}
 263
 264	/* Setup outbound IO window */
 265	if (hose->io_resource.flags & IORESOURCE_IO) {
 266		if (j >= 5) {
 267			pr_err("Ran out of outbound PCI ATMUs for IO resource\n");
 268		} else {
 269			pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
 270				 "phy base 0x%016llx.\n",
 271				 (u64)hose->io_resource.start,
 272				 (u64)resource_size(&hose->io_resource),
 273				 (u64)hose->io_base_phys);
 274			out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
 275			out_be32(&pci->pow[j].potear, 0);
 276			out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
 277			/* Enable, IO R/W */
 278			out_be32(&pci->pow[j].powar, 0x80088000
 279				| (ilog2(hose->io_resource.end
 280				- hose->io_resource.start + 1) - 1));
 281		}
 282	}
 283
 284	/* convert to pci address space */
 285	paddr_hi -= offset;
 286	paddr_lo -= offset;
 287
 288	if (paddr_hi == paddr_lo) {
 289		pr_err("%pOF: No outbound window space\n", hose->dn);
 290		return;
 291	}
 292
 293	if (paddr_lo == 0) {
 294		pr_err("%pOF: No space for inbound window\n", hose->dn);
 295		return;
 296	}
 297
 298	/* setup PCSRBAR/PEXCSRBAR */
 299	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
 300	early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz);
 301	pcicsrbar_sz = ~pcicsrbar_sz + 1;
 302
 303	if (paddr_hi < (0x100000000ull - pcicsrbar_sz) ||
 304		(paddr_lo > 0x100000000ull))
 305		pcicsrbar = 0x100000000ull - pcicsrbar_sz;
 306	else
 307		pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz;
 308	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar);
 309
 310	paddr_lo = min(paddr_lo, (u64)pcicsrbar);
 311
 312	pr_info("%pOF: PCICSRBAR @ 0x%x\n", hose->dn, pcicsrbar);
 313
 314	/* Setup inbound mem window */
 315	mem = memblock_end_of_DRAM();
 316	pr_info("%s: end of DRAM %llx\n", __func__, mem);
 317
 318	/*
 319	 * The msi-address-64 property, if it exists, indicates the physical
 320	 * address of the MSIIR register.  Normally, this register is located
 321	 * inside CCSR, so the ATMU that covers all of CCSR is used. But if
 322	 * this property exists, then we normally need to create a new ATMU
 323	 * for it.  For now, however, we cheat.  The only entity that creates
 324	 * this property is the Freescale hypervisor, and the address is
 325	 * specified in the partition configuration.  Typically, the address
 326	 * is located in the page immediately after the end of DDR.  If so, we
 327	 * can avoid allocating a new ATMU by extending the DDR ATMU by one
 328	 * page.
 329	 */
 330	reg = of_get_property(hose->dn, "msi-address-64", &len);
 331	if (reg && (len == sizeof(u64))) {
 332		u64 address = be64_to_cpup(reg);
 333
 334		if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
 335			pr_info("%pOF: extending DDR ATMU to cover MSIIR", hose->dn);
 336			mem += PAGE_SIZE;
 337		} else {
 338			/* TODO: Create a new ATMU for MSIIR */
 339			pr_warn("%pOF: msi-address-64 address of %llx is "
 340				"unsupported\n", hose->dn, address);
 341		}
 342	}
 343
 344	sz = min(mem, paddr_lo);
 345	mem_log = ilog2(sz);
 346
 347	/* PCIe can overmap inbound & outbound since RX & TX are separated */
 348	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 349		/* Size window to exact size if power-of-two or one size up */
 350		if ((1ull << mem_log) != mem) {
 351			mem_log++;
 352			if ((1ull << mem_log) > mem)
 353				pr_info("%pOF: Setting PCI inbound window "
 354					"greater than memory size\n", hose->dn);
 355		}
 356
 357		piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);
 358
 359		if (setup_inbound) {
 360			/* Setup inbound memory window */
 361			out_be32(&pci->piw[win_idx].pitar,  0x00000000);
 362			out_be32(&pci->piw[win_idx].piwbar, 0x00000000);
 363			out_be32(&pci->piw[win_idx].piwar,  piwar);
 364		}
 365
 366		win_idx--;
 367		hose->dma_window_base_cur = 0x00000000;
 368		hose->dma_window_size = (resource_size_t)sz;
 369
 370		/*
 371		 * if we have >4G of memory setup second PCI inbound window to
 372		 * let devices that are 64-bit address capable to work w/o
 373		 * SWIOTLB and access the full range of memory
 374		 */
 375		if (sz != mem) {
 376			mem_log = ilog2(mem);
 377
 378			/* Size window up if we dont fit in exact power-of-2 */
 379			if ((1ull << mem_log) != mem)
 380				mem_log++;
 381
 382			piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
 383			pci64_dma_offset = 1ULL << mem_log;
 384
 385			if (setup_inbound) {
 386				/* Setup inbound memory window */
 387				out_be32(&pci->piw[win_idx].pitar,  0x00000000);
 388				out_be32(&pci->piw[win_idx].piwbear,
 389						pci64_dma_offset >> 44);
 390				out_be32(&pci->piw[win_idx].piwbar,
 391						pci64_dma_offset >> 12);
 392				out_be32(&pci->piw[win_idx].piwar,  piwar);
 393			}
 394
 395			/*
 396			 * install our own dma_set_mask handler to fixup dma_ops
 397			 * and dma_offset
 398			 */
 399			ppc_md.dma_set_mask = fsl_pci_dma_set_mask;
 400
 401			pr_info("%pOF: Setup 64-bit PCI DMA window\n", hose->dn);
 402		}
 403	} else {
 404		u64 paddr = 0;
 405
 406		if (setup_inbound) {
 407			/* Setup inbound memory window */
 408			out_be32(&pci->piw[win_idx].pitar,  paddr >> 12);
 409			out_be32(&pci->piw[win_idx].piwbar, paddr >> 12);
 410			out_be32(&pci->piw[win_idx].piwar,
 411				 (piwar | (mem_log - 1)));
 412		}
 413
 414		win_idx--;
 415		paddr += 1ull << mem_log;
 416		sz -= 1ull << mem_log;
 417
 418		if (sz) {
 419			mem_log = ilog2(sz);
 420			piwar |= (mem_log - 1);
 421
 422			if (setup_inbound) {
 423				out_be32(&pci->piw[win_idx].pitar,
 424					 paddr >> 12);
 425				out_be32(&pci->piw[win_idx].piwbar,
 426					 paddr >> 12);
 427				out_be32(&pci->piw[win_idx].piwar, piwar);
 428			}
 429
 430			win_idx--;
 431			paddr += 1ull << mem_log;
 432		}
 433
 434		hose->dma_window_base_cur = 0x00000000;
 435		hose->dma_window_size = (resource_size_t)paddr;
 436	}
 437
 438	if (hose->dma_window_size < mem) {
 439#ifdef CONFIG_SWIOTLB
 440		ppc_swiotlb_enable = 1;
 441#else
 442		pr_err("%pOF: ERROR: Memory size exceeds PCI ATMU ability to "
 443			"map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
 444			 hose->dn);
 445#endif
 446		/* adjusting outbound windows could reclaim space in mem map */
 447		if (paddr_hi < 0xffffffffull)
 448			pr_warn("%pOF: WARNING: Outbound window cfg leaves "
 449				"gaps in memory map. Adjusting the memory map "
 450				"could reduce unnecessary bounce buffering.\n",
 451				hose->dn);
 452
 453		pr_info("%pOF: DMA window size is 0x%llx\n", hose->dn,
 454			(u64)hose->dma_window_size);
 455	}
 456}
 457
 458static void setup_pci_cmd(struct pci_controller *hose)
 459{
 460	u16 cmd;
 461	int cap_x;
 462
 463	early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
 464	cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
 465		| PCI_COMMAND_IO;
 466	early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);
 467
 468	cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX);
 469	if (cap_x) {
 470		int pci_x_cmd = cap_x + PCI_X_CMD;
 471		cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
 472			| PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
 473		early_write_config_word(hose, 0, 0, pci_x_cmd, cmd);
 474	} else {
 475		early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
 476	}
 477}
 478
 479void fsl_pcibios_fixup_bus(struct pci_bus *bus)
 480{
 481	struct pci_controller *hose = pci_bus_to_host(bus);
 482	int i, is_pcie = 0, no_link;
 483
 484	/* The root complex bridge comes up with bogus resources,
 485	 * we copy the PHB ones in.
 486	 *
 487	 * With the current generic PCI code, the PHB bus no longer
 488	 * has bus->resource[0..4] set, so things are a bit more
 489	 * tricky.
 490	 */
 491
 492	if (fsl_pcie_bus_fixup)
 493		is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
 494	no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);
 495
 496	if (bus->parent == hose->bus && (is_pcie || no_link)) {
 497		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
 498			struct resource *res = bus->resource[i];
 499			struct resource *par;
 500
 501			if (!res)
 502				continue;
 503			if (i == 0)
 504				par = &hose->io_resource;
 505			else if (i < 4)
 506				par = &hose->mem_resources[i-1];
 507			else par = NULL;
 508
 509			res->start = par ? par->start : 0;
 510			res->end   = par ? par->end   : 0;
 511			res->flags = par ? par->flags : 0;
 512		}
 513	}
 514}
 515
 516int fsl_add_bridge(struct platform_device *pdev, int is_primary)
 517{
 518	int len;
 519	struct pci_controller *hose;
 520	struct resource rsrc;
 521	const int *bus_range;
 522	u8 hdr_type, progif;
 523	struct device_node *dev;
 524	struct ccsr_pci __iomem *pci;
 525	u16 temp;
 526	u32 svr = mfspr(SPRN_SVR);
 527
 528	dev = pdev->dev.of_node;
 529
 530	if (!of_device_is_available(dev)) {
 531		pr_warn("%pOF: disabled\n", dev);
 532		return -ENODEV;
 533	}
 534
 535	pr_debug("Adding PCI host bridge %pOF\n", dev);
 536
 537	/* Fetch host bridge registers address */
 538	if (of_address_to_resource(dev, 0, &rsrc)) {
 539		printk(KERN_WARNING "Can't get pci register base!");
 540		return -ENOMEM;
 541	}
 542
 543	/* Get bus range if any */
 544	bus_range = of_get_property(dev, "bus-range", &len);
 545	if (bus_range == NULL || len < 2 * sizeof(int))
 546		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
 547			" bus 0\n", dev);
 548
 549	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 550	hose = pcibios_alloc_controller(dev);
 551	if (!hose)
 552		return -ENOMEM;
 553
 554	/* set platform device as the parent */
 555	hose->parent = &pdev->dev;
 556	hose->first_busno = bus_range ? bus_range[0] : 0x0;
 557	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 558
 559	pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
 560		 (u64)rsrc.start, (u64)resource_size(&rsrc));
 561
 562	pci = hose->private_data = ioremap(rsrc.start, resource_size(&rsrc));
 563	if (!hose->private_data)
 564		goto no_bridge;
 565
 566	setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
 567			   PPC_INDIRECT_TYPE_BIG_ENDIAN);
 568
 569	if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
 570		hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 571
 572	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 573		/* use fsl_indirect_read_config for PCIe */
 574		hose->ops = &fsl_indirect_pcie_ops;
 575		/* For PCIE read HEADER_TYPE to identify controller mode */
 576		early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
 577		if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
 578			goto no_bridge;
 579
 580	} else {
 581		/* For PCI read PROG to identify controller mode */
 582		early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
 583		if ((progif & 1) &&
 584		    !of_property_read_bool(dev, "fsl,pci-agent-force-enum"))
 585			goto no_bridge;
 586	}
 587
 588	setup_pci_cmd(hose);
 589
 590	/* check PCI express link status */
 591	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 592		hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG |
 593			PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
 594		if (fsl_pcie_check_link(hose))
 595			hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 596	} else {
 597		/*
  598		 * Set PBFR (PCI Bus Function Register)[10] = 1 to
  599		 * disable combining requests that cross a cacheline
  600		 * boundary into one burst transaction.
  601		 * PCI-X operation is not affected.
  602		 * Fixes erratum PCI 5 on MPC8548.
 603		 */
 604#define PCI_BUS_FUNCTION 0x44
 605#define PCI_BUS_FUNCTION_MDS 0x400	/* Master disable streaming */
 606		if (((SVR_SOC_VER(svr) == SVR_8543) ||
 607		     (SVR_SOC_VER(svr) == SVR_8545) ||
 608		     (SVR_SOC_VER(svr) == SVR_8547) ||
 609		     (SVR_SOC_VER(svr) == SVR_8548)) &&
 610		    !early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX)) {
 611			early_read_config_word(hose, 0, 0,
 612					PCI_BUS_FUNCTION, &temp);
 613			temp |= PCI_BUS_FUNCTION_MDS;
 614			early_write_config_word(hose, 0, 0,
 615					PCI_BUS_FUNCTION, temp);
 616		}
 617	}
 618
 619	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
 620		"Firmware bus number: %d->%d\n",
 621		(unsigned long long)rsrc.start, hose->first_busno,
 622		hose->last_busno);
 623
 624	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
 625		hose, hose->cfg_addr, hose->cfg_data);
 626
 627	/* Interpret the "ranges" property */
 628	/* This also maps the I/O region and sets isa_io/mem_base */
 629	pci_process_bridge_OF_ranges(hose, dev, is_primary);
 630
 631	/* Setup PEX window registers */
 632	setup_pci_atmu(hose);
 633
 634	/* Set up controller operations */
 635	setup_swiotlb_ops(hose);
 636
 637	return 0;
 638
 639no_bridge:
 640	iounmap(hose->private_data);
 641	/* unmap cfg_data & cfg_addr separately if not on same page */
 642	if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
 643	    ((unsigned long)hose->cfg_addr & PAGE_MASK))
 644		iounmap(hose->cfg_data);
 645	iounmap(hose->cfg_addr);
 646	pcibios_free_controller(hose);
 647	return -ENODEV;
 648}
 649#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
 650
 651DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID,
 652			quirk_fsl_pcie_early);
 653
 654#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
 655struct mpc83xx_pcie_priv {
 656	void __iomem *cfg_type0;
 657	void __iomem *cfg_type1;
 658	u32 dev_base;
 659};
 660
 661struct pex_inbound_window {
 662	u32 ar;
 663	u32 tar;
 664	u32 barl;
 665	u32 barh;
 666};
 667
 668/*
  669 * By U-Boot convention, PCIe outbound window 0 is used for
  670 * configuration transactions.
 671 */
 672#define PEX_OUTWIN0_BAR		0xCA4
 673#define PEX_OUTWIN0_TAL		0xCA8
 674#define PEX_OUTWIN0_TAH		0xCAC
 675#define PEX_RC_INWIN_BASE	0xE60
 676#define PEX_RCIWARn_EN		0x1
 677
 678static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn)
 679{
 680	struct pci_controller *hose = pci_bus_to_host(bus);
 681
 682	if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK)
 683		return PCIBIOS_DEVICE_NOT_FOUND;
 684	/*
  685	 * Workaround for a HW bug: for Type 0 configuration transactions the
 686	 * PCI-E controller does not check the device number bits and just
 687	 * assumes that the device number bits are 0.
 688	 */
 689	if (bus->number == hose->first_busno ||
 690			bus->primary == hose->first_busno) {
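		/* devfn bits 7:3 are the device number; only device 0 is valid here */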
 691		if (devfn & 0xf8)
 692			return PCIBIOS_DEVICE_NOT_FOUND;
 693	}
 694
 695	if (ppc_md.pci_exclude_device) {
 696		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
 697			return PCIBIOS_DEVICE_NOT_FOUND;
 698	}
 699
 700	return PCIBIOS_SUCCESSFUL;
 701}
 702
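/*
 * Return a virtual address for the requested config register.  Accesses to
 * the root bus go straight through the type 0 window; for everything else,
 * outbound window 0 is retargeted at the bus/devfn (via PEX_OUTWIN0_TAL)
 * and the shared type 1 window is used.  Returns NULL when the device is
 * excluded or the link is down.
 */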
 703static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus,
 704					    unsigned int devfn, int offset)
 705{
 706	struct pci_controller *hose = pci_bus_to_host(bus);
 707	struct mpc83xx_pcie_priv *pcie = hose->dn->data;
 708	u32 dev_base = bus->number << 24 | devfn << 16;
 709	int ret;
 710
 711	ret = mpc83xx_pcie_exclude_device(bus, devfn);
 712	if (ret)
 713		return NULL;
 714
 715	offset &= 0xfff;
 716
 717	/* Type 0 */
 718	if (bus->number == hose->first_busno)
 719		return pcie->cfg_type0 + offset;
 720
 721	if (pcie->dev_base == dev_base)
 722		goto mapped;
 723
 724	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, dev_base);
 725
 726	pcie->dev_base = dev_base;
 727mapped:
 728	return pcie->cfg_type1 + offset;
 729}
 730
 731static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
 732				     int offset, int len, u32 val)
 733{
 734	struct pci_controller *hose = pci_bus_to_host(bus);
 735
 736	/* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
 737	if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
 738		val &= 0xffffff00;
 739
 740	return pci_generic_config_write(bus, devfn, offset, len, val);
 741}
 742
 743static struct pci_ops mpc83xx_pcie_ops = {
 744	.map_bus = mpc83xx_pcie_remap_cfg,
 745	.read = pci_generic_config_read,
 746	.write = mpc83xx_pcie_write_config,
 747};
 748
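/*
 * Map the two config space windows of an MPC83xx/MPC512x PCIe controller:
 * the type 0 window comes from the "reg" resource, while the type 1 window
 * lives at whatever address the bootloader programmed into outbound
 * window 0's BAR (see the U-Boot convention noted above).
 */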
 749static int __init mpc83xx_pcie_setup(struct pci_controller *hose,
 750				     struct resource *reg)
 751{
 752	struct mpc83xx_pcie_priv *pcie;
 753	u32 cfg_bar;
 754	int ret = -ENOMEM;
 755
 756	pcie = zalloc_maybe_bootmem(sizeof(*pcie), GFP_KERNEL);
 757	if (!pcie)
 758		return ret;
 759
 760	pcie->cfg_type0 = ioremap(reg->start, resource_size(reg));
 761	if (!pcie->cfg_type0)
 762		goto err0;
 763
 764	cfg_bar = in_le32(pcie->cfg_type0 + PEX_OUTWIN0_BAR);
 765	if (!cfg_bar) {
 766		/* PCI-E isn't configured. */
 767		ret = -ENODEV;
 768		goto err1;
 769	}
 770
 771	pcie->cfg_type1 = ioremap(cfg_bar, 0x1000);
 772	if (!pcie->cfg_type1)
 773		goto err1;
 774
 775	WARN_ON(hose->dn->data);
 776	hose->dn->data = pcie;
 777	hose->ops = &mpc83xx_pcie_ops;
 778	hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 779
 780	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0);
 781	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0);
 782
 783	if (fsl_pcie_check_link(hose))
 784		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 785
 786	return 0;
 787err1:
 788	iounmap(pcie->cfg_type0);
 789err0:
 790	kfree(pcie);
 791	return ret;
 792
 793}
 794
 795int __init mpc83xx_add_bridge(struct device_node *dev)
 796{
 797	int ret;
 798	int len;
 799	struct pci_controller *hose;
 800	struct resource rsrc_reg;
 801	struct resource rsrc_cfg;
 802	const int *bus_range;
 803	int primary;
 804
 805	is_mpc83xx_pci = 1;
 806
 807	if (!of_device_is_available(dev)) {
 808		pr_warn("%pOF: disabled by the firmware.\n",
 809			dev);
 810		return -ENODEV;
 811	}
 812	pr_debug("Adding PCI host bridge %pOF\n", dev);
 813
  814	/* Fetch the host bridge register base address */
 815	if (of_address_to_resource(dev, 0, &rsrc_reg)) {
 816		printk(KERN_WARNING "Can't get pci register base!\n");
 817		return -ENOMEM;
 818	}
 819
 820	memset(&rsrc_cfg, 0, sizeof(rsrc_cfg));
 821
 822	if (of_address_to_resource(dev, 1, &rsrc_cfg)) {
 823		printk(KERN_WARNING
 824			"No pci config register base in dev tree, "
 825			"using default\n");
 826		/*
 827		 * MPC83xx supports up to two host controllers
 828		 * 	one at 0x8500 has config space registers at 0x8300
 829		 * 	one at 0x8600 has config space registers at 0x8380
 830		 */
 831		if ((rsrc_reg.start & 0xfffff) == 0x8500)
 832			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300;
 833		else if ((rsrc_reg.start & 0xfffff) == 0x8600)
 834			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380;
 835	}
 836	/*
 837	 * Controller at offset 0x8500 is primary
 838	 */
 839	if ((rsrc_reg.start & 0xfffff) == 0x8500)
 840		primary = 1;
 841	else
 842		primary = 0;
 843
 844	/* Get bus range if any */
 845	bus_range = of_get_property(dev, "bus-range", &len);
 846	if (bus_range == NULL || len < 2 * sizeof(int)) {
 847		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
 848		       " bus 0\n", dev);
 849	}
 850
 851	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 852	hose = pcibios_alloc_controller(dev);
 853	if (!hose)
 854		return -ENOMEM;
 855
 856	hose->first_busno = bus_range ? bus_range[0] : 0;
 857	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 858
 859	if (of_device_is_compatible(dev, "fsl,mpc8314-pcie")) {
 860		ret = mpc83xx_pcie_setup(hose, &rsrc_reg);
 861		if (ret)
 862			goto err0;
 863	} else {
 864		setup_indirect_pci(hose, rsrc_cfg.start,
 865				   rsrc_cfg.start + 4, 0);
 866	}
 867
 868	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
 869	       "Firmware bus number: %d->%d\n",
 870	       (unsigned long long)rsrc_reg.start, hose->first_busno,
 871	       hose->last_busno);
 872
 873	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
 874	    hose, hose->cfg_addr, hose->cfg_data);
 875
 876	/* Interpret the "ranges" property */
 877	/* This also maps the I/O region and sets isa_io/mem_base */
 878	pci_process_bridge_OF_ranges(hose, dev, primary);
 879
 880	return 0;
 881err0:
 882	pcibios_free_controller(hose);
 883	return ret;
 884}
 885#endif /* CONFIG_PPC_83xx */
 886
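/*
 * Return the PCI bus address at which the SoC's internal register space
 * (IMMR/CCSR) is visible: on 83xx by scanning the root complex inbound
 * windows for one targeting get_immrbase(), on 85xx/86xx by reading the
 * host bridge's BAR0 (PEXCSRBAR).
 */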
 887u64 fsl_pci_immrbar_base(struct pci_controller *hose)
 888{
 889#ifdef CONFIG_PPC_83xx
 890	if (is_mpc83xx_pci) {
 891		struct mpc83xx_pcie_priv *pcie = hose->dn->data;
 892		struct pex_inbound_window *in;
 893		int i;
 894
 895		/* Walk the Root Complex Inbound windows to match IMMR base */
 896		in = pcie->cfg_type0 + PEX_RC_INWIN_BASE;
 897		for (i = 0; i < 4; i++) {
 898			/* not enabled, skip */
 899			if (!(in_le32(&in[i].ar) & PEX_RCIWARn_EN))
 900				continue;
 901
 902			if (get_immrbase() == in_le32(&in[i].tar))
 903				return (u64)in_le32(&in[i].barh) << 32 |
 904					    in_le32(&in[i].barl);
 905		}
 906
 907		printk(KERN_WARNING "could not find PCI BAR matching IMMR\n");
 908	}
 909#endif
 910
 911#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 912	if (!is_mpc83xx_pci) {
 913		u32 base;
 914
 915		pci_bus_read_config_dword(hose->bus,
 916			PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base);
 917
 918		/*
  919		 * In PEXCSRBAR, bits 3-0 encode the prefetchable flag and
  920		 * the address type, so they must be masked off when
  921		 * extracting the base address.
 922		 */
 923		base &= PCI_BASE_ADDRESS_MEM_MASK;
 924
 925		return base;
 926	}
 927#endif
 928
 929	return 0;
 930}
 931
 932#ifdef CONFIG_E500
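/*
 * A load that machine-checks on a PCI access is emulated here as if it had
 * returned all ones (the conventional master-abort value), sized to the
 * access width and with the base register updated for the "update" forms,
 * so that fsl_pci_mcheck_exception() can skip the instruction instead of
 * letting the kernel die.
 */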
 933static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
 934{
 935	unsigned int rd, ra, rb, d;
 936
 937	rd = get_rt(inst);
 938	ra = get_ra(inst);
 939	rb = get_rb(inst);
 940	d = get_d(inst);
 941
 942	switch (get_op(inst)) {
 943	case 31:
 944		switch (get_xop(inst)) {
 945		case OP_31_XOP_LWZX:
 946		case OP_31_XOP_LWBRX:
 947			regs->gpr[rd] = 0xffffffff;
 948			break;
 949
 950		case OP_31_XOP_LWZUX:
 951			regs->gpr[rd] = 0xffffffff;
 952			regs->gpr[ra] += regs->gpr[rb];
 953			break;
 954
 955		case OP_31_XOP_LBZX:
 956			regs->gpr[rd] = 0xff;
 957			break;
 958
 959		case OP_31_XOP_LBZUX:
 960			regs->gpr[rd] = 0xff;
 961			regs->gpr[ra] += regs->gpr[rb];
 962			break;
 963
 964		case OP_31_XOP_LHZX:
 965		case OP_31_XOP_LHBRX:
 966			regs->gpr[rd] = 0xffff;
 967			break;
 968
 969		case OP_31_XOP_LHZUX:
 970			regs->gpr[rd] = 0xffff;
 971			regs->gpr[ra] += regs->gpr[rb];
 972			break;
 973
 974		case OP_31_XOP_LHAX:
 975			regs->gpr[rd] = ~0UL;
 976			break;
 977
 978		case OP_31_XOP_LHAUX:
 979			regs->gpr[rd] = ~0UL;
 980			regs->gpr[ra] += regs->gpr[rb];
 981			break;
 982
 983		default:
 984			return 0;
 985		}
 986		break;
 987
 988	case OP_LWZ:
 989		regs->gpr[rd] = 0xffffffff;
 990		break;
 991
 992	case OP_LWZU:
 993		regs->gpr[rd] = 0xffffffff;
 994		regs->gpr[ra] += (s16)d;
 995		break;
 996
 997	case OP_LBZ:
 998		regs->gpr[rd] = 0xff;
 999		break;
1000
1001	case OP_LBZU:
1002		regs->gpr[rd] = 0xff;
1003		regs->gpr[ra] += (s16)d;
1004		break;
1005
1006	case OP_LHZ:
1007		regs->gpr[rd] = 0xffff;
1008		break;
1009
1010	case OP_LHZU:
1011		regs->gpr[rd] = 0xffff;
1012		regs->gpr[ra] += (s16)d;
1013		break;
1014
1015	case OP_LHA:
1016		regs->gpr[rd] = ~0UL;
1017		break;
1018
1019	case OP_LHAU:
1020		regs->gpr[rd] = ~0UL;
1021		regs->gpr[ra] += (s16)d;
1022		break;
1023
1024	default:
1025		return 0;
1026	}
1027
1028	return 1;
1029}
1030
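/*
 * Check whether the faulting address lies inside an outbound memory window
 * of one of the PCIe controllers (identified by PPC_INDIRECT_TYPE_EXT_REG).
 */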
1031static int is_in_pci_mem_space(phys_addr_t addr)
1032{
1033	struct pci_controller *hose;
1034	struct resource *res;
1035	int i;
1036
1037	list_for_each_entry(hose, &hose_list, list_node) {
1038		if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG))
1039			continue;
1040
1041		for (i = 0; i < 3; i++) {
1042			res = &hose->mem_resources[i];
1043			if ((res->flags & IORESOURCE_MEM) &&
1044				addr >= res->start && addr <= res->end)
1045				return 1;
1046		}
1047	}
1048	return 0;
1049}
1050
1051int fsl_pci_mcheck_exception(struct pt_regs *regs)
1052{
1053	u32 inst;
1054	int ret;
1055	phys_addr_t addr = 0;
1056
1057	/* Let KVM/QEMU deal with the exception */
1058	if (regs->msr & MSR_GS)
1059		return 0;
1060
1061#ifdef CONFIG_PHYS_64BIT
1062	addr = mfspr(SPRN_MCARU);
1063	addr <<= 32;
1064#endif
1065	addr += mfspr(SPRN_MCAR);
1066
1067	if (is_in_pci_mem_space(addr)) {
1068		if (user_mode(regs))
1069			ret = copy_from_user_nofault(&inst,
1070					(void __user *)regs->nip, sizeof(inst));
1071		else
1072			ret = get_kernel_nofault(inst, (void *)regs->nip);
1073
1074		if (!ret && mcheck_handle_load(regs, inst)) {
1075			regs_add_return_ip(regs, 4);
1076			return 1;
1077		}
1078	}
1079
1080	return 0;
1081}
1082#endif
1083
1084#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
1085static const struct of_device_id pci_ids[] = {
1086	{ .compatible = "fsl,mpc8540-pci", },
1087	{ .compatible = "fsl,mpc8548-pcie", },
1088	{ .compatible = "fsl,mpc8610-pci", },
1089	{ .compatible = "fsl,mpc8641-pcie", },
1090	{ .compatible = "fsl,qoriq-pcie", },
1091	{ .compatible = "fsl,qoriq-pcie-v2.1", },
1092	{ .compatible = "fsl,qoriq-pcie-v2.2", },
1093	{ .compatible = "fsl,qoriq-pcie-v2.3", },
1094	{ .compatible = "fsl,qoriq-pcie-v2.4", },
1095	{ .compatible = "fsl,qoriq-pcie-v3.0", },
1096
1097	/*
1098	 * The following entries are for compatibility with older device
1099	 * trees.
1100	 */
1101	{ .compatible = "fsl,p1022-pcie", },
1102	{ .compatible = "fsl,p4080-pcie", },
1103
1104	{},
1105};
1106
1107struct device_node *fsl_pci_primary;
1108
1109void fsl_pci_assign_primary(void)
1110{
1111	struct device_node *np;
1112
1113	/* Callers can specify the primary bus using other means. */
1114	if (fsl_pci_primary)
1115		return;
1116
1117	/* If a PCI host bridge contains an ISA node, it's primary. */
1118	np = of_find_node_by_type(NULL, "isa");
1119	while ((fsl_pci_primary = of_get_parent(np))) {
1120		of_node_put(np);
1121		np = fsl_pci_primary;
1122
1123		if (of_match_node(pci_ids, np) && of_device_is_available(np))
1124			return;
1125	}
1126
1127	/*
1128	 * If there's no PCI host bridge with ISA, arbitrarily
1129	 * designate one as primary.  This can go away once
1130	 * various bugs with primary-less systems are fixed.
1131	 */
1132	for_each_matching_node(np, pci_ids) {
1133		if (of_device_is_available(np)) {
1134			fsl_pci_primary = np;
1135			of_node_put(np);
1136			return;
1137		}
1138	}
1139}
1140
1141#ifdef CONFIG_PM_SLEEP
1142static irqreturn_t fsl_pci_pme_handle(int irq, void *dev_id)
1143{
1144	struct pci_controller *hose = dev_id;
1145	struct ccsr_pci __iomem *pci = hose->private_data;
1146	u32 dr;
1147
1148	dr = in_be32(&pci->pex_pme_mes_dr);
1149	if (!dr)
1150		return IRQ_NONE;
1151
1152	out_be32(&pci->pex_pme_mes_dr, dr);
1153
1154	return IRQ_HANDLED;
1155}
1156
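/*
 * Wire up the controller's PME/message interrupt and enable detection of
 * PME turn-off and L2/L3 state transition events, so the syscore
 * suspend/resume hooks below can hand-shake the link into and out of its
 * low-power state.
 */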
1157static int fsl_pci_pme_probe(struct pci_controller *hose)
1158{
1159	struct ccsr_pci __iomem *pci;
1160	struct pci_dev *dev;
1161	int pme_irq;
1162	int res;
1163	u16 pms;
1164
1165	/* Get hose's pci_dev */
1166	dev = list_first_entry(&hose->bus->devices, typeof(*dev), bus_list);
1167
1168	/* PME Disable */
1169	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1170	pms &= ~PCI_PM_CTRL_PME_ENABLE;
1171	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1172
1173	pme_irq = irq_of_parse_and_map(hose->dn, 0);
1174	if (!pme_irq) {
1175		dev_err(&dev->dev, "Failed to map PME interrupt.\n");
1176
1177		return -ENXIO;
1178	}
1179
1180	res = devm_request_irq(hose->parent, pme_irq,
1181			fsl_pci_pme_handle,
1182			IRQF_SHARED,
1183			"[PCI] PME", hose);
1184	if (res < 0) {
1185		dev_err(&dev->dev, "Unable to request irq %d for PME\n", pme_irq);
1186		irq_dispose_mapping(pme_irq);
1187
1188		return -ENODEV;
1189	}
1190
1191	pci = hose->private_data;
1192
1193	/* Enable PTOD, ENL23D & EXL23D */
1194	clrbits32(&pci->pex_pme_mes_disr,
1195		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1196
1197	out_be32(&pci->pex_pme_mes_ier, 0);
1198	setbits32(&pci->pex_pme_mes_ier,
1199		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1200
1201	/* PME Enable */
1202	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1203	pms |= PCI_PM_CTRL_PME_ENABLE;
1204	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1205
1206	return 0;
1207}
1208
1209static void send_pme_turnoff_message(struct pci_controller *hose)
1210{
1211	struct ccsr_pci __iomem *pci = hose->private_data;
1212	u32 dr;
1213	int i;
1214
1215	/* Send PME_Turn_Off Message Request */
1216	setbits32(&pci->pex_pmcr, PEX_PMCR_PTOMR);
1217
 1218	/* Wait for turn off to complete */
1219	for (i = 0; i < 150; i++) {
1220		dr = in_be32(&pci->pex_pme_mes_dr);
1221		if (dr) {
1222			out_be32(&pci->pex_pme_mes_dr, dr);
1223			break;
1224		}
1225
1226		udelay(1000);
1227	}
1228}
1229
1230static void fsl_pci_syscore_do_suspend(struct pci_controller *hose)
1231{
1232	send_pme_turnoff_message(hose);
1233}
1234
1235static int fsl_pci_syscore_suspend(void)
1236{
1237	struct pci_controller *hose, *tmp;
1238
1239	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1240		fsl_pci_syscore_do_suspend(hose);
1241
1242	return 0;
1243}
1244
1245static void fsl_pci_syscore_do_resume(struct pci_controller *hose)
1246{
1247	struct ccsr_pci __iomem *pci = hose->private_data;
1248	u32 dr;
1249	int i;
1250
1251	/* Send Exit L2 State Message */
1252	setbits32(&pci->pex_pmcr, PEX_PMCR_EXL2S);
1253
 1254	/* Wait for the L2 exit to complete */
1255	for (i = 0; i < 150; i++) {
1256		dr = in_be32(&pci->pex_pme_mes_dr);
1257		if (dr) {
1258			out_be32(&pci->pex_pme_mes_dr, dr);
1259			break;
1260		}
1261
1262		udelay(1000);
1263	}
1264
1265	setup_pci_atmu(hose);
1266}
1267
1268static void fsl_pci_syscore_resume(void)
1269{
1270	struct pci_controller *hose, *tmp;
1271
1272	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1273		fsl_pci_syscore_do_resume(hose);
1274}
1275
1276static struct syscore_ops pci_syscore_pm_ops = {
1277	.suspend = fsl_pci_syscore_suspend,
1278	.resume = fsl_pci_syscore_resume,
1279};
1280#endif
1281
1282void fsl_pcibios_fixup_phb(struct pci_controller *phb)
1283{
1284#ifdef CONFIG_PM_SLEEP
1285	fsl_pci_pme_probe(phb);
1286#endif
1287}
1288
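/*
 * Register a child "mpc85xx-pci-edac" platform device that shares this
 * controller's resources and device tree node, so the EDAC driver can
 * report PCI error events for it.
 */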
1289static int add_err_dev(struct platform_device *pdev)
1290{
1291	struct platform_device *errdev;
1292	struct mpc85xx_edac_pci_plat_data pd = {
1293		.of_node = pdev->dev.of_node
1294	};
1295
1296	errdev = platform_device_register_resndata(&pdev->dev,
1297						   "mpc85xx-pci-edac",
1298						   PLATFORM_DEVID_AUTO,
1299						   pdev->resource,
1300						   pdev->num_resources,
1301						   &pd, sizeof(pd));
1302
1303	return PTR_ERR_OR_ZERO(errdev);
1304}
1305
1306static int fsl_pci_probe(struct platform_device *pdev)
1307{
1308	struct device_node *node;
1309	int ret;
1310
1311	node = pdev->dev.of_node;
1312	ret = fsl_add_bridge(pdev, fsl_pci_primary == node);
1313	if (ret)
1314		return ret;
1315
1316	ret = add_err_dev(pdev);
1317	if (ret)
1318		dev_err(&pdev->dev, "couldn't register error device: %d\n",
1319			ret);
1320
1321	return 0;
1322}
1323
1324static struct platform_driver fsl_pci_driver = {
1325	.driver = {
1326		.name = "fsl-pci",
1327		.of_match_table = pci_ids,
1328	},
1329	.probe = fsl_pci_probe,
1330};
1331
1332static int __init fsl_pci_init(void)
1333{
1334#ifdef CONFIG_PM_SLEEP
1335	register_syscore_ops(&pci_syscore_pm_ops);
1336#endif
1337	return platform_driver_register(&fsl_pci_driver);
1338}
1339arch_initcall(fsl_pci_init);
1340#endif