v4.17
   1/*
   2 * MPC83xx/85xx/86xx PCI/PCIE support routines.
   3 *
   4 * Copyright 2007-2012 Freescale Semiconductor, Inc.
   5 * Copyright 2008-2009 MontaVista Software, Inc.
   6 *
   7 * Initial author: Xianghua Xiao <x.xiao@freescale.com>
   8 * Recode: ZHANG WEI <wei.zhang@freescale.com>
   9 * Rewrite the routing for Freescale PCI and PCI Express
  10 * 	Roy Zang <tie-fei.zang@freescale.com>
  11 * MPC83xx PCI-Express support:
  12 * 	Tony Li <tony.li@freescale.com>
  13 * 	Anton Vorontsov <avorontsov@ru.mvista.com>
  14 *
  15 * This program is free software; you can redistribute  it and/or modify it
  16 * under  the terms of  the GNU General  Public License as published by the
  17 * Free Software Foundation;  either version 2 of the  License, or (at your
  18 * option) any later version.
  19 */
  20#include <linux/kernel.h>
  21#include <linux/pci.h>
  22#include <linux/delay.h>
  23#include <linux/string.h>
  24#include <linux/fsl/edac.h>
  25#include <linux/init.h>
  26#include <linux/interrupt.h>
  27#include <linux/memblock.h>
  28#include <linux/log2.h>
  29#include <linux/platform_device.h>
  30#include <linux/slab.h>
  31#include <linux/suspend.h>
  32#include <linux/syscore_ops.h>
  33#include <linux/uaccess.h>
  34
  35#include <asm/io.h>
  36#include <asm/prom.h>
  37#include <asm/pci-bridge.h>
  38#include <asm/ppc-pci.h>
  39#include <asm/machdep.h>
  40#include <asm/mpc85xx.h>
  41#include <asm/disassemble.h>
   42#include <asm/ppc-opcode.h>
   43#include <sysdev/fsl_soc.h>
  44#include <sysdev/fsl_pci.h>
  45
  46static int fsl_pcie_bus_fixup, is_mpc83xx_pci;
  47
  48static void quirk_fsl_pcie_early(struct pci_dev *dev)
  49{
  50	u8 hdr_type;
  51
   52	/* if we aren't a PCIe device, don't bother */
  53	if (!pci_is_pcie(dev))
  54		return;
  55
  56	/* if we aren't in host mode don't bother */
  57	pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
  58	if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
  59		return;
  60
  61	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
  62	fsl_pcie_bus_fixup = 1;
  63	return;
  64}
  65
  66static int fsl_indirect_read_config(struct pci_bus *, unsigned int,
  67				    int, int, u32 *);
  68
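/*
 * Return 1 if the PCIe link is down, 0 if it is up.  Older controller
 * revisions expose the LTSSM state through the PCIE_LTSSM config
 * register; PCIe IP rev 3.0 and later report it via CSR0 in CCSR
 * instead.
 */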
  69static int fsl_pcie_check_link(struct pci_controller *hose)
  70{
  71	u32 val = 0;
  72
  73	if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
  74		if (hose->ops->read == fsl_indirect_read_config)
  75			__indirect_read_config(hose, hose->first_busno, 0,
  76					       PCIE_LTSSM, 4, &val);
  77		else
  78			early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
  79		if (val < PCIE_LTSSM_L0)
  80			return 1;
  81	} else {
  82		struct ccsr_pci __iomem *pci = hose->private_data;
  83		/* for PCIe IP rev 3.0 or greater use CSR0 for link state */
  84		val = (in_be32(&pci->pex_csr0) & PEX_CSR0_LTSSM_MASK)
  85				>> PEX_CSR0_LTSSM_SHIFT;
  86		if (val != PEX_CSR0_LTSSM_L0)
  87			return 1;
  88	}
  89
  90	return 0;
  91}
  92
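/*
 * Config read wrapper: sample the link state before each access so the
 * PPC_INDIRECT_TYPE_NO_PCIE_LINK flag always reflects whether the link
 * is currently up before the indirect read is issued.
 */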
  93static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
  94				    int offset, int len, u32 *val)
  95{
  96	struct pci_controller *hose = pci_bus_to_host(bus);
  97
  98	if (fsl_pcie_check_link(hose))
  99		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 100	else
 101		hose->indirect_type &= ~PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 102
 103	return indirect_read_config(bus, devfn, offset, len, val);
 104}
 105
 106#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 107
 108static struct pci_ops fsl_indirect_pcie_ops =
 109{
 110	.read = fsl_indirect_read_config,
 111	.write = indirect_write_config,
 112};
 113
 114static u64 pci64_dma_offset;
 115
  116#ifdef CONFIG_SWIOTLB
 117static void setup_swiotlb_ops(struct pci_controller *hose)
 118{
 119	if (ppc_swiotlb_enable) {
 120		hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
 121		set_pci_dma_ops(&powerpc_swiotlb_dma_ops);
 122	}
 123}
 124#else
 125static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
 126#endif
 127
 128static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
 129{
 130	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
 131		return -EIO;
 132
 133	/*
 134	 * Fix up PCI devices that are able to DMA to the large inbound
 135	 * mapping that allows addressing any RAM address from across PCI.
 136	 */
 137	if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
 138		set_dma_ops(dev, &dma_nommu_ops);
 139		set_dma_offset(dev, pci64_dma_offset);
 140	}
 141
 142	*dev->dma_mask = dma_mask;
 143	return 0;
 144}
 145
 146static int setup_one_atmu(struct ccsr_pci __iomem *pci,
 147	unsigned int index, const struct resource *res,
 148	resource_size_t offset)
 149{
 150	resource_size_t pci_addr = res->start - offset;
 151	resource_size_t phys_addr = res->start;
 152	resource_size_t size = resource_size(res);
 153	u32 flags = 0x80044000; /* enable & mem R/W */
 154	unsigned int i;
 155
 156	pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
 157		(u64)res->start, (u64)size);
 158
 159	if (res->flags & IORESOURCE_PREFETCH)
 160		flags |= 0x10000000; /* enable relaxed ordering */
 161
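	/*
	 * Outbound windows must be a power of two in size and naturally
	 * aligned on both the PCI and physical sides, so split the
	 * resource into the largest chunks satisfying both constraints.
	 * For example, a 0x30000000-byte resource at 0x80000000 becomes
	 * a 512 MiB window at 0x80000000 followed by a 256 MiB window at
	 * 0xa0000000, consuming two ATMUs.
	 */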
 162	for (i = 0; size > 0; i++) {
 163		unsigned int bits = min_t(u32, ilog2(size),
 164					__ffs(pci_addr | phys_addr));
 165
 166		if (index + i >= 5)
 167			return -1;
 168
 169		out_be32(&pci->pow[index + i].potar, pci_addr >> 12);
 170		out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44);
 171		out_be32(&pci->pow[index + i].powbar, phys_addr >> 12);
 172		out_be32(&pci->pow[index + i].powar, flags | (bits - 1));
 173
 174		pci_addr += (resource_size_t)1U << bits;
 175		phys_addr += (resource_size_t)1U << bits;
 176		size -= (resource_size_t)1U << bits;
 177	}
 178
 179	return i;
 180}
 181
 182static bool is_kdump(void)
 183{
 184	struct device_node *node;
 185
 186	node = of_find_node_by_type(NULL, "memory");
 187	if (!node) {
 188		WARN_ON_ONCE(1);
 189		return false;
 190	}
 191
 192	return of_property_read_bool(node, "linux,usable-memory");
 193}
 194
 195/* atmu setup for fsl pci/pcie controller */
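/*
 * Rough flow: disable the existing windows, program outbound windows
 * from the hose's memory and IO resources, size and place PCICSRBAR,
 * then program inbound windows to cover as much of RAM as the
 * remaining PCI address space allows, falling back to SWIOTLB (or
 * warning) when RAM cannot be covered.
 */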
 196static void setup_pci_atmu(struct pci_controller *hose)
 197{
 198	struct ccsr_pci __iomem *pci = hose->private_data;
 199	int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
 200	u64 mem, sz, paddr_hi = 0;
 201	u64 offset = 0, paddr_lo = ULLONG_MAX;
 202	u32 pcicsrbar = 0, pcicsrbar_sz;
 203	u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
 204			PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
 205	const u64 *reg;
 206	int len;
 207	bool setup_inbound;
 208
 209	/*
 210	 * If this is kdump, we don't want to trigger a bunch of PCI
 211	 * errors by closing the window on in-flight DMA.
 212	 *
 213	 * We still run most of the function's logic so that things like
 214	 * hose->dma_window_size still get set.
 215	 */
 216	setup_inbound = !is_kdump();
 217
 218	if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
 219		/*
 220		 * BSC9132 Rev1.0 has an issue where all the PEX inbound
 221		 * windows have implemented the default target value as 0xf
  222		 * for CCSR space. In all Freescale legacy devices the target
  223		 * of 0xf is reserved for local memory space. 9132 Rev1.0
  224		 * now has local memory space mapped to target 0x0 instead of
 225		 * 0xf. Hence adding a workaround to remove the target 0xf
 226		 * defined for memory space from Inbound window attributes.
 227		 */
 228		piwar &= ~PIWAR_TGI_LOCAL;
 229	}
 230
 231	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 232		if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
 233			win_idx = 2;
 234			start_idx = 0;
 235			end_idx = 3;
 236		}
 237	}
 238
 239	/* Disable all windows (except powar0 since it's ignored) */
 240	for(i = 1; i < 5; i++)
 241		out_be32(&pci->pow[i].powar, 0);
 242
 243	if (setup_inbound) {
 244		for (i = start_idx; i < end_idx; i++)
 245			out_be32(&pci->piw[i].piwar, 0);
 246	}
 247
 248	/* Setup outbound MEM window */
 249	for(i = 0, j = 1; i < 3; i++) {
 250		if (!(hose->mem_resources[i].flags & IORESOURCE_MEM))
 251			continue;
 252
 253		paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start);
 254		paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end);
 255
 256		/* We assume all memory resources have the same offset */
 257		offset = hose->mem_offset[i];
 258		n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset);
 259
 260		if (n < 0 || j >= 5) {
 261			pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
 262			hose->mem_resources[i].flags |= IORESOURCE_DISABLED;
 263		} else
 264			j += n;
 265	}
 266
 267	/* Setup outbound IO window */
 268	if (hose->io_resource.flags & IORESOURCE_IO) {
 269		if (j >= 5) {
 270			pr_err("Ran out of outbound PCI ATMUs for IO resource\n");
 271		} else {
 272			pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
 273				 "phy base 0x%016llx.\n",
 274				 (u64)hose->io_resource.start,
 275				 (u64)resource_size(&hose->io_resource),
 276				 (u64)hose->io_base_phys);
 277			out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
 278			out_be32(&pci->pow[j].potear, 0);
 279			out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
 280			/* Enable, IO R/W */
 281			out_be32(&pci->pow[j].powar, 0x80088000
 282				| (ilog2(hose->io_resource.end
 283				- hose->io_resource.start + 1) - 1));
 284		}
 285	}
 286
 287	/* convert to pci address space */
 288	paddr_hi -= offset;
 289	paddr_lo -= offset;
 290
 291	if (paddr_hi == paddr_lo) {
 292		pr_err("%pOF: No outbound window space\n", hose->dn);
 293		return;
 294	}
 295
 296	if (paddr_lo == 0) {
 297		pr_err("%pOF: No space for inbound window\n", hose->dn);
 298		return;
 299	}
 300
 301	/* setup PCSRBAR/PEXCSRBAR */
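	/*
	 * Standard BAR sizing probe: write all-ones, read back the size
	 * mask and take its two's complement to get the window size
	 * (e.g. a read-back of 0xfff00000 yields a 1 MiB PCICSRBAR).
	 */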
 302	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
 303	early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz);
 304	pcicsrbar_sz = ~pcicsrbar_sz + 1;
 305
 306	if (paddr_hi < (0x100000000ull - pcicsrbar_sz) ||
 307		(paddr_lo > 0x100000000ull))
 308		pcicsrbar = 0x100000000ull - pcicsrbar_sz;
 309	else
 310		pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz;
 311	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar);
 312
 313	paddr_lo = min(paddr_lo, (u64)pcicsrbar);
 314
 315	pr_info("%pOF: PCICSRBAR @ 0x%x\n", hose->dn, pcicsrbar);
 316
 317	/* Setup inbound mem window */
 318	mem = memblock_end_of_DRAM();
 319	pr_info("%s: end of DRAM %llx\n", __func__, mem);
 320
 321	/*
 322	 * The msi-address-64 property, if it exists, indicates the physical
 323	 * address of the MSIIR register.  Normally, this register is located
 324	 * inside CCSR, so the ATMU that covers all of CCSR is used. But if
 325	 * this property exists, then we normally need to create a new ATMU
 326	 * for it.  For now, however, we cheat.  The only entity that creates
 327	 * this property is the Freescale hypervisor, and the address is
 328	 * specified in the partition configuration.  Typically, the address
 329	 * is located in the page immediately after the end of DDR.  If so, we
 330	 * can avoid allocating a new ATMU by extending the DDR ATMU by one
 331	 * page.
 332	 */
 333	reg = of_get_property(hose->dn, "msi-address-64", &len);
 334	if (reg && (len == sizeof(u64))) {
 335		u64 address = be64_to_cpup(reg);
 336
 337		if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
 338			pr_info("%pOF: extending DDR ATMU to cover MSIIR", hose->dn);
 339			mem += PAGE_SIZE;
 340		} else {
 341			/* TODO: Create a new ATMU for MSIIR */
 342			pr_warn("%pOF: msi-address-64 address of %llx is "
 343				"unsupported\n", hose->dn, address);
 344		}
 345	}
 346
 347	sz = min(mem, paddr_lo);
 348	mem_log = ilog2(sz);
 349
 350	/* PCIe can overmap inbound & outbound since RX & TX are separated */
 351	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
  352		/* Size the window to the exact size if a power of two, else one size up */
 353		if ((1ull << mem_log) != mem) {
 354			mem_log++;
 355			if ((1ull << mem_log) > mem)
 356				pr_info("%pOF: Setting PCI inbound window "
 357					"greater than memory size\n", hose->dn);
 358		}
 359
 360		piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);
 361
 362		if (setup_inbound) {
 363			/* Setup inbound memory window */
 364			out_be32(&pci->piw[win_idx].pitar,  0x00000000);
 365			out_be32(&pci->piw[win_idx].piwbar, 0x00000000);
 366			out_be32(&pci->piw[win_idx].piwar,  piwar);
 367		}
 368
 369		win_idx--;
 370		hose->dma_window_base_cur = 0x00000000;
 371		hose->dma_window_size = (resource_size_t)sz;
 372
 373		/*
 374		 * if we have >4G of memory setup second PCI inbound window to
 375		 * let devices that are 64-bit address capable to work w/o
 376		 * SWIOTLB and access the full range of memory
 377		 */
 378		if (sz != mem) {
 379			mem_log = ilog2(mem);
 380
  381			/* Size window up if we don't fit in an exact power-of-2 */
 382			if ((1ull << mem_log) != mem)
 383				mem_log++;
 384
 385			piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
 386			pci64_dma_offset = 1ULL << mem_log;
 387
 388			if (setup_inbound) {
 389				/* Setup inbound memory window */
 390				out_be32(&pci->piw[win_idx].pitar,  0x00000000);
 391				out_be32(&pci->piw[win_idx].piwbear,
 392						pci64_dma_offset >> 44);
 393				out_be32(&pci->piw[win_idx].piwbar,
 394						pci64_dma_offset >> 12);
 395				out_be32(&pci->piw[win_idx].piwar,  piwar);
 396			}
 397
 398			/*
 399			 * install our own dma_set_mask handler to fixup dma_ops
 400			 * and dma_offset
 401			 */
 402			ppc_md.dma_set_mask = fsl_pci_dma_set_mask;
 403
 404			pr_info("%pOF: Setup 64-bit PCI DMA window\n", hose->dn);
 405		}
 406	} else {
 407		u64 paddr = 0;
 408
 409		if (setup_inbound) {
 410			/* Setup inbound memory window */
 411			out_be32(&pci->piw[win_idx].pitar,  paddr >> 12);
 412			out_be32(&pci->piw[win_idx].piwbar, paddr >> 12);
 413			out_be32(&pci->piw[win_idx].piwar,
 414				 (piwar | (mem_log - 1)));
 415		}
 416
 417		win_idx--;
 418		paddr += 1ull << mem_log;
 419		sz -= 1ull << mem_log;
 420
 421		if (sz) {
 422			mem_log = ilog2(sz);
 423			piwar |= (mem_log - 1);
 424
 425			if (setup_inbound) {
 426				out_be32(&pci->piw[win_idx].pitar,
 427					 paddr >> 12);
 428				out_be32(&pci->piw[win_idx].piwbar,
 429					 paddr >> 12);
 430				out_be32(&pci->piw[win_idx].piwar, piwar);
 431			}
 432
 433			win_idx--;
 434			paddr += 1ull << mem_log;
 435		}
 436
 437		hose->dma_window_base_cur = 0x00000000;
 438		hose->dma_window_size = (resource_size_t)paddr;
 439	}
 440
 441	if (hose->dma_window_size < mem) {
 442#ifdef CONFIG_SWIOTLB
 443		ppc_swiotlb_enable = 1;
 444#else
 445		pr_err("%pOF: ERROR: Memory size exceeds PCI ATMU ability to "
 446			"map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
 447			 hose->dn);
 448#endif
 449		/* adjusting outbound windows could reclaim space in mem map */
 450		if (paddr_hi < 0xffffffffull)
 451			pr_warn("%pOF: WARNING: Outbound window cfg leaves "
 452				"gaps in memory map. Adjusting the memory map "
 453				"could reduce unnecessary bounce buffering.\n",
 454				hose->dn);
 455
 456		pr_info("%pOF: DMA window size is 0x%llx\n", hose->dn,
 457			(u64)hose->dma_window_size);
 458	}
 459}
 460
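/*
 * Enable IO/memory decoding, bus mastering and SERR on the host
 * bridge; on PCI-X additionally program the split-transaction and
 * read-byte-count limits, otherwise just set a sane latency timer.
 */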
 461static void __init setup_pci_cmd(struct pci_controller *hose)
 462{
 463	u16 cmd;
 464	int cap_x;
 465
 466	early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
 467	cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
 468		| PCI_COMMAND_IO;
 469	early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);
 470
 471	cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX);
 472	if (cap_x) {
 473		int pci_x_cmd = cap_x + PCI_X_CMD;
 474		cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
 475			| PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
 476		early_write_config_word(hose, 0, 0, pci_x_cmd, cmd);
 477	} else {
 478		early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
 479	}
 480}
 481
 482void fsl_pcibios_fixup_bus(struct pci_bus *bus)
 483{
 484	struct pci_controller *hose = pci_bus_to_host(bus);
 485	int i, is_pcie = 0, no_link;
 486
 487	/* The root complex bridge comes up with bogus resources,
 488	 * we copy the PHB ones in.
 489	 *
 490	 * With the current generic PCI code, the PHB bus no longer
 491	 * has bus->resource[0..4] set, so things are a bit more
 492	 * tricky.
 493	 */
 494
 495	if (fsl_pcie_bus_fixup)
 496		is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
 497	no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);
 498
 499	if (bus->parent == hose->bus && (is_pcie || no_link)) {
 500		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
 501			struct resource *res = bus->resource[i];
 502			struct resource *par;
 503
 504			if (!res)
 505				continue;
 506			if (i == 0)
 507				par = &hose->io_resource;
 508			else if (i < 4)
 509				par = &hose->mem_resources[i-1];
 510			else par = NULL;
 511
 512			res->start = par ? par->start : 0;
 513			res->end   = par ? par->end   : 0;
 514			res->flags = par ? par->flags : 0;
 515		}
 516	}
 517}
 518
 519int fsl_add_bridge(struct platform_device *pdev, int is_primary)
 520{
 521	int len;
 522	struct pci_controller *hose;
 523	struct resource rsrc;
 524	const int *bus_range;
 525	u8 hdr_type, progif;
 526	struct device_node *dev;
 527	struct ccsr_pci __iomem *pci;
 528	u16 temp;
 529	u32 svr = mfspr(SPRN_SVR);
 530
 531	dev = pdev->dev.of_node;
 532
 533	if (!of_device_is_available(dev)) {
 534		pr_warn("%pOF: disabled\n", dev);
 535		return -ENODEV;
 536	}
 537
 538	pr_debug("Adding PCI host bridge %pOF\n", dev);
 539
 540	/* Fetch host bridge registers address */
 541	if (of_address_to_resource(dev, 0, &rsrc)) {
 542		printk(KERN_WARNING "Can't get pci register base!");
 543		return -ENOMEM;
 544	}
 545
 546	/* Get bus range if any */
 547	bus_range = of_get_property(dev, "bus-range", &len);
 548	if (bus_range == NULL || len < 2 * sizeof(int))
 549		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
 550			" bus 0\n", dev);
 551
 552	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 553	hose = pcibios_alloc_controller(dev);
 554	if (!hose)
 555		return -ENOMEM;
 556
 557	/* set platform device as the parent */
 558	hose->parent = &pdev->dev;
 559	hose->first_busno = bus_range ? bus_range[0] : 0x0;
 560	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 561
 562	pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
 563		 (u64)rsrc.start, (u64)resource_size(&rsrc));
 564
 565	pci = hose->private_data = ioremap(rsrc.start, resource_size(&rsrc));
 566	if (!hose->private_data)
 567		goto no_bridge;
 568
 569	setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
 570			   PPC_INDIRECT_TYPE_BIG_ENDIAN);
 571
 572	if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
 573		hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 574
 575	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 576		/* use fsl_indirect_read_config for PCIe */
 577		hose->ops = &fsl_indirect_pcie_ops;
 578		/* For PCIE read HEADER_TYPE to identify controller mode */
 579		early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
 580		if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
 581			goto no_bridge;
 582
 583	} else {
 584		/* For PCI read PROG to identify controller mode */
 585		early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
 586		if ((progif & 1) &&
 587		    !of_property_read_bool(dev, "fsl,pci-agent-force-enum"))
 588			goto no_bridge;
 589	}
 590
 591	setup_pci_cmd(hose);
 592
 593	/* check PCI express link status */
 594	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 595		hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG |
 596			PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
 597		if (fsl_pcie_check_link(hose))
 598			hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 599	} else {
 600		/*
 601		 * Set PBFR(PCI Bus Function Register)[10] = 1 to
 602		 * disable the combining of crossing cacheline
 603		 * boundary requests into one burst transaction.
 604		 * PCI-X operation is not affected.
 605		 * Fix erratum PCI 5 on MPC8548
 606		 */
 607#define PCI_BUS_FUNCTION 0x44
 608#define PCI_BUS_FUNCTION_MDS 0x400	/* Master disable streaming */
 609		if (((SVR_SOC_VER(svr) == SVR_8543) ||
 610		     (SVR_SOC_VER(svr) == SVR_8545) ||
 611		     (SVR_SOC_VER(svr) == SVR_8547) ||
 612		     (SVR_SOC_VER(svr) == SVR_8548)) &&
 613		    !early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX)) {
 614			early_read_config_word(hose, 0, 0,
 615					PCI_BUS_FUNCTION, &temp);
 616			temp |= PCI_BUS_FUNCTION_MDS;
 617			early_write_config_word(hose, 0, 0,
 618					PCI_BUS_FUNCTION, temp);
 619		}
 620	}
 621
 622	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
 623		"Firmware bus number: %d->%d\n",
 624		(unsigned long long)rsrc.start, hose->first_busno,
 625		hose->last_busno);
 626
 627	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
 628		hose, hose->cfg_addr, hose->cfg_data);
 629
 630	/* Interpret the "ranges" property */
 631	/* This also maps the I/O region and sets isa_io/mem_base */
 632	pci_process_bridge_OF_ranges(hose, dev, is_primary);
 633
 634	/* Setup PEX window registers */
 635	setup_pci_atmu(hose);
 636
 637	/* Set up controller operations */
 638	setup_swiotlb_ops(hose);
 639
 640	return 0;
 641
 642no_bridge:
 643	iounmap(hose->private_data);
 644	/* unmap cfg_data & cfg_addr separately if not on same page */
 645	if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
 646	    ((unsigned long)hose->cfg_addr & PAGE_MASK))
 647		iounmap(hose->cfg_data);
 648	iounmap(hose->cfg_addr);
 649	pcibios_free_controller(hose);
 650	return -ENODEV;
 651}
 652#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
 653
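/*
 * Run the header-type quirk for every Freescale vendor ID early in
 * enumeration so host bridges get reclassified as PCI-PCI bridges.
 */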
 654DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID,
 655			quirk_fsl_pcie_early);
 656
 657#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
 658struct mpc83xx_pcie_priv {
 659	void __iomem *cfg_type0;
 660	void __iomem *cfg_type1;
 661	u32 dev_base;
 662};
 663
 664struct pex_inbound_window {
 665	u32 ar;
 666	u32 tar;
 667	u32 barl;
 668	u32 barh;
 669};
 670
 671/*
  672 * Following the U-Boot convention, PCIe outbound window 0 is used
  673 * for outbound configuration transactions.
  674 */
 675#define PEX_OUTWIN0_BAR		0xCA4
 676#define PEX_OUTWIN0_TAL		0xCA8
 677#define PEX_OUTWIN0_TAH		0xCAC
 678#define PEX_RC_INWIN_BASE	0xE60
 679#define PEX_RCIWARn_EN		0x1
 680
 681static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn)
 682{
 683	struct pci_controller *hose = pci_bus_to_host(bus);
 684
 685	if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK)
 686		return PCIBIOS_DEVICE_NOT_FOUND;
 687	/*
  688	 * Workaround for the HW bug: for Type 0 configuration transactions the
 689	 * PCI-E controller does not check the device number bits and just
 690	 * assumes that the device number bits are 0.
 691	 */
 692	if (bus->number == hose->first_busno ||
 693			bus->primary == hose->first_busno) {
 694		if (devfn & 0xf8)
 695			return PCIBIOS_DEVICE_NOT_FOUND;
 696	}
 697
 698	if (ppc_md.pci_exclude_device) {
 699		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
 700			return PCIBIOS_DEVICE_NOT_FOUND;
 701	}
 702
 703	return PCIBIOS_SUCCESSFUL;
 704}
 705
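/*
 * map_bus hook: return the MMIO address backing the requested config
 * register.  The root bus uses the Type 0 space directly; everything
 * else goes through the Type 1 space, retargeting outbound window 0
 * only when the addressed bus/devfn changes.
 */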
 706static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus,
 707					    unsigned int devfn, int offset)
 708{
 709	struct pci_controller *hose = pci_bus_to_host(bus);
 710	struct mpc83xx_pcie_priv *pcie = hose->dn->data;
 711	u32 dev_base = bus->number << 24 | devfn << 16;
 712	int ret;
 713
 714	ret = mpc83xx_pcie_exclude_device(bus, devfn);
 715	if (ret)
 716		return NULL;
 717
 718	offset &= 0xfff;
 719
 720	/* Type 0 */
 721	if (bus->number == hose->first_busno)
 722		return pcie->cfg_type0 + offset;
 723
 724	if (pcie->dev_base == dev_base)
 725		goto mapped;
 726
 727	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, dev_base);
 728
 729	pcie->dev_base = dev_base;
 730mapped:
 731	return pcie->cfg_type1 + offset;
 732}
 733
 734static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
 735				     int offset, int len, u32 val)
 736{
 737	struct pci_controller *hose = pci_bus_to_host(bus);
 738
 739	/* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
 740	if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
 741		val &= 0xffffff00;
 742
 743	return pci_generic_config_write(bus, devfn, offset, len, val);
 744}
 745
 746static struct pci_ops mpc83xx_pcie_ops = {
 747	.map_bus = mpc83xx_pcie_remap_cfg,
 748	.read = pci_generic_config_read,
 749	.write = mpc83xx_pcie_write_config,
 750};
 751
 752static int __init mpc83xx_pcie_setup(struct pci_controller *hose,
 753				     struct resource *reg)
 754{
 755	struct mpc83xx_pcie_priv *pcie;
 756	u32 cfg_bar;
 757	int ret = -ENOMEM;
 758
 759	pcie = zalloc_maybe_bootmem(sizeof(*pcie), GFP_KERNEL);
 760	if (!pcie)
 761		return ret;
 762
 763	pcie->cfg_type0 = ioremap(reg->start, resource_size(reg));
 764	if (!pcie->cfg_type0)
 765		goto err0;
 766
 767	cfg_bar = in_le32(pcie->cfg_type0 + PEX_OUTWIN0_BAR);
 768	if (!cfg_bar) {
 769		/* PCI-E isn't configured. */
 770		ret = -ENODEV;
 771		goto err1;
 772	}
 773
 774	pcie->cfg_type1 = ioremap(cfg_bar, 0x1000);
 775	if (!pcie->cfg_type1)
 776		goto err1;
 777
 778	WARN_ON(hose->dn->data);
 779	hose->dn->data = pcie;
 780	hose->ops = &mpc83xx_pcie_ops;
 781	hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 782
 783	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0);
 784	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0);
 785
 786	if (fsl_pcie_check_link(hose))
 787		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 788
 789	return 0;
 790err1:
 791	iounmap(pcie->cfg_type0);
 792err0:
 793	kfree(pcie);
 794	return ret;
 795
 796}
 797
 798int __init mpc83xx_add_bridge(struct device_node *dev)
 799{
 800	int ret;
 801	int len;
 802	struct pci_controller *hose;
 803	struct resource rsrc_reg;
 804	struct resource rsrc_cfg;
 805	const int *bus_range;
 806	int primary;
 807
 808	is_mpc83xx_pci = 1;
 809
 810	if (!of_device_is_available(dev)) {
 811		pr_warn("%pOF: disabled by the firmware.\n",
 812			dev);
 813		return -ENODEV;
 814	}
 815	pr_debug("Adding PCI host bridge %pOF\n", dev);
 816
 817	/* Fetch host bridge registers address */
 818	if (of_address_to_resource(dev, 0, &rsrc_reg)) {
 819		printk(KERN_WARNING "Can't get pci register base!\n");
 820		return -ENOMEM;
 821	}
 822
 823	memset(&rsrc_cfg, 0, sizeof(rsrc_cfg));
 824
 825	if (of_address_to_resource(dev, 1, &rsrc_cfg)) {
 826		printk(KERN_WARNING
 827			"No pci config register base in dev tree, "
 828			"using default\n");
 829		/*
 830		 * MPC83xx supports up to two host controllers
 831		 * 	one at 0x8500 has config space registers at 0x8300
 832		 * 	one at 0x8600 has config space registers at 0x8380
 833		 */
 834		if ((rsrc_reg.start & 0xfffff) == 0x8500)
 835			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300;
 836		else if ((rsrc_reg.start & 0xfffff) == 0x8600)
 837			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380;
 838	}
 839	/*
 840	 * Controller at offset 0x8500 is primary
 841	 */
 842	if ((rsrc_reg.start & 0xfffff) == 0x8500)
 843		primary = 1;
 844	else
 845		primary = 0;
 846
 847	/* Get bus range if any */
 848	bus_range = of_get_property(dev, "bus-range", &len);
 849	if (bus_range == NULL || len < 2 * sizeof(int)) {
 850		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
 851		       " bus 0\n", dev);
 852	}
 853
 854	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 855	hose = pcibios_alloc_controller(dev);
 856	if (!hose)
 857		return -ENOMEM;
 858
 859	hose->first_busno = bus_range ? bus_range[0] : 0;
 860	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 861
 862	if (of_device_is_compatible(dev, "fsl,mpc8314-pcie")) {
 863		ret = mpc83xx_pcie_setup(hose, &rsrc_reg);
 864		if (ret)
 865			goto err0;
 866	} else {
 867		setup_indirect_pci(hose, rsrc_cfg.start,
 868				   rsrc_cfg.start + 4, 0);
 869	}
 870
 871	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
 872	       "Firmware bus number: %d->%d\n",
 873	       (unsigned long long)rsrc_reg.start, hose->first_busno,
 874	       hose->last_busno);
 875
 876	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
 877	    hose, hose->cfg_addr, hose->cfg_data);
 878
 879	/* Interpret the "ranges" property */
 880	/* This also maps the I/O region and sets isa_io/mem_base */
 881	pci_process_bridge_OF_ranges(hose, dev, primary);
 882
 883	return 0;
 884err0:
 885	pcibios_free_controller(hose);
 886	return ret;
 887}
 888#endif /* CONFIG_PPC_83xx */
 889
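/*
 * Return the PCI address at which this controller's IMMR/CCSR space is
 * visible to inbound traffic: on 83xx by matching the RC inbound
 * windows against get_immrbase(), on 85xx/86xx by reading PEXCSRBAR
 * from the host bridge's BAR0.
 */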
 890u64 fsl_pci_immrbar_base(struct pci_controller *hose)
 891{
 892#ifdef CONFIG_PPC_83xx
 893	if (is_mpc83xx_pci) {
 894		struct mpc83xx_pcie_priv *pcie = hose->dn->data;
 895		struct pex_inbound_window *in;
 896		int i;
 897
 898		/* Walk the Root Complex Inbound windows to match IMMR base */
 899		in = pcie->cfg_type0 + PEX_RC_INWIN_BASE;
 900		for (i = 0; i < 4; i++) {
 901			/* not enabled, skip */
 902			if (!(in_le32(&in[i].ar) & PEX_RCIWARn_EN))
 903				continue;
 904
 905			if (get_immrbase() == in_le32(&in[i].tar))
 906				return (u64)in_le32(&in[i].barh) << 32 |
 907					    in_le32(&in[i].barl);
 908		}
 909
 910		printk(KERN_WARNING "could not find PCI BAR matching IMMR\n");
 911	}
 912#endif
 913
 914#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 915	if (!is_mpc83xx_pci) {
 916		u32 base;
 917
 918		pci_bus_read_config_dword(hose->bus,
 919			PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base);
 920
 921		/*
 922		 * For PEXCSRBAR, bit 3-0 indicate prefetchable and
 923		 * address type. So when getting base address, these
 924		 * bits should be masked
 925		 */
 926		base &= PCI_BASE_ADDRESS_MEM_MASK;
 927
 928		return base;
 929	}
 930#endif
 931
 932	return 0;
 933}
 934
 935#ifdef CONFIG_E500
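/*
 * Machine-check recovery for e500: if a load from PCI memory space
 * faults (e.g. the target device has gone away), emulate the load by
 * returning all-ones in the destination register, apply any update
 * addressing, and skip the instruction instead of treating the
 * machine check as fatal.
 */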
 936static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
 937{
 938	unsigned int rd, ra, rb, d;
 939
 940	rd = get_rt(inst);
 941	ra = get_ra(inst);
 942	rb = get_rb(inst);
 943	d = get_d(inst);
 944
 945	switch (get_op(inst)) {
 946	case 31:
 947		switch (get_xop(inst)) {
 948		case OP_31_XOP_LWZX:
 949		case OP_31_XOP_LWBRX:
 950			regs->gpr[rd] = 0xffffffff;
 951			break;
 952
 953		case OP_31_XOP_LWZUX:
 954			regs->gpr[rd] = 0xffffffff;
 955			regs->gpr[ra] += regs->gpr[rb];
 956			break;
 957
 958		case OP_31_XOP_LBZX:
 959			regs->gpr[rd] = 0xff;
 960			break;
 961
 962		case OP_31_XOP_LBZUX:
 963			regs->gpr[rd] = 0xff;
 964			regs->gpr[ra] += regs->gpr[rb];
 965			break;
 966
 967		case OP_31_XOP_LHZX:
 968		case OP_31_XOP_LHBRX:
 969			regs->gpr[rd] = 0xffff;
 970			break;
 971
 972		case OP_31_XOP_LHZUX:
 973			regs->gpr[rd] = 0xffff;
 974			regs->gpr[ra] += regs->gpr[rb];
 975			break;
 976
 977		case OP_31_XOP_LHAX:
 978			regs->gpr[rd] = ~0UL;
 979			break;
 980
 981		case OP_31_XOP_LHAUX:
 982			regs->gpr[rd] = ~0UL;
 983			regs->gpr[ra] += regs->gpr[rb];
 984			break;
 985
 986		default:
 987			return 0;
 988		}
 989		break;
 990
 991	case OP_LWZ:
 992		regs->gpr[rd] = 0xffffffff;
 993		break;
 994
 995	case OP_LWZU:
 996		regs->gpr[rd] = 0xffffffff;
 997		regs->gpr[ra] += (s16)d;
 998		break;
 999
1000	case OP_LBZ:
1001		regs->gpr[rd] = 0xff;
1002		break;
1003
1004	case OP_LBZU:
1005		regs->gpr[rd] = 0xff;
1006		regs->gpr[ra] += (s16)d;
1007		break;
1008
1009	case OP_LHZ:
1010		regs->gpr[rd] = 0xffff;
1011		break;
1012
1013	case OP_LHZU:
1014		regs->gpr[rd] = 0xffff;
1015		regs->gpr[ra] += (s16)d;
1016		break;
1017
1018	case OP_LHA:
1019		regs->gpr[rd] = ~0UL;
1020		break;
1021
1022	case OP_LHAU:
1023		regs->gpr[rd] = ~0UL;
1024		regs->gpr[ra] += (s16)d;
1025		break;
1026
1027	default:
1028		return 0;
1029	}
1030
1031	return 1;
1032}
1033
1034static int is_in_pci_mem_space(phys_addr_t addr)
1035{
1036	struct pci_controller *hose;
1037	struct resource *res;
1038	int i;
1039
1040	list_for_each_entry(hose, &hose_list, list_node) {
1041		if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG))
1042			continue;
1043
1044		for (i = 0; i < 3; i++) {
1045			res = &hose->mem_resources[i];
1046			if ((res->flags & IORESOURCE_MEM) &&
1047				addr >= res->start && addr <= res->end)
1048				return 1;
1049		}
1050	}
1051	return 0;
1052}
1053
1054int fsl_pci_mcheck_exception(struct pt_regs *regs)
1055{
1056	u32 inst;
1057	int ret;
1058	phys_addr_t addr = 0;
1059
1060	/* Let KVM/QEMU deal with the exception */
1061	if (regs->msr & MSR_GS)
1062		return 0;
1063
1064#ifdef CONFIG_PHYS_64BIT
1065	addr = mfspr(SPRN_MCARU);
1066	addr <<= 32;
1067#endif
1068	addr += mfspr(SPRN_MCAR);
1069
1070	if (is_in_pci_mem_space(addr)) {
1071		if (user_mode(regs)) {
1072			pagefault_disable();
1073			ret = get_user(inst, (__u32 __user *)regs->nip);
1074			pagefault_enable();
1075		} else {
1076			ret = probe_kernel_address((void *)regs->nip, inst);
1077		}
1078
1079		if (!ret && mcheck_handle_load(regs, inst)) {
1080			regs->nip += 4;
1081			return 1;
1082		}
1083	}
1084
1085	return 0;
1086}
1087#endif
1088
1089#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
1090static const struct of_device_id pci_ids[] = {
1091	{ .compatible = "fsl,mpc8540-pci", },
1092	{ .compatible = "fsl,mpc8548-pcie", },
1093	{ .compatible = "fsl,mpc8610-pci", },
1094	{ .compatible = "fsl,mpc8641-pcie", },
1095	{ .compatible = "fsl,qoriq-pcie", },
1096	{ .compatible = "fsl,qoriq-pcie-v2.1", },
1097	{ .compatible = "fsl,qoriq-pcie-v2.2", },
1098	{ .compatible = "fsl,qoriq-pcie-v2.3", },
1099	{ .compatible = "fsl,qoriq-pcie-v2.4", },
1100	{ .compatible = "fsl,qoriq-pcie-v3.0", },
1101
1102	/*
1103	 * The following entries are for compatibility with older device
1104	 * trees.
1105	 */
1106	{ .compatible = "fsl,p1022-pcie", },
1107	{ .compatible = "fsl,p4080-pcie", },
1108
1109	{},
1110};
1111
1112struct device_node *fsl_pci_primary;
1113
1114void fsl_pci_assign_primary(void)
1115{
1116	struct device_node *np;
1117
1118	/* Callers can specify the primary bus using other means. */
1119	if (fsl_pci_primary)
1120		return;
1121
1122	/* If a PCI host bridge contains an ISA node, it's primary. */
1123	np = of_find_node_by_type(NULL, "isa");
1124	while ((fsl_pci_primary = of_get_parent(np))) {
1125		of_node_put(np);
1126		np = fsl_pci_primary;
1127
1128		if (of_match_node(pci_ids, np) && of_device_is_available(np))
1129			return;
1130	}
1131
1132	/*
1133	 * If there's no PCI host bridge with ISA, arbitrarily
1134	 * designate one as primary.  This can go away once
1135	 * various bugs with primary-less systems are fixed.
1136	 */
1137	for_each_matching_node(np, pci_ids) {
1138		if (of_device_is_available(np)) {
1139			fsl_pci_primary = np;
1140			of_node_put(np);
1141			return;
1142		}
1143	}
1144}
1145
1146#ifdef CONFIG_PM_SLEEP
1147static irqreturn_t fsl_pci_pme_handle(int irq, void *dev_id)
1148{
1149	struct pci_controller *hose = dev_id;
1150	struct ccsr_pci __iomem *pci = hose->private_data;
1151	u32 dr;
1152
1153	dr = in_be32(&pci->pex_pme_mes_dr);
1154	if (!dr)
1155		return IRQ_NONE;
1156
1157	out_be32(&pci->pex_pme_mes_dr, dr);
1158
1159	return IRQ_HANDLED;
1160}
1161
1162static int fsl_pci_pme_probe(struct pci_controller *hose)
1163{
1164	struct ccsr_pci __iomem *pci;
1165	struct pci_dev *dev;
1166	int pme_irq;
1167	int res;
1168	u16 pms;
1169
1170	/* Get hose's pci_dev */
1171	dev = list_first_entry(&hose->bus->devices, typeof(*dev), bus_list);
1172
1173	/* PME Disable */
1174	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1175	pms &= ~PCI_PM_CTRL_PME_ENABLE;
1176	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1177
1178	pme_irq = irq_of_parse_and_map(hose->dn, 0);
1179	if (!pme_irq) {
1180		dev_err(&dev->dev, "Failed to map PME interrupt.\n");
1181
1182		return -ENXIO;
1183	}
1184
1185	res = devm_request_irq(hose->parent, pme_irq,
1186			fsl_pci_pme_handle,
1187			IRQF_SHARED,
1188			"[PCI] PME", hose);
1189	if (res < 0) {
1190		dev_err(&dev->dev, "Unable to request irq %d for PME\n", pme_irq);
1191		irq_dispose_mapping(pme_irq);
1192
1193		return -ENODEV;
1194	}
1195
1196	pci = hose->private_data;
1197
1198	/* Enable PTOD, ENL23D & EXL23D */
1199	clrbits32(&pci->pex_pme_mes_disr,
1200		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1201
1202	out_be32(&pci->pex_pme_mes_ier, 0);
1203	setbits32(&pci->pex_pme_mes_ier,
1204		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1205
1206	/* PME Enable */
1207	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1208	pms |= PCI_PM_CTRL_PME_ENABLE;
1209	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1210
1211	return 0;
1212}
1213
1214static void send_pme_turnoff_message(struct pci_controller *hose)
1215{
1216	struct ccsr_pci __iomem *pci = hose->private_data;
1217	u32 dr;
1218	int i;
1219
1220	/* Send PME_Turn_Off Message Request */
1221	setbits32(&pci->pex_pmcr, PEX_PMCR_PTOMR);
1222
 1223	/* Wait for the turn-off to complete */
1224	for (i = 0; i < 150; i++) {
1225		dr = in_be32(&pci->pex_pme_mes_dr);
1226		if (dr) {
1227			out_be32(&pci->pex_pme_mes_dr, dr);
1228			break;
1229		}
1230
1231		udelay(1000);
1232	}
1233}
1234
1235static void fsl_pci_syscore_do_suspend(struct pci_controller *hose)
1236{
1237	send_pme_turnoff_message(hose);
1238}
1239
1240static int fsl_pci_syscore_suspend(void)
1241{
1242	struct pci_controller *hose, *tmp;
1243
1244	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1245		fsl_pci_syscore_do_suspend(hose);
1246
1247	return 0;
1248}
1249
1250static void fsl_pci_syscore_do_resume(struct pci_controller *hose)
1251{
1252	struct ccsr_pci __iomem *pci = hose->private_data;
1253	u32 dr;
1254	int i;
1255
1256	/* Send Exit L2 State Message */
1257	setbits32(&pci->pex_pmcr, PEX_PMCR_EXL2S);
1258
 1259	/* Wait for the exit to complete */
1260	for (i = 0; i < 150; i++) {
1261		dr = in_be32(&pci->pex_pme_mes_dr);
1262		if (dr) {
1263			out_be32(&pci->pex_pme_mes_dr, dr);
1264			break;
1265		}
1266
1267		udelay(1000);
1268	}
1269
1270	setup_pci_atmu(hose);
1271}
1272
1273static void fsl_pci_syscore_resume(void)
1274{
1275	struct pci_controller *hose, *tmp;
1276
1277	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1278		fsl_pci_syscore_do_resume(hose);
1279}
1280
1281static struct syscore_ops pci_syscore_pm_ops = {
1282	.suspend = fsl_pci_syscore_suspend,
1283	.resume = fsl_pci_syscore_resume,
1284};
1285#endif
1286
1287void fsl_pcibios_fixup_phb(struct pci_controller *phb)
1288{
1289#ifdef CONFIG_PM_SLEEP
1290	fsl_pci_pme_probe(phb);
1291#endif
1292}
1293
1294static int add_err_dev(struct platform_device *pdev)
1295{
1296	struct platform_device *errdev;
1297	struct mpc85xx_edac_pci_plat_data pd = {
1298		.of_node = pdev->dev.of_node
1299	};
1300
1301	errdev = platform_device_register_resndata(&pdev->dev,
1302						   "mpc85xx-pci-edac",
1303						   PLATFORM_DEVID_AUTO,
1304						   pdev->resource,
1305						   pdev->num_resources,
1306						   &pd, sizeof(pd));
1307
1308	return PTR_ERR_OR_ZERO(errdev);
1309}
1310
1311static int fsl_pci_probe(struct platform_device *pdev)
1312{
1313	struct device_node *node;
1314	int ret;
1315
1316	node = pdev->dev.of_node;
1317	ret = fsl_add_bridge(pdev, fsl_pci_primary == node);
1318	if (ret)
1319		return ret;
1320
1321	ret = add_err_dev(pdev);
1322	if (ret)
1323		dev_err(&pdev->dev, "couldn't register error device: %d\n",
1324			ret);
1325
1326	return 0;
1327}
1328
1329static struct platform_driver fsl_pci_driver = {
1330	.driver = {
1331		.name = "fsl-pci",
1332		.of_match_table = pci_ids,
1333	},
1334	.probe = fsl_pci_probe,
1335};
1336
1337static int __init fsl_pci_init(void)
1338{
1339#ifdef CONFIG_PM_SLEEP
1340	register_syscore_ops(&pci_syscore_pm_ops);
1341#endif
1342	return platform_driver_register(&fsl_pci_driver);
1343}
1344arch_initcall(fsl_pci_init);
1345#endif
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * MPC83xx/85xx/86xx PCI/PCIE support routing.
   4 *
   5 * Copyright 2007-2012 Freescale Semiconductor, Inc.
   6 * Copyright 2008-2009 MontaVista Software, Inc.
   7 *
   8 * Initial author: Xianghua Xiao <x.xiao@freescale.com>
   9 * Recode: ZHANG WEI <wei.zhang@freescale.com>
  10 * Rewrite the routing for Frescale PCI and PCI Express
  11 * 	Roy Zang <tie-fei.zang@freescale.com>
  12 * MPC83xx PCI-Express support:
  13 * 	Tony Li <tony.li@freescale.com>
  14 * 	Anton Vorontsov <avorontsov@ru.mvista.com>
 
 
 
 
 
  15 */
  16#include <linux/kernel.h>
  17#include <linux/pci.h>
  18#include <linux/delay.h>
  19#include <linux/string.h>
  20#include <linux/fsl/edac.h>
  21#include <linux/init.h>
  22#include <linux/interrupt.h>
  23#include <linux/memblock.h>
  24#include <linux/log2.h>
  25#include <linux/platform_device.h>
  26#include <linux/slab.h>
  27#include <linux/suspend.h>
  28#include <linux/syscore_ops.h>
  29#include <linux/uaccess.h>
  30
  31#include <asm/io.h>
  32#include <asm/prom.h>
  33#include <asm/pci-bridge.h>
  34#include <asm/ppc-pci.h>
  35#include <asm/machdep.h>
  36#include <asm/mpc85xx.h>
  37#include <asm/disassemble.h>
  38#include <asm/ppc-opcode.h>
  39#include <asm/swiotlb.h>
  40#include <sysdev/fsl_soc.h>
  41#include <sysdev/fsl_pci.h>
  42
  43static int fsl_pcie_bus_fixup, is_mpc83xx_pci;
  44
  45static void quirk_fsl_pcie_early(struct pci_dev *dev)
  46{
  47	u8 hdr_type;
  48
  49	/* if we aren't a PCIe don't bother */
  50	if (!pci_is_pcie(dev))
  51		return;
  52
  53	/* if we aren't in host mode don't bother */
  54	pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
  55	if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
  56		return;
  57
  58	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
  59	fsl_pcie_bus_fixup = 1;
  60	return;
  61}
  62
  63static int fsl_indirect_read_config(struct pci_bus *, unsigned int,
  64				    int, int, u32 *);
  65
  66static int fsl_pcie_check_link(struct pci_controller *hose)
  67{
  68	u32 val = 0;
  69
  70	if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
  71		if (hose->ops->read == fsl_indirect_read_config)
  72			__indirect_read_config(hose, hose->first_busno, 0,
  73					       PCIE_LTSSM, 4, &val);
  74		else
  75			early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
  76		if (val < PCIE_LTSSM_L0)
  77			return 1;
  78	} else {
  79		struct ccsr_pci __iomem *pci = hose->private_data;
  80		/* for PCIe IP rev 3.0 or greater use CSR0 for link state */
  81		val = (in_be32(&pci->pex_csr0) & PEX_CSR0_LTSSM_MASK)
  82				>> PEX_CSR0_LTSSM_SHIFT;
  83		if (val != PEX_CSR0_LTSSM_L0)
  84			return 1;
  85	}
  86
  87	return 0;
  88}
  89
  90static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
  91				    int offset, int len, u32 *val)
  92{
  93	struct pci_controller *hose = pci_bus_to_host(bus);
  94
  95	if (fsl_pcie_check_link(hose))
  96		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
  97	else
  98		hose->indirect_type &= ~PPC_INDIRECT_TYPE_NO_PCIE_LINK;
  99
 100	return indirect_read_config(bus, devfn, offset, len, val);
 101}
 102
 103#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 104
 105static struct pci_ops fsl_indirect_pcie_ops =
 106{
 107	.read = fsl_indirect_read_config,
 108	.write = indirect_write_config,
 109};
 110
 111static u64 pci64_dma_offset;
 112
 113#ifdef CONFIG_SWIOTLB
 114static void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
 115{
 116	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
 117
 118	pdev->dev.bus_dma_limit =
 119		hose->dma_window_base_cur + hose->dma_window_size - 1;
 120}
 121
 122static void setup_swiotlb_ops(struct pci_controller *hose)
 123{
 124	if (ppc_swiotlb_enable)
 125		hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
 
 
 126}
 127#else
 128static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
 129#endif
 130
 131static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
 132{
 
 
 
 133	/*
 134	 * Fix up PCI devices that are able to DMA to the large inbound
 135	 * mapping that allows addressing any RAM address from across PCI.
 136	 */
 137	if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
 138		dev->bus_dma_limit = 0;
 139		dev->archdata.dma_offset = pci64_dma_offset;
 140	}
 
 
 
 141}
 142
 143static int setup_one_atmu(struct ccsr_pci __iomem *pci,
 144	unsigned int index, const struct resource *res,
 145	resource_size_t offset)
 146{
 147	resource_size_t pci_addr = res->start - offset;
 148	resource_size_t phys_addr = res->start;
 149	resource_size_t size = resource_size(res);
 150	u32 flags = 0x80044000; /* enable & mem R/W */
 151	unsigned int i;
 152
 153	pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
 154		(u64)res->start, (u64)size);
 155
 156	if (res->flags & IORESOURCE_PREFETCH)
 157		flags |= 0x10000000; /* enable relaxed ordering */
 158
 159	for (i = 0; size > 0; i++) {
 160		unsigned int bits = min_t(u32, ilog2(size),
 161					__ffs(pci_addr | phys_addr));
 162
 163		if (index + i >= 5)
 164			return -1;
 165
 166		out_be32(&pci->pow[index + i].potar, pci_addr >> 12);
 167		out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44);
 168		out_be32(&pci->pow[index + i].powbar, phys_addr >> 12);
 169		out_be32(&pci->pow[index + i].powar, flags | (bits - 1));
 170
 171		pci_addr += (resource_size_t)1U << bits;
 172		phys_addr += (resource_size_t)1U << bits;
 173		size -= (resource_size_t)1U << bits;
 174	}
 175
 176	return i;
 177}
 178
 179static bool is_kdump(void)
 180{
 181	struct device_node *node;
 182
 183	node = of_find_node_by_type(NULL, "memory");
 184	if (!node) {
 185		WARN_ON_ONCE(1);
 186		return false;
 187	}
 188
 189	return of_property_read_bool(node, "linux,usable-memory");
 190}
 191
 192/* atmu setup for fsl pci/pcie controller */
 193static void setup_pci_atmu(struct pci_controller *hose)
 194{
 195	struct ccsr_pci __iomem *pci = hose->private_data;
 196	int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
 197	u64 mem, sz, paddr_hi = 0;
 198	u64 offset = 0, paddr_lo = ULLONG_MAX;
 199	u32 pcicsrbar = 0, pcicsrbar_sz;
 200	u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
 201			PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
 202	const u64 *reg;
 203	int len;
 204	bool setup_inbound;
 205
 206	/*
 207	 * If this is kdump, we don't want to trigger a bunch of PCI
 208	 * errors by closing the window on in-flight DMA.
 209	 *
 210	 * We still run most of the function's logic so that things like
 211	 * hose->dma_window_size still get set.
 212	 */
 213	setup_inbound = !is_kdump();
 214
 215	if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
 216		/*
 217		 * BSC9132 Rev1.0 has an issue where all the PEX inbound
 218		 * windows have implemented the default target value as 0xf
 219		 * for CCSR space.In all Freescale legacy devices the target
 220		 * of 0xf is reserved for local memory space. 9132 Rev1.0
 221		 * now has local mempry space mapped to target 0x0 instead of
 222		 * 0xf. Hence adding a workaround to remove the target 0xf
 223		 * defined for memory space from Inbound window attributes.
 224		 */
 225		piwar &= ~PIWAR_TGI_LOCAL;
 226	}
 227
 228	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 229		if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
 230			win_idx = 2;
 231			start_idx = 0;
 232			end_idx = 3;
 233		}
 234	}
 235
 236	/* Disable all windows (except powar0 since it's ignored) */
 237	for(i = 1; i < 5; i++)
 238		out_be32(&pci->pow[i].powar, 0);
 239
 240	if (setup_inbound) {
 241		for (i = start_idx; i < end_idx; i++)
 242			out_be32(&pci->piw[i].piwar, 0);
 243	}
 244
 245	/* Setup outbound MEM window */
 246	for(i = 0, j = 1; i < 3; i++) {
 247		if (!(hose->mem_resources[i].flags & IORESOURCE_MEM))
 248			continue;
 249
 250		paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start);
 251		paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end);
 252
 253		/* We assume all memory resources have the same offset */
 254		offset = hose->mem_offset[i];
 255		n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset);
 256
 257		if (n < 0 || j >= 5) {
 258			pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
 259			hose->mem_resources[i].flags |= IORESOURCE_DISABLED;
 260		} else
 261			j += n;
 262	}
 263
 264	/* Setup outbound IO window */
 265	if (hose->io_resource.flags & IORESOURCE_IO) {
 266		if (j >= 5) {
 267			pr_err("Ran out of outbound PCI ATMUs for IO resource\n");
 268		} else {
 269			pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
 270				 "phy base 0x%016llx.\n",
 271				 (u64)hose->io_resource.start,
 272				 (u64)resource_size(&hose->io_resource),
 273				 (u64)hose->io_base_phys);
 274			out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
 275			out_be32(&pci->pow[j].potear, 0);
 276			out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
 277			/* Enable, IO R/W */
 278			out_be32(&pci->pow[j].powar, 0x80088000
 279				| (ilog2(hose->io_resource.end
 280				- hose->io_resource.start + 1) - 1));
 281		}
 282	}
 283
 284	/* convert to pci address space */
 285	paddr_hi -= offset;
 286	paddr_lo -= offset;
 287
 288	if (paddr_hi == paddr_lo) {
 289		pr_err("%pOF: No outbound window space\n", hose->dn);
 290		return;
 291	}
 292
 293	if (paddr_lo == 0) {
 294		pr_err("%pOF: No space for inbound window\n", hose->dn);
 295		return;
 296	}
 297
 298	/* setup PCSRBAR/PEXCSRBAR */
 299	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
 300	early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz);
 301	pcicsrbar_sz = ~pcicsrbar_sz + 1;
 302
 303	if (paddr_hi < (0x100000000ull - pcicsrbar_sz) ||
 304		(paddr_lo > 0x100000000ull))
 305		pcicsrbar = 0x100000000ull - pcicsrbar_sz;
 306	else
 307		pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz;
 308	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar);
 309
 310	paddr_lo = min(paddr_lo, (u64)pcicsrbar);
 311
 312	pr_info("%pOF: PCICSRBAR @ 0x%x\n", hose->dn, pcicsrbar);
 313
 314	/* Setup inbound mem window */
 315	mem = memblock_end_of_DRAM();
 316	pr_info("%s: end of DRAM %llx\n", __func__, mem);
 317
 318	/*
 319	 * The msi-address-64 property, if it exists, indicates the physical
 320	 * address of the MSIIR register.  Normally, this register is located
 321	 * inside CCSR, so the ATMU that covers all of CCSR is used. But if
 322	 * this property exists, then we normally need to create a new ATMU
 323	 * for it.  For now, however, we cheat.  The only entity that creates
 324	 * this property is the Freescale hypervisor, and the address is
 325	 * specified in the partition configuration.  Typically, the address
 326	 * is located in the page immediately after the end of DDR.  If so, we
 327	 * can avoid allocating a new ATMU by extending the DDR ATMU by one
 328	 * page.
 329	 */
 330	reg = of_get_property(hose->dn, "msi-address-64", &len);
 331	if (reg && (len == sizeof(u64))) {
 332		u64 address = be64_to_cpup(reg);
 333
 334		if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
 335			pr_info("%pOF: extending DDR ATMU to cover MSIIR", hose->dn);
 336			mem += PAGE_SIZE;
 337		} else {
 338			/* TODO: Create a new ATMU for MSIIR */
 339			pr_warn("%pOF: msi-address-64 address of %llx is "
 340				"unsupported\n", hose->dn, address);
 341		}
 342	}
 343
 344	sz = min(mem, paddr_lo);
 345	mem_log = ilog2(sz);
 346
 347	/* PCIe can overmap inbound & outbound since RX & TX are separated */
 348	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 349		/* Size window to exact size if power-of-two or one size up */
 350		if ((1ull << mem_log) != mem) {
 351			mem_log++;
 352			if ((1ull << mem_log) > mem)
 353				pr_info("%pOF: Setting PCI inbound window "
 354					"greater than memory size\n", hose->dn);
 355		}
 356
 357		piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);
 358
 359		if (setup_inbound) {
 360			/* Setup inbound memory window */
 361			out_be32(&pci->piw[win_idx].pitar,  0x00000000);
 362			out_be32(&pci->piw[win_idx].piwbar, 0x00000000);
 363			out_be32(&pci->piw[win_idx].piwar,  piwar);
 364		}
 365
 366		win_idx--;
 367		hose->dma_window_base_cur = 0x00000000;
 368		hose->dma_window_size = (resource_size_t)sz;
 369
 370		/*
 371		 * if we have >4G of memory setup second PCI inbound window to
 372		 * let devices that are 64-bit address capable to work w/o
 373		 * SWIOTLB and access the full range of memory
 374		 */
 375		if (sz != mem) {
 376			mem_log = ilog2(mem);
 377
 378			/* Size window up if we dont fit in exact power-of-2 */
 379			if ((1ull << mem_log) != mem)
 380				mem_log++;
 381
 382			piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
 383			pci64_dma_offset = 1ULL << mem_log;
 384
 385			if (setup_inbound) {
 386				/* Setup inbound memory window */
 387				out_be32(&pci->piw[win_idx].pitar,  0x00000000);
 388				out_be32(&pci->piw[win_idx].piwbear,
 389						pci64_dma_offset >> 44);
 390				out_be32(&pci->piw[win_idx].piwbar,
 391						pci64_dma_offset >> 12);
 392				out_be32(&pci->piw[win_idx].piwar,  piwar);
 393			}
 394
 395			/*
 396			 * install our own dma_set_mask handler to fixup dma_ops
 397			 * and dma_offset
 398			 */
 399			ppc_md.dma_set_mask = fsl_pci_dma_set_mask;
 400
 401			pr_info("%pOF: Setup 64-bit PCI DMA window\n", hose->dn);
 402		}
 403	} else {
 404		u64 paddr = 0;
 405
 406		if (setup_inbound) {
 407			/* Setup inbound memory window */
 408			out_be32(&pci->piw[win_idx].pitar,  paddr >> 12);
 409			out_be32(&pci->piw[win_idx].piwbar, paddr >> 12);
 410			out_be32(&pci->piw[win_idx].piwar,
 411				 (piwar | (mem_log - 1)));
 412		}
 413
 414		win_idx--;
 415		paddr += 1ull << mem_log;
 416		sz -= 1ull << mem_log;
 417
 418		if (sz) {
 419			mem_log = ilog2(sz);
 420			piwar |= (mem_log - 1);
 421
 422			if (setup_inbound) {
 423				out_be32(&pci->piw[win_idx].pitar,
 424					 paddr >> 12);
 425				out_be32(&pci->piw[win_idx].piwbar,
 426					 paddr >> 12);
 427				out_be32(&pci->piw[win_idx].piwar, piwar);
 428			}
 429
 430			win_idx--;
 431			paddr += 1ull << mem_log;
 432		}
 433
 434		hose->dma_window_base_cur = 0x00000000;
 435		hose->dma_window_size = (resource_size_t)paddr;
 436	}
 437
 438	if (hose->dma_window_size < mem) {
 439#ifdef CONFIG_SWIOTLB
 440		ppc_swiotlb_enable = 1;
 441#else
 442		pr_err("%pOF: ERROR: Memory size exceeds PCI ATMU ability to "
 443			"map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
 444			 hose->dn);
 445#endif
 446		/* adjusting outbound windows could reclaim space in mem map */
 447		if (paddr_hi < 0xffffffffull)
 448			pr_warn("%pOF: WARNING: Outbound window cfg leaves "
 449				"gaps in memory map. Adjusting the memory map "
 450				"could reduce unnecessary bounce buffering.\n",
 451				hose->dn);
 452
 453		pr_info("%pOF: DMA window size is 0x%llx\n", hose->dn,
 454			(u64)hose->dma_window_size);
 455	}
 456}
 457
 458static void setup_pci_cmd(struct pci_controller *hose)
 459{
 460	u16 cmd;
 461	int cap_x;
 462
 463	early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
 464	cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
 465		| PCI_COMMAND_IO;
 466	early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);
 467
 468	cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX);
 469	if (cap_x) {
 470		int pci_x_cmd = cap_x + PCI_X_CMD;
 471		cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
 472			| PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
 473		early_write_config_word(hose, 0, 0, pci_x_cmd, cmd);
 474	} else {
 475		early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
 476	}
 477}
 478
 479void fsl_pcibios_fixup_bus(struct pci_bus *bus)
 480{
 481	struct pci_controller *hose = pci_bus_to_host(bus);
 482	int i, is_pcie = 0, no_link;
 483
 484	/* The root complex bridge comes up with bogus resources,
 485	 * we copy the PHB ones in.
 486	 *
 487	 * With the current generic PCI code, the PHB bus no longer
 488	 * has bus->resource[0..4] set, so things are a bit more
 489	 * tricky.
 490	 */
 491
 492	if (fsl_pcie_bus_fixup)
 493		is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
 494	no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);
 495
 496	if (bus->parent == hose->bus && (is_pcie || no_link)) {
 497		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
 498			struct resource *res = bus->resource[i];
 499			struct resource *par;
 500
 501			if (!res)
 502				continue;
 503			if (i == 0)
 504				par = &hose->io_resource;
 505			else if (i < 4)
 506				par = &hose->mem_resources[i-1];
 507			else par = NULL;
 508
 509			res->start = par ? par->start : 0;
 510			res->end   = par ? par->end   : 0;
 511			res->flags = par ? par->flags : 0;
 512		}
 513	}
 514}
 515
 516int fsl_add_bridge(struct platform_device *pdev, int is_primary)
 517{
 518	int len;
 519	struct pci_controller *hose;
 520	struct resource rsrc;
 521	const int *bus_range;
 522	u8 hdr_type, progif;
 523	struct device_node *dev;
 524	struct ccsr_pci __iomem *pci;
 525	u16 temp;
 526	u32 svr = mfspr(SPRN_SVR);
 527
 528	dev = pdev->dev.of_node;
 529
 530	if (!of_device_is_available(dev)) {
 531		pr_warn("%pOF: disabled\n", dev);
 532		return -ENODEV;
 533	}
 534
 535	pr_debug("Adding PCI host bridge %pOF\n", dev);
 536
 537	/* Fetch host bridge registers address */
 538	if (of_address_to_resource(dev, 0, &rsrc)) {
 539		printk(KERN_WARNING "Can't get pci register base!\n");
 540		return -ENOMEM;
 541	}
 542
 543	/* Get bus range if any */
 544	bus_range = of_get_property(dev, "bus-range", &len);
 545	if (bus_range == NULL || len < 2 * sizeof(int))
 546		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
 547			" bus 0\n", dev);
 548
 549	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 550	hose = pcibios_alloc_controller(dev);
 551	if (!hose)
 552		return -ENOMEM;
 553
 554	/* set platform device as the parent */
 555	hose->parent = &pdev->dev;
 556	hose->first_busno = bus_range ? bus_range[0] : 0x0;
 557	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 558
 559	pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
 560		 (u64)rsrc.start, (u64)resource_size(&rsrc));
 561
 562	pci = hose->private_data = ioremap(rsrc.start, resource_size(&rsrc));
 563	if (!hose->private_data)
 564		goto no_bridge;
 565
 566	setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
 567			   PPC_INDIRECT_TYPE_BIG_ENDIAN);
 568
 569	if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
 570		hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 571
 572	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 573		/* use fsl_indirect_read_config for PCIe */
 574		hose->ops = &fsl_indirect_pcie_ops;
 575		/* For PCIE read HEADER_TYPE to identify controller mode */
 576		early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
 577		if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
 578			goto no_bridge;
 579
 580	} else {
 581		/* For PCI read PROG to identify controller mode */
 582		early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
 583		if ((progif & 1) &&
 584		    !of_property_read_bool(dev, "fsl,pci-agent-force-enum"))
 585			goto no_bridge;
 586	}
 587
 588	setup_pci_cmd(hose);
 589
 590	/* check PCI express link status */
 591	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 592		hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG |
 593			PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
 594		if (fsl_pcie_check_link(hose))
 595			hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 596	} else {
 597		/*
 598		 * Set PBFR (PCI Bus Function Register)[10] = 1 to
 599		 * disable combining of requests that cross a cacheline
 600		 * boundary into a single burst transaction.
 601		 * PCI-X operation is not affected.
 602		 * This works around erratum PCI 5 on the MPC8548.
 603		 */
 604#define PCI_BUS_FUNCTION 0x44
 605#define PCI_BUS_FUNCTION_MDS 0x400	/* Master disable streaming */
 606		if (((SVR_SOC_VER(svr) == SVR_8543) ||
 607		     (SVR_SOC_VER(svr) == SVR_8545) ||
 608		     (SVR_SOC_VER(svr) == SVR_8547) ||
 609		     (SVR_SOC_VER(svr) == SVR_8548)) &&
 610		    !early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX)) {
 611			early_read_config_word(hose, 0, 0,
 612					PCI_BUS_FUNCTION, &temp);
 613			temp |= PCI_BUS_FUNCTION_MDS;
 614			early_write_config_word(hose, 0, 0,
 615					PCI_BUS_FUNCTION, temp);
 616		}
 617	}
 618
 619	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
 620		"Firmware bus number: %d->%d\n",
 621		(unsigned long long)rsrc.start, hose->first_busno,
 622		hose->last_busno);
 623
 624	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
 625		hose, hose->cfg_addr, hose->cfg_data);
 626
 627	/* Interpret the "ranges" property */
 628	/* This also maps the I/O region and sets isa_io/mem_base */
 629	pci_process_bridge_OF_ranges(hose, dev, is_primary);
 630
 631	/* Setup PEX window registers */
 632	setup_pci_atmu(hose);
 633
 634	/* Set up controller operations */
 635	setup_swiotlb_ops(hose);
 636
 637	return 0;
 638
 639no_bridge:
 640	iounmap(hose->private_data);
 641	/* unmap cfg_data & cfg_addr separately if not on same page */
 642	if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
 643	    ((unsigned long)hose->cfg_addr & PAGE_MASK))
 644		iounmap(hose->cfg_data);
 645	iounmap(hose->cfg_addr);
 646	pcibios_free_controller(hose);
 647	return -ENODEV;
 648}
 649#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
 650
 651DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID,
 652			quirk_fsl_pcie_early);
 653
 654#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
 655struct mpc83xx_pcie_priv {
 656	void __iomem *cfg_type0;
 657	void __iomem *cfg_type1;
 658	u32 dev_base;
 659};
 660
 661struct pex_inbound_window {
 662	u32 ar;
 663	u32 tar;
 664	u32 barl;
 665	u32 barh;
 666};
 667
 668/*
 669 * Following the U-Boot convention, PCIe outbound window 0 is used
 670 * for configuration transactions.
 671 */
 672#define PEX_OUTWIN0_BAR		0xCA4
 673#define PEX_OUTWIN0_TAL		0xCA8
 674#define PEX_OUTWIN0_TAH		0xCAC
 675#define PEX_RC_INWIN_BASE	0xE60
 676#define PEX_RCIWARn_EN		0x1
 677
 678static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn)
 679{
 680	struct pci_controller *hose = pci_bus_to_host(bus);
 681
 682	if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK)
 683		return PCIBIOS_DEVICE_NOT_FOUND;
 684	/*
 685	 * Workaround for a HW bug: on Type 0 configuration transactions the
 686	 * PCI-E controller does not check the device number bits and simply
 687	 * assumes that they are 0.
 688	 */
 689	if (bus->number == hose->first_busno ||
 690			bus->primary == hose->first_busno) {
 691		if (devfn & 0xf8)
 692			return PCIBIOS_DEVICE_NOT_FOUND;
 693	}
 694
 695	if (ppc_md.pci_exclude_device) {
 696		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
 697			return PCIBIOS_DEVICE_NOT_FOUND;
 698	}
 699
 700	return PCIBIOS_SUCCESSFUL;
 701}
 702
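/*
 * Map a config access to the right window: accesses on the root bus go
 * straight through the Type 0 window, while accesses to downstream
 * buses first retarget outbound window 0's translation address to the
 * requested bus/devfn and then go through the Type 1 window.
 */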
 703static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus,
 704					    unsigned int devfn, int offset)
 705{
 706	struct pci_controller *hose = pci_bus_to_host(bus);
 707	struct mpc83xx_pcie_priv *pcie = hose->dn->data;
 708	u32 dev_base = bus->number << 24 | devfn << 16;
 709	int ret;
 710
 711	ret = mpc83xx_pcie_exclude_device(bus, devfn);
 712	if (ret)
 713		return NULL;
 714
 715	offset &= 0xfff;
 716
 717	/* Type 0 */
 718	if (bus->number == hose->first_busno)
 719		return pcie->cfg_type0 + offset;
 720
 721	if (pcie->dev_base == dev_base)
 722		goto mapped;
 723
 724	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, dev_base);
 725
 726	pcie->dev_base = dev_base;
 727mapped:
 728	return pcie->cfg_type1 + offset;
 729}
 730
 731static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
 732				     int offset, int len, u32 val)
 733{
 734	struct pci_controller *hose = pci_bus_to_host(bus);
 735
 736	/* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
 737	if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
 738		val &= 0xffffff00;
 739
 740	return pci_generic_config_write(bus, devfn, offset, len, val);
 741}
 742
 743static struct pci_ops mpc83xx_pcie_ops = {
 744	.map_bus = mpc83xx_pcie_remap_cfg,
 745	.read = pci_generic_config_read,
 746	.write = mpc83xx_pcie_write_config,
 747};
 748
 749static int __init mpc83xx_pcie_setup(struct pci_controller *hose,
 750				     struct resource *reg)
 751{
 752	struct mpc83xx_pcie_priv *pcie;
 753	u32 cfg_bar;
 754	int ret = -ENOMEM;
 755
 756	pcie = zalloc_maybe_bootmem(sizeof(*pcie), GFP_KERNEL);
 757	if (!pcie)
 758		return ret;
 759
 760	pcie->cfg_type0 = ioremap(reg->start, resource_size(reg));
 761	if (!pcie->cfg_type0)
 762		goto err0;
 763
 764	cfg_bar = in_le32(pcie->cfg_type0 + PEX_OUTWIN0_BAR);
 765	if (!cfg_bar) {
 766		/* PCI-E isn't configured. */
 767		ret = -ENODEV;
 768		goto err1;
 769	}
 770
 771	pcie->cfg_type1 = ioremap(cfg_bar, 0x1000);
 772	if (!pcie->cfg_type1)
 773		goto err1;
 774
 775	WARN_ON(hose->dn->data);
 776	hose->dn->data = pcie;
 777	hose->ops = &mpc83xx_pcie_ops;
 778	hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 779
 780	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0);
 781	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0);
 782
 783	if (fsl_pcie_check_link(hose))
 784		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 785
 786	return 0;
 787err1:
 788	iounmap(pcie->cfg_type0);
 789err0:
 790	kfree(pcie);
 791	return ret;
 792
 793}
 794
 795int __init mpc83xx_add_bridge(struct device_node *dev)
 796{
 797	int ret;
 798	int len;
 799	struct pci_controller *hose;
 800	struct resource rsrc_reg;
 801	struct resource rsrc_cfg;
 802	const int *bus_range;
 803	int primary;
 804
 805	is_mpc83xx_pci = 1;
 806
 807	if (!of_device_is_available(dev)) {
 808		pr_warn("%pOF: disabled by the firmware.\n",
 809			dev);
 810		return -ENODEV;
 811	}
 812	pr_debug("Adding PCI host bridge %pOF\n", dev);
 813
 814	/* Fetch host bridge registers address */
 815	if (of_address_to_resource(dev, 0, &rsrc_reg)) {
 816		printk(KERN_WARNING "Can't get pci register base!\n");
 817		return -ENOMEM;
 818	}
 819
 820	memset(&rsrc_cfg, 0, sizeof(rsrc_cfg));
 821
 822	if (of_address_to_resource(dev, 1, &rsrc_cfg)) {
 823		printk(KERN_WARNING
 824			"No pci config register base in dev tree, "
 825			"using default\n");
 826		/*
 827		 * MPC83xx supports up to two host controllers
 828		 * 	one at 0x8500 has config space registers at 0x8300
 829		 * 	one at 0x8600 has config space registers at 0x8380
 830		 */
 831		if ((rsrc_reg.start & 0xfffff) == 0x8500)
 832			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300;
 833		else if ((rsrc_reg.start & 0xfffff) == 0x8600)
 834			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380;
 835	}
 836	/*
 837	 * Controller at offset 0x8500 is primary
 838	 */
 839	if ((rsrc_reg.start & 0xfffff) == 0x8500)
 840		primary = 1;
 841	else
 842		primary = 0;
 843
 844	/* Get bus range if any */
 845	bus_range = of_get_property(dev, "bus-range", &len);
 846	if (bus_range == NULL || len < 2 * sizeof(int)) {
 847		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
 848		       " bus 0\n", dev);
 849	}
 850
 851	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 852	hose = pcibios_alloc_controller(dev);
 853	if (!hose)
 854		return -ENOMEM;
 855
 856	hose->first_busno = bus_range ? bus_range[0] : 0;
 857	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 858
 859	if (of_device_is_compatible(dev, "fsl,mpc8314-pcie")) {
 860		ret = mpc83xx_pcie_setup(hose, &rsrc_reg);
 861		if (ret)
 862			goto err0;
 863	} else {
 864		setup_indirect_pci(hose, rsrc_cfg.start,
 865				   rsrc_cfg.start + 4, 0);
 866	}
 867
 868	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
 869	       "Firmware bus number: %d->%d\n",
 870	       (unsigned long long)rsrc_reg.start, hose->first_busno,
 871	       hose->last_busno);
 872
 873	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
 874	    hose, hose->cfg_addr, hose->cfg_data);
 875
 876	/* Interpret the "ranges" property */
 877	/* This also maps the I/O region and sets isa_io/mem_base */
 878	pci_process_bridge_OF_ranges(hose, dev, primary);
 879
 880	return 0;
 881err0:
 882	pcibios_free_controller(hose);
 883	return ret;
 884}
 885#endif /* CONFIG_PPC_83xx */
 886
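/*
 * Return the PCI bus address at which the SoC's IMMR register block is
 * visible: on MPC83xx this is found by walking the root complex inbound
 * windows, while on Book E/86xx parts it is read from PEXCSRBAR (BAR0
 * of the host bridge).
 */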
 887u64 fsl_pci_immrbar_base(struct pci_controller *hose)
 888{
 889#ifdef CONFIG_PPC_83xx
 890	if (is_mpc83xx_pci) {
 891		struct mpc83xx_pcie_priv *pcie = hose->dn->data;
 892		struct pex_inbound_window *in;
 893		int i;
 894
 895		/* Walk the Root Complex Inbound windows to match IMMR base */
 896		in = pcie->cfg_type0 + PEX_RC_INWIN_BASE;
 897		for (i = 0; i < 4; i++) {
 898			/* not enabled, skip */
 899			if (!(in_le32(&in[i].ar) & PEX_RCIWARn_EN))
 900				continue;
 901
 902			if (get_immrbase() == in_le32(&in[i].tar))
 903				return (u64)in_le32(&in[i].barh) << 32 |
 904					    in_le32(&in[i].barl);
 905		}
 906
 907		printk(KERN_WARNING "could not find PCI BAR matching IMMR\n");
 908	}
 909#endif
 910
 911#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 912	if (!is_mpc83xx_pci) {
 913		u32 base;
 914
 915		pci_bus_read_config_dword(hose->bus,
 916			PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base);
 917
 918		/*
 919		 * For PEXCSRBAR, bits 3-0 indicate the prefetchable and
 920		 * address type, so they must be masked off when
 921		 * extracting the base address.
 922		 */
 923		base &= PCI_BASE_ADDRESS_MEM_MASK;
 924
 925		return base;
 926	}
 927#endif
 928
 929	return 0;
 930}
 931
 932#ifdef CONFIG_E500
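/*
 * Emulate the load that caused a machine check: fill the destination
 * register with the all-ones pattern a master abort would have
 * returned (sized to the access width) and, for update forms, apply
 * the effective-address update, so the faulting instruction can simply
 * be skipped.
 */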
 933static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
 934{
 935	unsigned int rd, ra, rb, d;
 936
 937	rd = get_rt(inst);
 938	ra = get_ra(inst);
 939	rb = get_rb(inst);
 940	d = get_d(inst);
 941
 942	switch (get_op(inst)) {
 943	case 31:
 944		switch (get_xop(inst)) {
 945		case OP_31_XOP_LWZX:
 946		case OP_31_XOP_LWBRX:
 947			regs->gpr[rd] = 0xffffffff;
 948			break;
 949
 950		case OP_31_XOP_LWZUX:
 951			regs->gpr[rd] = 0xffffffff;
 952			regs->gpr[ra] += regs->gpr[rb];
 953			break;
 954
 955		case OP_31_XOP_LBZX:
 956			regs->gpr[rd] = 0xff;
 957			break;
 958
 959		case OP_31_XOP_LBZUX:
 960			regs->gpr[rd] = 0xff;
 961			regs->gpr[ra] += regs->gpr[rb];
 962			break;
 963
 964		case OP_31_XOP_LHZX:
 965		case OP_31_XOP_LHBRX:
 966			regs->gpr[rd] = 0xffff;
 967			break;
 968
 969		case OP_31_XOP_LHZUX:
 970			regs->gpr[rd] = 0xffff;
 971			regs->gpr[ra] += regs->gpr[rb];
 972			break;
 973
 974		case OP_31_XOP_LHAX:
 975			regs->gpr[rd] = ~0UL;
 976			break;
 977
 978		case OP_31_XOP_LHAUX:
 979			regs->gpr[rd] = ~0UL;
 980			regs->gpr[ra] += regs->gpr[rb];
 981			break;
 982
 983		default:
 984			return 0;
 985		}
 986		break;
 987
 988	case OP_LWZ:
 989		regs->gpr[rd] = 0xffffffff;
 990		break;
 991
 992	case OP_LWZU:
 993		regs->gpr[rd] = 0xffffffff;
 994		regs->gpr[ra] += (s16)d;
 995		break;
 996
 997	case OP_LBZ:
 998		regs->gpr[rd] = 0xff;
 999		break;
1000
1001	case OP_LBZU:
1002		regs->gpr[rd] = 0xff;
1003		regs->gpr[ra] += (s16)d;
1004		break;
1005
1006	case OP_LHZ:
1007		regs->gpr[rd] = 0xffff;
1008		break;
1009
1010	case OP_LHZU:
1011		regs->gpr[rd] = 0xffff;
1012		regs->gpr[ra] += (s16)d;
1013		break;
1014
1015	case OP_LHA:
1016		regs->gpr[rd] = ~0UL;
1017		break;
1018
1019	case OP_LHAU:
1020		regs->gpr[rd] = ~0UL;
1021		regs->gpr[ra] += (s16)d;
1022		break;
1023
1024	default:
1025		return 0;
1026	}
1027
1028	return 1;
1029}
1030
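/* Return 1 if @addr falls within any FSL PCIe controller's memory window. */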
1031static int is_in_pci_mem_space(phys_addr_t addr)
1032{
1033	struct pci_controller *hose;
1034	struct resource *res;
1035	int i;
1036
1037	list_for_each_entry(hose, &hose_list, list_node) {
1038		if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG))
1039			continue;
1040
1041		for (i = 0; i < 3; i++) {
1042			res = &hose->mem_resources[i];
1043			if ((res->flags & IORESOURCE_MEM) &&
1044				addr >= res->start && addr <= res->end)
1045				return 1;
1046		}
1047	}
1048	return 0;
1049}
1050
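/*
 * Machine check hook: when the faulting address lies in PCI memory
 * space, fix up the offending load and advance past it instead of
 * treating the access to a dead or missing device as fatal.
 */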
1051int fsl_pci_mcheck_exception(struct pt_regs *regs)
1052{
1053	u32 inst;
1054	int ret;
1055	phys_addr_t addr = 0;
1056
1057	/* Let KVM/QEMU deal with the exception */
1058	if (regs->msr & MSR_GS)
1059		return 0;
1060
1061#ifdef CONFIG_PHYS_64BIT
1062	addr = mfspr(SPRN_MCARU);
1063	addr <<= 32;
1064#endif
1065	addr += mfspr(SPRN_MCAR);
1066
1067	if (is_in_pci_mem_space(addr)) {
1068		if (user_mode(regs))
1069			ret = copy_from_user_nofault(&inst,
1070					(void __user *)regs->nip, sizeof(inst));
1071		else
1072			ret = get_kernel_nofault(inst, (void *)regs->nip);
1073
1074		if (!ret && mcheck_handle_load(regs, inst)) {
1075			regs_add_return_ip(regs, 4);
1076			return 1;
1077		}
1078	}
1079
1080	return 0;
1081}
1082#endif
1083
1084#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
1085static const struct of_device_id pci_ids[] = {
1086	{ .compatible = "fsl,mpc8540-pci", },
1087	{ .compatible = "fsl,mpc8548-pcie", },
1088	{ .compatible = "fsl,mpc8610-pci", },
1089	{ .compatible = "fsl,mpc8641-pcie", },
1090	{ .compatible = "fsl,qoriq-pcie", },
1091	{ .compatible = "fsl,qoriq-pcie-v2.1", },
1092	{ .compatible = "fsl,qoriq-pcie-v2.2", },
1093	{ .compatible = "fsl,qoriq-pcie-v2.3", },
1094	{ .compatible = "fsl,qoriq-pcie-v2.4", },
1095	{ .compatible = "fsl,qoriq-pcie-v3.0", },
1096
1097	/*
1098	 * The following entries are for compatibility with older device
1099	 * trees.
1100	 */
1101	{ .compatible = "fsl,p1022-pcie", },
1102	{ .compatible = "fsl,p4080-pcie", },
1103
1104	{},
1105};
1106
1107struct device_node *fsl_pci_primary;
1108
1109void fsl_pci_assign_primary(void)
1110{
1111	struct device_node *np;
1112
1113	/* Callers can specify the primary bus using other means. */
1114	if (fsl_pci_primary)
1115		return;
1116
1117	/* If a PCI host bridge contains an ISA node, it's primary. */
1118	np = of_find_node_by_type(NULL, "isa");
1119	while ((fsl_pci_primary = of_get_parent(np))) {
1120		of_node_put(np);
1121		np = fsl_pci_primary;
1122
1123		if (of_match_node(pci_ids, np) && of_device_is_available(np))
1124			return;
1125	}
1126
1127	/*
1128	 * If there's no PCI host bridge with ISA, arbitrarily
1129	 * designate one as primary.  This can go away once
1130	 * various bugs with primary-less systems are fixed.
1131	 */
1132	for_each_matching_node(np, pci_ids) {
1133		if (of_device_is_available(np)) {
1134			fsl_pci_primary = np;
1135			of_node_put(np);
1136			return;
1137		}
1138	}
1139}
1140
1141#ifdef CONFIG_PM_SLEEP
1142static irqreturn_t fsl_pci_pme_handle(int irq, void *dev_id)
1143{
1144	struct pci_controller *hose = dev_id;
1145	struct ccsr_pci __iomem *pci = hose->private_data;
1146	u32 dr;
1147
1148	dr = in_be32(&pci->pex_pme_mes_dr);
1149	if (!dr)
1150		return IRQ_NONE;
1151
1152	out_be32(&pci->pex_pme_mes_dr, dr);
1153
1154	return IRQ_HANDLED;
1155}
1156
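/*
 * Request the controller's PME interrupt and enable reporting of the
 * power-management events (turn-off done, L2/L3 entry and exit) used
 * during suspend and resume.
 */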
1157static int fsl_pci_pme_probe(struct pci_controller *hose)
1158{
1159	struct ccsr_pci __iomem *pci;
1160	struct pci_dev *dev;
1161	int pme_irq;
1162	int res;
1163	u16 pms;
1164
1165	/* Get hose's pci_dev */
1166	dev = list_first_entry(&hose->bus->devices, typeof(*dev), bus_list);
1167
1168	/* PME Disable */
1169	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1170	pms &= ~PCI_PM_CTRL_PME_ENABLE;
1171	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1172
1173	pme_irq = irq_of_parse_and_map(hose->dn, 0);
1174	if (!pme_irq) {
1175		dev_err(&dev->dev, "Failed to map PME interrupt.\n");
1176
1177		return -ENXIO;
1178	}
1179
1180	res = devm_request_irq(hose->parent, pme_irq,
1181			fsl_pci_pme_handle,
1182			IRQF_SHARED,
1183			"[PCI] PME", hose);
1184	if (res < 0) {
1185		dev_err(&dev->dev, "Unable to request irq %d for PME\n", pme_irq);
1186		irq_dispose_mapping(pme_irq);
1187
1188		return -ENODEV;
1189	}
1190
1191	pci = hose->private_data;
1192
1193	/* Enable PTOD, ENL23D & EXL23D */
1194	clrbits32(&pci->pex_pme_mes_disr,
1195		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1196
1197	out_be32(&pci->pex_pme_mes_ier, 0);
1198	setbits32(&pci->pex_pme_mes_ier,
1199		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1200
1201	/* PME Enable */
1202	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1203	pms |= PCI_PM_CTRL_PME_ENABLE;
1204	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1205
1206	return 0;
1207}
1208
1209static void send_pme_turnoff_message(struct pci_controller *hose)
1210{
1211	struct ccsr_pci __iomem *pci = hose->private_data;
1212	u32 dr;
1213	int i;
1214
1215	/* Send PME_Turn_Off Message Request */
1216	setbits32(&pci->pex_pmcr, PEX_PMCR_PTOMR);
1217
1218	/* Wait for turn-off to complete */
1219	for (i = 0; i < 150; i++) {
1220		dr = in_be32(&pci->pex_pme_mes_dr);
1221		if (dr) {
1222			out_be32(&pci->pex_pme_mes_dr, dr);
1223			break;
1224		}
1225
1226		udelay(1000);
1227	}
1228}
1229
1230static void fsl_pci_syscore_do_suspend(struct pci_controller *hose)
1231{
1232	send_pme_turnoff_message(hose);
1233}
1234
1235static int fsl_pci_syscore_suspend(void)
1236{
1237	struct pci_controller *hose, *tmp;
1238
1239	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1240		fsl_pci_syscore_do_suspend(hose);
1241
1242	return 0;
1243}
1244
1245static void fsl_pci_syscore_do_resume(struct pci_controller *hose)
1246{
1247	struct ccsr_pci __iomem *pci = hose->private_data;
1248	u32 dr;
1249	int i;
1250
1251	/* Send Exit L2 State Message */
1252	setbits32(&pci->pex_pmcr, PEX_PMCR_EXL2S);
1253
1254	/* Wait for L2 state exit to complete */
1255	for (i = 0; i < 150; i++) {
1256		dr = in_be32(&pci->pex_pme_mes_dr);
1257		if (dr) {
1258			out_be32(&pci->pex_pme_mes_dr, dr);
1259			break;
1260		}
1261
1262		udelay(1000);
1263	}
1264
1265	setup_pci_atmu(hose);
1266}
1267
1268static void fsl_pci_syscore_resume(void)
1269{
1270	struct pci_controller *hose, *tmp;
1271
1272	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1273		fsl_pci_syscore_do_resume(hose);
1274}
1275
1276static struct syscore_ops pci_syscore_pm_ops = {
1277	.suspend = fsl_pci_syscore_suspend,
1278	.resume = fsl_pci_syscore_resume,
1279};
1280#endif
1281
1282void fsl_pcibios_fixup_phb(struct pci_controller *phb)
1283{
1284#ifdef CONFIG_PM_SLEEP
1285	fsl_pci_pme_probe(phb);
1286#endif
1287}
1288
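/*
 * Register a child "mpc85xx-pci-edac" platform device sharing this
 * bridge's resources so the EDAC driver can report PCI errors.
 */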
1289static int add_err_dev(struct platform_device *pdev)
1290{
1291	struct platform_device *errdev;
1292	struct mpc85xx_edac_pci_plat_data pd = {
1293		.of_node = pdev->dev.of_node
1294	};
1295
1296	errdev = platform_device_register_resndata(&pdev->dev,
1297						   "mpc85xx-pci-edac",
1298						   PLATFORM_DEVID_AUTO,
1299						   pdev->resource,
1300						   pdev->num_resources,
1301						   &pd, sizeof(pd));
1302
1303	return PTR_ERR_OR_ZERO(errdev);
1304}
1305
1306static int fsl_pci_probe(struct platform_device *pdev)
1307{
1308	struct device_node *node;
1309	int ret;
1310
1311	node = pdev->dev.of_node;
1312	ret = fsl_add_bridge(pdev, fsl_pci_primary == node);
1313	if (ret)
1314		return ret;
1315
1316	ret = add_err_dev(pdev);
1317	if (ret)
1318		dev_err(&pdev->dev, "couldn't register error device: %d\n",
1319			ret);
1320
1321	return 0;
1322}
1323
1324static struct platform_driver fsl_pci_driver = {
1325	.driver = {
1326		.name = "fsl-pci",
1327		.of_match_table = pci_ids,
1328	},
1329	.probe = fsl_pci_probe,
1330};
1331
1332static int __init fsl_pci_init(void)
1333{
1334#ifdef CONFIG_PM_SLEEP
1335	register_syscore_ops(&pci_syscore_pm_ops);
1336#endif
1337	return platform_driver_register(&fsl_pci_driver);
1338}
1339arch_initcall(fsl_pci_init);
1340#endif