   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
    3 * MPC83xx/85xx/86xx PCI/PCIE support routines.
   4 *
   5 * Copyright 2007-2012 Freescale Semiconductor, Inc.
   6 * Copyright 2008-2009 MontaVista Software, Inc.
   7 *
   8 * Initial author: Xianghua Xiao <x.xiao@freescale.com>
   9 * Recode: ZHANG WEI <wei.zhang@freescale.com>
   10 * Rewrite the routing for Freescale PCI and PCI Express
  11 * 	Roy Zang <tie-fei.zang@freescale.com>
  12 * MPC83xx PCI-Express support:
  13 * 	Tony Li <tony.li@freescale.com>
  14 * 	Anton Vorontsov <avorontsov@ru.mvista.com>
  15 */
  16#include <linux/kernel.h>
  17#include <linux/pci.h>
  18#include <linux/delay.h>
  19#include <linux/string.h>
  20#include <linux/fsl/edac.h>
  21#include <linux/init.h>
  22#include <linux/interrupt.h>
  23#include <linux/memblock.h>
  24#include <linux/log2.h>
  25#include <linux/of_address.h>
  26#include <linux/of_irq.h>
  27#include <linux/platform_device.h>
  28#include <linux/slab.h>
  29#include <linux/suspend.h>
  30#include <linux/syscore_ops.h>
  31#include <linux/uaccess.h>
  32
   33#include <asm/io.h>
   34#include <asm/pci-bridge.h>
  35#include <asm/ppc-pci.h>
  36#include <asm/machdep.h>
  37#include <asm/mpc85xx.h>
  38#include <asm/disassemble.h>
  39#include <asm/ppc-opcode.h>
  40#include <asm/swiotlb.h>
  41#include <asm/setup.h>
  42#include <sysdev/fsl_soc.h>
  43#include <sysdev/fsl_pci.h>
  44
  45static int fsl_pcie_bus_fixup, is_mpc83xx_pci;
  46
  47static void quirk_fsl_pcie_early(struct pci_dev *dev)
  48{
  49	u8 hdr_type;
  50
  51	/* if we aren't a PCIe don't bother */
  52	if (!pci_is_pcie(dev))
  53		return;
  54
  55	/* if we aren't in host mode don't bother */
  56	pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
  57	if ((hdr_type & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_BRIDGE)
  58		return;
  59
  60	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
  61	fsl_pcie_bus_fixup = 1;
  62	return;
  63}
  64
  65static int fsl_indirect_read_config(struct pci_bus *, unsigned int,
  66				    int, int, u32 *);
  67
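/*
 * Returns 1 if the PCIe link is down (LTSSM not yet in the L0 state),
 * 0 if the link is up.  Pre-3.0 controllers report the LTSSM state via
 * the PCIE_LTSSM config register; IP rev 3.0 and later report it in
 * the CSR0 register of the CCSR block.
 */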
  68static int fsl_pcie_check_link(struct pci_controller *hose)
  69{
  70	u32 val = 0;
  71
  72	if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
  73		if (hose->ops->read == fsl_indirect_read_config)
  74			__indirect_read_config(hose, hose->first_busno, 0,
  75					       PCIE_LTSSM, 4, &val);
  76		else
  77			early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
  78		if (val < PCIE_LTSSM_L0)
  79			return 1;
  80	} else {
  81		struct ccsr_pci __iomem *pci = hose->private_data;
  82		/* for PCIe IP rev 3.0 or greater use CSR0 for link state */
  83		val = (in_be32(&pci->pex_csr0) & PEX_CSR0_LTSSM_MASK)
  84				>> PEX_CSR0_LTSSM_SHIFT;
  85		if (val != PEX_CSR0_LTSSM_L0)
  86			return 1;
  87	}
  88
  89	return 0;
  90}
  91
  92static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
  93				    int offset, int len, u32 *val)
  94{
  95	struct pci_controller *hose = pci_bus_to_host(bus);
  96
  97	if (fsl_pcie_check_link(hose))
  98		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
  99	else
 100		hose->indirect_type &= ~PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 101
 102	return indirect_read_config(bus, devfn, offset, len, val);
 103}
 104
 105#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 106
 107static struct pci_ops fsl_indirect_pcie_ops =
 108{
 109	.read = fsl_indirect_read_config,
 110	.write = indirect_write_config,
 111};
 112
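/*
 * PCI address at which the optional second (64-bit) inbound window is
 * mapped; devices with a large enough DMA mask use it as their DMA
 * offset so they can reach all of RAM without SWIOTLB bouncing.
 */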
 113static u64 pci64_dma_offset;
 114
 115#ifdef CONFIG_SWIOTLB
 116static void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
 117{
 118	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
 119
 120	pdev->dev.bus_dma_limit =
 121		hose->dma_window_base_cur + hose->dma_window_size - 1;
 122}
 123
 124static void setup_swiotlb_ops(struct pci_controller *hose)
 125{
 126	if (ppc_swiotlb_enable)
 127		hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
 128}
 129#else
 130static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
 131#endif
 132
 133static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
 134{
 135	/*
 136	 * Fix up PCI devices that are able to DMA to the large inbound
 137	 * mapping that allows addressing any RAM address from across PCI.
 138	 */
 139	if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
 140		dev->bus_dma_limit = 0;
 141		dev->archdata.dma_offset = pci64_dma_offset;
 142	}
 143}
 144
 145static int setup_one_atmu(struct ccsr_pci __iomem *pci,
 146	unsigned int index, const struct resource *res,
 147	resource_size_t offset)
 148{
 149	resource_size_t pci_addr = res->start - offset;
 150	resource_size_t phys_addr = res->start;
 151	resource_size_t size = resource_size(res);
 152	u32 flags = 0x80044000; /* enable & mem R/W */
 153	unsigned int i;
 154
 155	pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
 156		(u64)res->start, (u64)size);
 157
 158	if (res->flags & IORESOURCE_PREFETCH)
 159		flags |= 0x10000000; /* enable relaxed ordering */
 160
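	/*
	 * Each outbound window must be a power-of-two in size and
	 * naturally aligned on both the PCI and the physical side, so
	 * split the resource into the largest chunks that satisfy both
	 * constraints.
	 */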
 161	for (i = 0; size > 0; i++) {
 162		unsigned int bits = min_t(u32, ilog2(size),
 163					__ffs(pci_addr | phys_addr));
 164
 165		if (index + i >= 5)
 166			return -1;
 167
 168		out_be32(&pci->pow[index + i].potar, pci_addr >> 12);
 169		out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44);
 170		out_be32(&pci->pow[index + i].powbar, phys_addr >> 12);
 171		out_be32(&pci->pow[index + i].powar, flags | (bits - 1));
 172
 173		pci_addr += (resource_size_t)1U << bits;
 174		phys_addr += (resource_size_t)1U << bits;
 175		size -= (resource_size_t)1U << bits;
 176	}
 177
 178	return i;
 179}
 180
 181static bool is_kdump(void)
 182{
 183	struct device_node *node;
 184	bool ret;
 185
 186	node = of_find_node_by_type(NULL, "memory");
 187	if (!node) {
 188		WARN_ON_ONCE(1);
 189		return false;
 190	}
 191
 192	ret = of_property_read_bool(node, "linux,usable-memory");
 193	of_node_put(node);
 194
 195	return ret;
 196}
 197
  198/* ATMU setup for the FSL PCI/PCIe controller */
 199static void setup_pci_atmu(struct pci_controller *hose)
 200{
 201	struct ccsr_pci __iomem *pci = hose->private_data;
 202	int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
 203	u64 mem, sz, paddr_hi = 0;
 204	u64 offset = 0, paddr_lo = ULLONG_MAX;
 205	u32 pcicsrbar = 0, pcicsrbar_sz;
 206	u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
 207			PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
 208	const u64 *reg;
 209	int len;
 210	bool setup_inbound;
 211
 212	/*
 213	 * If this is kdump, we don't want to trigger a bunch of PCI
 214	 * errors by closing the window on in-flight DMA.
 215	 *
 216	 * We still run most of the function's logic so that things like
 217	 * hose->dma_window_size still get set.
 218	 */
 219	setup_inbound = !is_kdump();
 220
 221	if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
 222		/*
 223		 * BSC9132 Rev1.0 has an issue where all the PEX inbound
 224		 * windows have implemented the default target value as 0xf
  225		 * for CCSR space. In all Freescale legacy devices the target
 226		 * of 0xf is reserved for local memory space. 9132 Rev1.0
 227		 * now has local memory space mapped to target 0x0 instead of
 228		 * 0xf. Hence adding a workaround to remove the target 0xf
 229		 * defined for memory space from Inbound window attributes.
 230		 */
 231		piwar &= ~PIWAR_TGI_LOCAL;
 232	}
 233
 234	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 235		if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
 236			win_idx = 2;
 237			start_idx = 0;
 238			end_idx = 3;
 239		}
 240	}
 241
 242	/* Disable all windows (except powar0 since it's ignored) */
 243	for(i = 1; i < 5; i++)
 244		out_be32(&pci->pow[i].powar, 0);
 245
 246	if (setup_inbound) {
 247		for (i = start_idx; i < end_idx; i++)
 248			out_be32(&pci->piw[i].piwar, 0);
 249	}
 250
 251	/* Setup outbound MEM window */
 252	for(i = 0, j = 1; i < 3; i++) {
 253		if (!(hose->mem_resources[i].flags & IORESOURCE_MEM))
 254			continue;
 255
 256		paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start);
 257		paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end);
 258
 259		/* We assume all memory resources have the same offset */
 260		offset = hose->mem_offset[i];
 261		n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset);
 262
 263		if (n < 0 || j >= 5) {
 264			pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
 265			hose->mem_resources[i].flags |= IORESOURCE_DISABLED;
 266		} else
 267			j += n;
 268	}
 269
 270	/* Setup outbound IO window */
 271	if (hose->io_resource.flags & IORESOURCE_IO) {
 272		if (j >= 5) {
 273			pr_err("Ran out of outbound PCI ATMUs for IO resource\n");
 274		} else {
 275			pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
 276				 "phy base 0x%016llx.\n",
 277				 (u64)hose->io_resource.start,
 278				 (u64)resource_size(&hose->io_resource),
 279				 (u64)hose->io_base_phys);
 280			out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
 281			out_be32(&pci->pow[j].potear, 0);
 282			out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
 283			/* Enable, IO R/W */
 284			out_be32(&pci->pow[j].powar, 0x80088000
 285				| (ilog2(hose->io_resource.end
 286				- hose->io_resource.start + 1) - 1));
 287		}
 288	}
 289
 290	/* convert to pci address space */
 291	paddr_hi -= offset;
 292	paddr_lo -= offset;
 293
 294	if (paddr_hi == paddr_lo) {
 295		pr_err("%pOF: No outbound window space\n", hose->dn);
 296		return;
 297	}
 298
 299	if (paddr_lo == 0) {
 300		pr_err("%pOF: No space for inbound window\n", hose->dn);
 301		return;
 302	}
 303
 304	/* setup PCSRBAR/PEXCSRBAR */
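	/*
	 * Probe the PCSRBAR size with the standard BAR sizing sequence:
	 * write all ones, read back the mask, and take its two's
	 * complement to get the window size.
	 */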
 305	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
 306	early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz);
 307	pcicsrbar_sz = ~pcicsrbar_sz + 1;
 308
 309	if (paddr_hi < (0x100000000ull - pcicsrbar_sz) ||
 310		(paddr_lo > 0x100000000ull))
 311		pcicsrbar = 0x100000000ull - pcicsrbar_sz;
 312	else
 313		pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz;
 314	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar);
 315
 316	paddr_lo = min(paddr_lo, (u64)pcicsrbar);
 317
 318	pr_info("%pOF: PCICSRBAR @ 0x%x\n", hose->dn, pcicsrbar);
 319
 320	/* Setup inbound mem window */
 321	mem = memblock_end_of_DRAM();
 322	pr_info("%s: end of DRAM %llx\n", __func__, mem);
 323
 324	/*
 325	 * The msi-address-64 property, if it exists, indicates the physical
 326	 * address of the MSIIR register.  Normally, this register is located
 327	 * inside CCSR, so the ATMU that covers all of CCSR is used. But if
 328	 * this property exists, then we normally need to create a new ATMU
 329	 * for it.  For now, however, we cheat.  The only entity that creates
 330	 * this property is the Freescale hypervisor, and the address is
 331	 * specified in the partition configuration.  Typically, the address
 332	 * is located in the page immediately after the end of DDR.  If so, we
 333	 * can avoid allocating a new ATMU by extending the DDR ATMU by one
 334	 * page.
 335	 */
 336	reg = of_get_property(hose->dn, "msi-address-64", &len);
 337	if (reg && (len == sizeof(u64))) {
 338		u64 address = be64_to_cpup(reg);
 339
 340		if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
  341			pr_info("%pOF: extending DDR ATMU to cover MSIIR\n", hose->dn);
 342			mem += PAGE_SIZE;
 343		} else {
 344			/* TODO: Create a new ATMU for MSIIR */
 345			pr_warn("%pOF: msi-address-64 address of %llx is "
 346				"unsupported\n", hose->dn, address);
 347		}
 348	}
 349
 350	sz = min(mem, paddr_lo);
 351	mem_log = ilog2(sz);
 352
 353	/* PCIe can overmap inbound & outbound since RX & TX are separated */
 354	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 355		/* Size window to exact size if power-of-two or one size up */
 356		if ((1ull << mem_log) != mem) {
 357			mem_log++;
 358			if ((1ull << mem_log) > mem)
 359				pr_info("%pOF: Setting PCI inbound window "
 360					"greater than memory size\n", hose->dn);
 361		}
 362
 363		piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);
 364
 365		if (setup_inbound) {
 366			/* Setup inbound memory window */
 367			out_be32(&pci->piw[win_idx].pitar,  0x00000000);
 368			out_be32(&pci->piw[win_idx].piwbar, 0x00000000);
 369			out_be32(&pci->piw[win_idx].piwar,  piwar);
 370		}
 371
 372		win_idx--;
 373		hose->dma_window_base_cur = 0x00000000;
 374		hose->dma_window_size = (resource_size_t)sz;
 375
 376		/*
 377		 * if we have >4G of memory setup second PCI inbound window to
 378		 * let devices that are 64-bit address capable to work w/o
 379		 * SWIOTLB and access the full range of memory
 380		 */
 381		if (sz != mem) {
 382			mem_log = ilog2(mem);
 383
  384			/* Size the window up if we don't fit in an exact power-of-2 */
 385			if ((1ull << mem_log) != mem)
 386				mem_log++;
 387
 388			piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
 389			pci64_dma_offset = 1ULL << mem_log;
 390
 391			if (setup_inbound) {
 392				/* Setup inbound memory window */
 393				out_be32(&pci->piw[win_idx].pitar,  0x00000000);
 394				out_be32(&pci->piw[win_idx].piwbear,
 395						pci64_dma_offset >> 44);
 396				out_be32(&pci->piw[win_idx].piwbar,
 397						pci64_dma_offset >> 12);
 398				out_be32(&pci->piw[win_idx].piwar,  piwar);
 399			}
 400
 401			/*
 402			 * install our own dma_set_mask handler to fixup dma_ops
 403			 * and dma_offset
 404			 */
 405			ppc_md.dma_set_mask = fsl_pci_dma_set_mask;
 406
 407			pr_info("%pOF: Setup 64-bit PCI DMA window\n", hose->dn);
 408		}
 409	} else {
 410		u64 paddr = 0;
 411
 412		if (setup_inbound) {
 413			/* Setup inbound memory window */
 414			out_be32(&pci->piw[win_idx].pitar,  paddr >> 12);
 415			out_be32(&pci->piw[win_idx].piwbar, paddr >> 12);
 416			out_be32(&pci->piw[win_idx].piwar,
 417				 (piwar | (mem_log - 1)));
 418		}
 419
 420		win_idx--;
 421		paddr += 1ull << mem_log;
 422		sz -= 1ull << mem_log;
 423
 424		if (sz) {
 425			mem_log = ilog2(sz);
 426			piwar |= (mem_log - 1);
 427
 428			if (setup_inbound) {
 429				out_be32(&pci->piw[win_idx].pitar,
 430					 paddr >> 12);
 431				out_be32(&pci->piw[win_idx].piwbar,
 432					 paddr >> 12);
 433				out_be32(&pci->piw[win_idx].piwar, piwar);
 434			}
 435
 436			win_idx--;
 437			paddr += 1ull << mem_log;
 438		}
 439
 440		hose->dma_window_base_cur = 0x00000000;
 441		hose->dma_window_size = (resource_size_t)paddr;
 442	}
 443
 444	if (hose->dma_window_size < mem) {
 445#ifdef CONFIG_SWIOTLB
 446		ppc_swiotlb_enable = 1;
 447#else
 448		pr_err("%pOF: ERROR: Memory size exceeds PCI ATMU ability to "
 449			"map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
 450			 hose->dn);
 451#endif
 452		/* adjusting outbound windows could reclaim space in mem map */
 453		if (paddr_hi < 0xffffffffull)
 454			pr_warn("%pOF: WARNING: Outbound window cfg leaves "
 455				"gaps in memory map. Adjusting the memory map "
 456				"could reduce unnecessary bounce buffering.\n",
 457				hose->dn);
 458
 459		pr_info("%pOF: DMA window size is 0x%llx\n", hose->dn,
 460			(u64)hose->dma_window_size);
 461	}
 462}
 463
 464static void setup_pci_cmd(struct pci_controller *hose)
 465{
 466	u16 cmd;
 467	int cap_x;
 468
 469	early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
 470	cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
 471		| PCI_COMMAND_IO;
 472	early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);
 473
 474	cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX);
 475	if (cap_x) {
 476		int pci_x_cmd = cap_x + PCI_X_CMD;
 477		cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
 478			| PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
 479		early_write_config_word(hose, 0, 0, pci_x_cmd, cmd);
 480	} else {
 481		early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
 482	}
 483}
 484
 485void fsl_pcibios_fixup_bus(struct pci_bus *bus)
 486{
 487	struct pci_controller *hose = pci_bus_to_host(bus);
 488	int i, is_pcie = 0, no_link;
 489
 490	/* The root complex bridge comes up with bogus resources,
 491	 * we copy the PHB ones in.
 492	 *
 493	 * With the current generic PCI code, the PHB bus no longer
 494	 * has bus->resource[0..4] set, so things are a bit more
 495	 * tricky.
 496	 */
 497
 498	if (fsl_pcie_bus_fixup)
 499		is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
 500	no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);
 501
 502	if (bus->parent == hose->bus && (is_pcie || no_link)) {
 503		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
 504			struct resource *res = bus->resource[i];
 505			struct resource *par;
 506
 507			if (!res)
 508				continue;
 509			if (i == 0)
 510				par = &hose->io_resource;
 511			else if (i < 4)
 512				par = &hose->mem_resources[i-1];
 513			else par = NULL;
 514
 515			res->start = par ? par->start : 0;
 516			res->end   = par ? par->end   : 0;
 517			res->flags = par ? par->flags : 0;
 518		}
 519	}
 520}
 521
 522static int fsl_add_bridge(struct platform_device *pdev, int is_primary)
 523{
 524	int len;
 525	struct pci_controller *hose;
 526	struct resource rsrc;
 527	const int *bus_range;
 528	u8 hdr_type, progif;
 529	u32 class_code;
 530	struct device_node *dev;
 531	struct ccsr_pci __iomem *pci;
 532	u16 temp;
 533	u32 svr = mfspr(SPRN_SVR);
 534
 535	dev = pdev->dev.of_node;
 536
 537	if (!of_device_is_available(dev)) {
 538		pr_warn("%pOF: disabled\n", dev);
 539		return -ENODEV;
 540	}
 541
 542	pr_debug("Adding PCI host bridge %pOF\n", dev);
 543
 544	/* Fetch host bridge registers address */
 545	if (of_address_to_resource(dev, 0, &rsrc)) {
  546		printk(KERN_WARNING "Can't get pci register base!\n");
 547		return -ENOMEM;
 548	}
 549
 550	/* Get bus range if any */
 551	bus_range = of_get_property(dev, "bus-range", &len);
 552	if (bus_range == NULL || len < 2 * sizeof(int))
 553		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
 554			" bus 0\n", dev);
 555
 556	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 557	hose = pcibios_alloc_controller(dev);
 558	if (!hose)
 559		return -ENOMEM;
 560
 561	/* set platform device as the parent */
 562	hose->parent = &pdev->dev;
 563	hose->first_busno = bus_range ? bus_range[0] : 0x0;
 564	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 565
 566	pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
 567		 (u64)rsrc.start, (u64)resource_size(&rsrc));
 568
 569	pci = hose->private_data = ioremap(rsrc.start, resource_size(&rsrc));
 570	if (!hose->private_data)
 571		goto no_bridge;
 572
 573	setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
 574			   PPC_INDIRECT_TYPE_BIG_ENDIAN);
 575
 576	if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
 577		hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 578
 579	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 580		/* use fsl_indirect_read_config for PCIe */
 581		hose->ops = &fsl_indirect_pcie_ops;
 582		/* For PCIE read HEADER_TYPE to identify controller mode */
 583		early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
 584		if ((hdr_type & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_BRIDGE)
 585			goto no_bridge;
 586
 587	} else {
 588		/* For PCI read PROG to identify controller mode */
 589		early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
 590		if ((progif & 1) &&
 591		    !of_property_read_bool(dev, "fsl,pci-agent-force-enum"))
 592			goto no_bridge;
 593	}
 594
 595	setup_pci_cmd(hose);
 596
 597	/* check PCI express link status */
 598	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 599		hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG |
 600			PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
 601		if (fsl_pcie_check_link(hose))
 602			hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 603		/* Fix Class Code to PCI_CLASS_BRIDGE_PCI_NORMAL for pre-3.0 controller */
 604		if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0) {
 605			early_read_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, &class_code);
 606			class_code &= 0xff;
 607			class_code |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
 608			early_write_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, class_code);
 609		}
 610	} else {
 611		/*
  612		 * Set PBFR (PCI Bus Function Register)[10] = 1 to
  613		 * disable combining requests that cross a cacheline
  614		 * boundary into one burst transaction.
  615		 * PCI-X operation is not affected.
  616		 * Fixes erratum PCI 5 on MPC8548.
 617		 */
 618#define PCI_BUS_FUNCTION 0x44
 619#define PCI_BUS_FUNCTION_MDS 0x400	/* Master disable streaming */
 620		if (((SVR_SOC_VER(svr) == SVR_8543) ||
 621		     (SVR_SOC_VER(svr) == SVR_8545) ||
 622		     (SVR_SOC_VER(svr) == SVR_8547) ||
 623		     (SVR_SOC_VER(svr) == SVR_8548)) &&
 624		    !early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX)) {
 625			early_read_config_word(hose, 0, 0,
 626					PCI_BUS_FUNCTION, &temp);
 627			temp |= PCI_BUS_FUNCTION_MDS;
 628			early_write_config_word(hose, 0, 0,
 629					PCI_BUS_FUNCTION, temp);
 630		}
 631	}
 632
 633	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
 634		"Firmware bus number: %d->%d\n",
 635		(unsigned long long)rsrc.start, hose->first_busno,
 636		hose->last_busno);
 637
 638	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
 639		hose, hose->cfg_addr, hose->cfg_data);
 640
 641	/* Interpret the "ranges" property */
 642	/* This also maps the I/O region and sets isa_io/mem_base */
 643	pci_process_bridge_OF_ranges(hose, dev, is_primary);
 644
 645	/* Setup PEX window registers */
 646	setup_pci_atmu(hose);
 647
 648	/* Set up controller operations */
 649	setup_swiotlb_ops(hose);
 650
 651	return 0;
 652
 653no_bridge:
 654	iounmap(hose->private_data);
 655	/* unmap cfg_data & cfg_addr separately if not on same page */
 656	if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
 657	    ((unsigned long)hose->cfg_addr & PAGE_MASK))
 658		iounmap(hose->cfg_data);
 659	iounmap(hose->cfg_addr);
 660	pcibios_free_controller(hose);
 661	return -ENODEV;
 662}
 663#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
 664
 665DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID,
 666			quirk_fsl_pcie_early);
 667
 668#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
 669struct mpc83xx_pcie_priv {
 670	void __iomem *cfg_type0;
 671	void __iomem *cfg_type1;
 672	u32 dev_base;
 673};
 674
 675struct pex_inbound_window {
 676	u32 ar;
 677	u32 tar;
 678	u32 barl;
 679	u32 barh;
 680};
 681
 682/*
  683 * Following the U-Boot convention, PCIe outbound window 0 serves
  684 * as the window for configuration transactions.
 685 */
 686#define PEX_OUTWIN0_BAR		0xCA4
 687#define PEX_OUTWIN0_TAL		0xCA8
 688#define PEX_OUTWIN0_TAH		0xCAC
 689#define PEX_RC_INWIN_BASE	0xE60
 690#define PEX_RCIWARn_EN		0x1
 691
 692static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn)
 693{
 694	struct pci_controller *hose = pci_bus_to_host(bus);
 695
 696	if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK)
 697		return PCIBIOS_DEVICE_NOT_FOUND;
 698	/*
  699	 * Workaround for the HW bug: for Type 0 configuration transactions the
 700	 * PCI-E controller does not check the device number bits and just
 701	 * assumes that the device number bits are 0.
 702	 */
 703	if (bus->number == hose->first_busno ||
 704			bus->primary == hose->first_busno) {
 705		if (devfn & 0xf8)
 706			return PCIBIOS_DEVICE_NOT_FOUND;
 707	}
 708
 709	if (ppc_md.pci_exclude_device) {
 710		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
 711			return PCIBIOS_DEVICE_NOT_FOUND;
 712	}
 713
 714	return PCIBIOS_SUCCESSFUL;
 715}
 716
 717static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus,
 718					    unsigned int devfn, int offset)
 719{
 720	struct pci_controller *hose = pci_bus_to_host(bus);
 721	struct mpc83xx_pcie_priv *pcie = hose->dn->data;
 722	u32 dev_base = bus->number << 24 | devfn << 16;
 723	int ret;
 724
 725	ret = mpc83xx_pcie_exclude_device(bus, devfn);
 726	if (ret)
 727		return NULL;
 728
 729	offset &= 0xfff;
 730
 731	/* Type 0 */
 732	if (bus->number == hose->first_busno)
 733		return pcie->cfg_type0 + offset;
 734
 735	if (pcie->dev_base == dev_base)
 736		goto mapped;
 737
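	/*
	 * Retarget outbound window 0 at the selected bus/devfn for this
	 * access; the last target is cached in dev_base so the register
	 * is not rewritten on every config cycle.
	 */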
 738	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, dev_base);
 739
 740	pcie->dev_base = dev_base;
 741mapped:
 742	return pcie->cfg_type1 + offset;
 743}
 744
 745static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
 746				     int offset, int len, u32 val)
 747{
 748	struct pci_controller *hose = pci_bus_to_host(bus);
 749
 750	/* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
 751	if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
 752		val &= 0xffffff00;
 753
 754	return pci_generic_config_write(bus, devfn, offset, len, val);
 755}
 756
 757static struct pci_ops mpc83xx_pcie_ops = {
 758	.map_bus = mpc83xx_pcie_remap_cfg,
 759	.read = pci_generic_config_read,
 760	.write = mpc83xx_pcie_write_config,
 761};
 762
 763static int __init mpc83xx_pcie_setup(struct pci_controller *hose,
 764				     struct resource *reg)
 765{
 766	struct mpc83xx_pcie_priv *pcie;
 767	u32 cfg_bar;
 768	int ret = -ENOMEM;
 769
 770	pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
 771	if (!pcie)
 772		return ret;
 773
 774	pcie->cfg_type0 = ioremap(reg->start, resource_size(reg));
 775	if (!pcie->cfg_type0)
 776		goto err0;
 777
 778	cfg_bar = in_le32(pcie->cfg_type0 + PEX_OUTWIN0_BAR);
 779	if (!cfg_bar) {
 780		/* PCI-E isn't configured. */
 781		ret = -ENODEV;
 782		goto err1;
 783	}
 784
 785	pcie->cfg_type1 = ioremap(cfg_bar, 0x1000);
 786	if (!pcie->cfg_type1)
 787		goto err1;
 788
 789	WARN_ON(hose->dn->data);
 790	hose->dn->data = pcie;
 791	hose->ops = &mpc83xx_pcie_ops;
 792	hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 793
 794	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0);
 795	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0);
 796
 797	if (fsl_pcie_check_link(hose))
 798		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 799
 800	return 0;
 801err1:
 802	iounmap(pcie->cfg_type0);
 803err0:
 804	kfree(pcie);
 805	return ret;
 806
 807}
 808
 809int __init mpc83xx_add_bridge(struct device_node *dev)
 810{
 811	int ret;
 812	int len;
 813	struct pci_controller *hose;
 814	struct resource rsrc_reg;
 815	struct resource rsrc_cfg;
 816	const int *bus_range;
 817	int primary;
 818
 819	is_mpc83xx_pci = 1;
 820
 821	if (!of_device_is_available(dev)) {
 822		pr_warn("%pOF: disabled by the firmware.\n",
 823			dev);
 824		return -ENODEV;
 825	}
 826	pr_debug("Adding PCI host bridge %pOF\n", dev);
 827
 828	/* Fetch host bridge registers address */
 829	if (of_address_to_resource(dev, 0, &rsrc_reg)) {
 830		printk(KERN_WARNING "Can't get pci register base!\n");
 831		return -ENOMEM;
 832	}
 833
 834	memset(&rsrc_cfg, 0, sizeof(rsrc_cfg));
 835
 836	if (of_address_to_resource(dev, 1, &rsrc_cfg)) {
 837		printk(KERN_WARNING
 838			"No pci config register base in dev tree, "
 839			"using default\n");
 840		/*
 841		 * MPC83xx supports up to two host controllers
 842		 * 	one at 0x8500 has config space registers at 0x8300
 843		 * 	one at 0x8600 has config space registers at 0x8380
 844		 */
 845		if ((rsrc_reg.start & 0xfffff) == 0x8500)
 846			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300;
 847		else if ((rsrc_reg.start & 0xfffff) == 0x8600)
 848			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380;
 849	}
 850	/*
 851	 * Controller at offset 0x8500 is primary
 852	 */
 853	if ((rsrc_reg.start & 0xfffff) == 0x8500)
 854		primary = 1;
 855	else
 856		primary = 0;
 857
 858	/* Get bus range if any */
 859	bus_range = of_get_property(dev, "bus-range", &len);
 860	if (bus_range == NULL || len < 2 * sizeof(int)) {
 861		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
 862		       " bus 0\n", dev);
 863	}
 864
 865	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 866	hose = pcibios_alloc_controller(dev);
 867	if (!hose)
 868		return -ENOMEM;
 869
 870	hose->first_busno = bus_range ? bus_range[0] : 0;
 871	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 872
 873	if (of_device_is_compatible(dev, "fsl,mpc8314-pcie")) {
 874		ret = mpc83xx_pcie_setup(hose, &rsrc_reg);
 875		if (ret)
 876			goto err0;
 877	} else {
 878		setup_indirect_pci(hose, rsrc_cfg.start,
 879				   rsrc_cfg.start + 4, 0);
 880	}
 881
 882	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
 883	       "Firmware bus number: %d->%d\n",
 884	       (unsigned long long)rsrc_reg.start, hose->first_busno,
 885	       hose->last_busno);
 886
 887	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
 888	    hose, hose->cfg_addr, hose->cfg_data);
 889
 890	/* Interpret the "ranges" property */
 891	/* This also maps the I/O region and sets isa_io/mem_base */
 892	pci_process_bridge_OF_ranges(hose, dev, primary);
 893
 894	return 0;
 895err0:
 896	pcibios_free_controller(hose);
 897	return ret;
 898}
 899#endif /* CONFIG_PPC_83xx */
 900
 901u64 fsl_pci_immrbar_base(struct pci_controller *hose)
 902{
 903#ifdef CONFIG_PPC_83xx
 904	if (is_mpc83xx_pci) {
 905		struct mpc83xx_pcie_priv *pcie = hose->dn->data;
 906		struct pex_inbound_window *in;
 907		int i;
 908
 909		/* Walk the Root Complex Inbound windows to match IMMR base */
 910		in = pcie->cfg_type0 + PEX_RC_INWIN_BASE;
 911		for (i = 0; i < 4; i++) {
 912			/* not enabled, skip */
 913			if (!(in_le32(&in[i].ar) & PEX_RCIWARn_EN))
 914				continue;
 915
 916			if (get_immrbase() == in_le32(&in[i].tar))
 917				return (u64)in_le32(&in[i].barh) << 32 |
 918					    in_le32(&in[i].barl);
 919		}
 920
 921		printk(KERN_WARNING "could not find PCI BAR matching IMMR\n");
 922	}
 923#endif
 924
 925#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 926	if (!is_mpc83xx_pci) {
 927		u32 base;
 928
 929		pci_bus_read_config_dword(hose->bus,
 930			PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base);
 931
 932		/*
 933		 * For PEXCSRBAR, bit 3-0 indicate prefetchable and
 934		 * address type. So when getting base address, these
 935		 * bits should be masked
 936		 */
 937		base &= PCI_BASE_ADDRESS_MEM_MASK;
 938
 939		return base;
 940	}
 941#endif
 942
 943	return 0;
 944}
 945
 946#ifdef CONFIG_PPC_E500
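/*
 * Emulate a load that machine-checked while targeting PCI memory space:
 * the destination register is filled with all ones (the value a PCI
 * master abort returns) and, for update forms, the base register is
 * advanced, so that fsl_pci_mcheck_exception() can skip the faulting
 * instruction and let execution continue.
 */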
 947static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
 948{
 949	unsigned int rd, ra, rb, d;
 950
 951	rd = get_rt(inst);
 952	ra = get_ra(inst);
 953	rb = get_rb(inst);
 954	d = get_d(inst);
 955
 956	switch (get_op(inst)) {
 957	case 31:
 958		switch (get_xop(inst)) {
 959		case OP_31_XOP_LWZX:
 960		case OP_31_XOP_LWBRX:
 961			regs->gpr[rd] = 0xffffffff;
 962			break;
 963
 964		case OP_31_XOP_LWZUX:
 965			regs->gpr[rd] = 0xffffffff;
 966			regs->gpr[ra] += regs->gpr[rb];
 967			break;
 968
 969		case OP_31_XOP_LBZX:
 970			regs->gpr[rd] = 0xff;
 971			break;
 972
 973		case OP_31_XOP_LBZUX:
 974			regs->gpr[rd] = 0xff;
 975			regs->gpr[ra] += regs->gpr[rb];
 976			break;
 977
 978		case OP_31_XOP_LHZX:
 979		case OP_31_XOP_LHBRX:
 980			regs->gpr[rd] = 0xffff;
 981			break;
 982
 983		case OP_31_XOP_LHZUX:
 984			regs->gpr[rd] = 0xffff;
 985			regs->gpr[ra] += regs->gpr[rb];
 986			break;
 987
 988		case OP_31_XOP_LHAX:
 989			regs->gpr[rd] = ~0UL;
 990			break;
 991
 992		case OP_31_XOP_LHAUX:
 993			regs->gpr[rd] = ~0UL;
 994			regs->gpr[ra] += regs->gpr[rb];
 995			break;
 996
 997		default:
 998			return 0;
 999		}
1000		break;
1001
1002	case OP_LWZ:
1003		regs->gpr[rd] = 0xffffffff;
1004		break;
1005
1006	case OP_LWZU:
1007		regs->gpr[rd] = 0xffffffff;
1008		regs->gpr[ra] += (s16)d;
1009		break;
1010
1011	case OP_LBZ:
1012		regs->gpr[rd] = 0xff;
1013		break;
1014
1015	case OP_LBZU:
1016		regs->gpr[rd] = 0xff;
1017		regs->gpr[ra] += (s16)d;
1018		break;
1019
1020	case OP_LHZ:
1021		regs->gpr[rd] = 0xffff;
1022		break;
1023
1024	case OP_LHZU:
1025		regs->gpr[rd] = 0xffff;
1026		regs->gpr[ra] += (s16)d;
1027		break;
1028
1029	case OP_LHA:
1030		regs->gpr[rd] = ~0UL;
1031		break;
1032
1033	case OP_LHAU:
1034		regs->gpr[rd] = ~0UL;
1035		regs->gpr[ra] += (s16)d;
1036		break;
1037
1038	default:
1039		return 0;
1040	}
1041
1042	return 1;
1043}
1044
1045static int is_in_pci_mem_space(phys_addr_t addr)
1046{
1047	struct pci_controller *hose;
1048	struct resource *res;
1049	int i;
1050
1051	list_for_each_entry(hose, &hose_list, list_node) {
1052		if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG))
1053			continue;
1054
1055		for (i = 0; i < 3; i++) {
1056			res = &hose->mem_resources[i];
1057			if ((res->flags & IORESOURCE_MEM) &&
1058				addr >= res->start && addr <= res->end)
1059				return 1;
1060		}
1061	}
1062	return 0;
1063}
1064
1065int fsl_pci_mcheck_exception(struct pt_regs *regs)
1066{
1067	u32 inst;
1068	int ret;
1069	phys_addr_t addr = 0;
1070
1071	/* Let KVM/QEMU deal with the exception */
1072	if (regs->msr & MSR_GS)
1073		return 0;
1074
1075#ifdef CONFIG_PHYS_64BIT
1076	addr = mfspr(SPRN_MCARU);
1077	addr <<= 32;
1078#endif
1079	addr += mfspr(SPRN_MCAR);
1080
1081	if (is_in_pci_mem_space(addr)) {
1082		if (user_mode(regs))
1083			ret = copy_from_user_nofault(&inst,
1084					(void __user *)regs->nip, sizeof(inst));
1085		else
 1086			ret = get_kernel_nofault(inst, (void *)regs->nip);
 1087
1088		if (!ret && mcheck_handle_load(regs, inst)) {
1089			regs_add_return_ip(regs, 4);
1090			return 1;
1091		}
1092	}
1093
1094	return 0;
1095}
1096#endif
1097
1098#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
1099static const struct of_device_id pci_ids[] = {
1100	{ .compatible = "fsl,mpc8540-pci", },
1101	{ .compatible = "fsl,mpc8548-pcie", },
1102	{ .compatible = "fsl,mpc8610-pci", },
1103	{ .compatible = "fsl,mpc8641-pcie", },
1104	{ .compatible = "fsl,qoriq-pcie", },
1105	{ .compatible = "fsl,qoriq-pcie-v2.1", },
1106	{ .compatible = "fsl,qoriq-pcie-v2.2", },
1107	{ .compatible = "fsl,qoriq-pcie-v2.3", },
1108	{ .compatible = "fsl,qoriq-pcie-v2.4", },
1109	{ .compatible = "fsl,qoriq-pcie-v3.0", },
1110
1111	/*
1112	 * The following entries are for compatibility with older device
1113	 * trees.
1114	 */
1115	{ .compatible = "fsl,p1022-pcie", },
1116	{ .compatible = "fsl,p4080-pcie", },
1117
1118	{},
1119};
1120
1121struct device_node *fsl_pci_primary;
1122
1123void __init fsl_pci_assign_primary(void)
1124{
1125	struct device_node *np;
1126
1127	/* Callers can specify the primary bus using other means. */
1128	if (fsl_pci_primary)
1129		return;
1130
1131	/* If a PCI host bridge contains an ISA node, it's primary. */
1132	np = of_find_node_by_type(NULL, "isa");
1133	while ((fsl_pci_primary = of_get_parent(np))) {
1134		of_node_put(np);
1135		np = fsl_pci_primary;
1136
1137		if (of_match_node(pci_ids, np) && of_device_is_available(np))
1138			return;
1139	}
1140
1141	/*
1142	 * If there's no PCI host bridge with ISA then check for
1143	 * PCI host bridge with alias "pci0" (first PCI host bridge).
1144	 */
1145	np = of_find_node_by_path("pci0");
1146	if (np && of_match_node(pci_ids, np) && of_device_is_available(np)) {
1147		fsl_pci_primary = np;
1148		of_node_put(np);
1149		return;
1150	}
1151	if (np)
1152		of_node_put(np);
1153
1154	/*
1155	 * If there's no PCI host bridge with ISA, arbitrarily
1156	 * designate one as primary.  This can go away once
1157	 * various bugs with primary-less systems are fixed.
1158	 */
1159	for_each_matching_node(np, pci_ids) {
1160		if (of_device_is_available(np)) {
 1161			fsl_pci_primary = np;
 1162			return;
1163		}
1164	}
1165}
1166
1167#ifdef CONFIG_PM_SLEEP
1168static irqreturn_t fsl_pci_pme_handle(int irq, void *dev_id)
1169{
1170	struct pci_controller *hose = dev_id;
1171	struct ccsr_pci __iomem *pci = hose->private_data;
1172	u32 dr;
1173
1174	dr = in_be32(&pci->pex_pme_mes_dr);
1175	if (!dr)
1176		return IRQ_NONE;
1177
1178	out_be32(&pci->pex_pme_mes_dr, dr);
1179
1180	return IRQ_HANDLED;
1181}
1182
1183static int fsl_pci_pme_probe(struct pci_controller *hose)
1184{
1185	struct ccsr_pci __iomem *pci;
1186	struct pci_dev *dev;
1187	int pme_irq;
1188	int res;
1189	u16 pms;
1190
1191	/* Get hose's pci_dev */
1192	dev = list_first_entry(&hose->bus->devices, typeof(*dev), bus_list);
1193
1194	/* PME Disable */
1195	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1196	pms &= ~PCI_PM_CTRL_PME_ENABLE;
1197	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1198
1199	pme_irq = irq_of_parse_and_map(hose->dn, 0);
1200	if (!pme_irq) {
1201		dev_err(&dev->dev, "Failed to map PME interrupt.\n");
1202
1203		return -ENXIO;
1204	}
1205
1206	res = devm_request_irq(hose->parent, pme_irq,
1207			fsl_pci_pme_handle,
1208			IRQF_SHARED,
1209			"[PCI] PME", hose);
1210	if (res < 0) {
1211		dev_err(&dev->dev, "Unable to request irq %d for PME\n", pme_irq);
1212		irq_dispose_mapping(pme_irq);
1213
1214		return -ENODEV;
1215	}
1216
1217	pci = hose->private_data;
1218
1219	/* Enable PTOD, ENL23D & EXL23D */
1220	clrbits32(&pci->pex_pme_mes_disr,
1221		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1222
1223	out_be32(&pci->pex_pme_mes_ier, 0);
1224	setbits32(&pci->pex_pme_mes_ier,
1225		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1226
1227	/* PME Enable */
1228	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1229	pms |= PCI_PM_CTRL_PME_ENABLE;
1230	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1231
1232	return 0;
1233}
1234
1235static void send_pme_turnoff_message(struct pci_controller *hose)
1236{
1237	struct ccsr_pci __iomem *pci = hose->private_data;
1238	u32 dr;
1239	int i;
1240
1241	/* Send PME_Turn_Off Message Request */
1242	setbits32(&pci->pex_pmcr, PEX_PMCR_PTOMR);
1243
 1244	/* Wait for turn off to complete */
1245	for (i = 0; i < 150; i++) {
1246		dr = in_be32(&pci->pex_pme_mes_dr);
1247		if (dr) {
1248			out_be32(&pci->pex_pme_mes_dr, dr);
1249			break;
1250		}
1251
1252		udelay(1000);
1253	}
1254}
1255
1256static void fsl_pci_syscore_do_suspend(struct pci_controller *hose)
1257{
1258	send_pme_turnoff_message(hose);
1259}
1260
1261static int fsl_pci_syscore_suspend(void)
1262{
1263	struct pci_controller *hose, *tmp;
1264
1265	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1266		fsl_pci_syscore_do_suspend(hose);
1267
1268	return 0;
1269}
1270
1271static void fsl_pci_syscore_do_resume(struct pci_controller *hose)
1272{
1273	struct ccsr_pci __iomem *pci = hose->private_data;
1274	u32 dr;
1275	int i;
1276
1277	/* Send Exit L2 State Message */
1278	setbits32(&pci->pex_pmcr, PEX_PMCR_EXL2S);
1279
 1280	/* Wait for the exit to complete */
1281	for (i = 0; i < 150; i++) {
1282		dr = in_be32(&pci->pex_pme_mes_dr);
1283		if (dr) {
1284			out_be32(&pci->pex_pme_mes_dr, dr);
1285			break;
1286		}
1287
1288		udelay(1000);
1289	}
1290
1291	setup_pci_atmu(hose);
1292}
1293
1294static void fsl_pci_syscore_resume(void)
1295{
1296	struct pci_controller *hose, *tmp;
1297
1298	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1299		fsl_pci_syscore_do_resume(hose);
1300}
1301
1302static struct syscore_ops pci_syscore_pm_ops = {
1303	.suspend = fsl_pci_syscore_suspend,
1304	.resume = fsl_pci_syscore_resume,
1305};
1306#endif
1307
1308void fsl_pcibios_fixup_phb(struct pci_controller *phb)
1309{
1310#ifdef CONFIG_PM_SLEEP
1311	fsl_pci_pme_probe(phb);
1312#endif
1313}
1314
1315static int add_err_dev(struct platform_device *pdev)
1316{
1317	struct platform_device *errdev;
1318	struct mpc85xx_edac_pci_plat_data pd = {
1319		.of_node = pdev->dev.of_node
1320	};
1321
1322	errdev = platform_device_register_resndata(&pdev->dev,
1323						   "mpc85xx-pci-edac",
1324						   PLATFORM_DEVID_AUTO,
1325						   pdev->resource,
1326						   pdev->num_resources,
1327						   &pd, sizeof(pd));
1328
1329	return PTR_ERR_OR_ZERO(errdev);
1330}
1331
1332static int fsl_pci_probe(struct platform_device *pdev)
1333{
1334	struct device_node *node;
1335	int ret;
1336
1337	node = pdev->dev.of_node;
1338	ret = fsl_add_bridge(pdev, fsl_pci_primary == node);
1339	if (ret)
1340		return ret;
1341
1342	ret = add_err_dev(pdev);
1343	if (ret)
1344		dev_err(&pdev->dev, "couldn't register error device: %d\n",
1345			ret);
1346
1347	return 0;
1348}
1349
1350static struct platform_driver fsl_pci_driver = {
1351	.driver = {
1352		.name = "fsl-pci",
1353		.of_match_table = pci_ids,
1354	},
1355	.probe = fsl_pci_probe,
1356	.driver_managed_dma = true,
1357};
1358
1359static int __init fsl_pci_init(void)
1360{
1361#ifdef CONFIG_PM_SLEEP
1362	register_syscore_ops(&pci_syscore_pm_ops);
1363#endif
1364	return platform_driver_register(&fsl_pci_driver);
1365}
1366arch_initcall(fsl_pci_init);
1367#endif
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * MPC83xx/85xx/86xx PCI/PCIE support routing.
   4 *
   5 * Copyright 2007-2012 Freescale Semiconductor, Inc.
   6 * Copyright 2008-2009 MontaVista Software, Inc.
   7 *
   8 * Initial author: Xianghua Xiao <x.xiao@freescale.com>
   9 * Recode: ZHANG WEI <wei.zhang@freescale.com>
  10 * Rewrite the routing for Frescale PCI and PCI Express
  11 * 	Roy Zang <tie-fei.zang@freescale.com>
  12 * MPC83xx PCI-Express support:
  13 * 	Tony Li <tony.li@freescale.com>
  14 * 	Anton Vorontsov <avorontsov@ru.mvista.com>
  15 */
  16#include <linux/kernel.h>
  17#include <linux/pci.h>
  18#include <linux/delay.h>
  19#include <linux/string.h>
  20#include <linux/fsl/edac.h>
  21#include <linux/init.h>
  22#include <linux/interrupt.h>
  23#include <linux/memblock.h>
  24#include <linux/log2.h>
 
 
  25#include <linux/platform_device.h>
  26#include <linux/slab.h>
  27#include <linux/suspend.h>
  28#include <linux/syscore_ops.h>
  29#include <linux/uaccess.h>
  30
  31#include <asm/io.h>
  32#include <asm/prom.h>
  33#include <asm/pci-bridge.h>
  34#include <asm/ppc-pci.h>
  35#include <asm/machdep.h>
  36#include <asm/mpc85xx.h>
  37#include <asm/disassemble.h>
  38#include <asm/ppc-opcode.h>
  39#include <asm/swiotlb.h>
 
  40#include <sysdev/fsl_soc.h>
  41#include <sysdev/fsl_pci.h>
  42
  43static int fsl_pcie_bus_fixup, is_mpc83xx_pci;
  44
  45static void quirk_fsl_pcie_early(struct pci_dev *dev)
  46{
  47	u8 hdr_type;
  48
  49	/* if we aren't a PCIe don't bother */
  50	if (!pci_is_pcie(dev))
  51		return;
  52
  53	/* if we aren't in host mode don't bother */
  54	pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
  55	if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
  56		return;
  57
  58	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
  59	fsl_pcie_bus_fixup = 1;
  60	return;
  61}
  62
  63static int fsl_indirect_read_config(struct pci_bus *, unsigned int,
  64				    int, int, u32 *);
  65
  66static int fsl_pcie_check_link(struct pci_controller *hose)
  67{
  68	u32 val = 0;
  69
  70	if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
  71		if (hose->ops->read == fsl_indirect_read_config)
  72			__indirect_read_config(hose, hose->first_busno, 0,
  73					       PCIE_LTSSM, 4, &val);
  74		else
  75			early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
  76		if (val < PCIE_LTSSM_L0)
  77			return 1;
  78	} else {
  79		struct ccsr_pci __iomem *pci = hose->private_data;
  80		/* for PCIe IP rev 3.0 or greater use CSR0 for link state */
  81		val = (in_be32(&pci->pex_csr0) & PEX_CSR0_LTSSM_MASK)
  82				>> PEX_CSR0_LTSSM_SHIFT;
  83		if (val != PEX_CSR0_LTSSM_L0)
  84			return 1;
  85	}
  86
  87	return 0;
  88}
  89
  90static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
  91				    int offset, int len, u32 *val)
  92{
  93	struct pci_controller *hose = pci_bus_to_host(bus);
  94
  95	if (fsl_pcie_check_link(hose))
  96		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
  97	else
  98		hose->indirect_type &= ~PPC_INDIRECT_TYPE_NO_PCIE_LINK;
  99
 100	return indirect_read_config(bus, devfn, offset, len, val);
 101}
 102
 103#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 104
 105static struct pci_ops fsl_indirect_pcie_ops =
 106{
 107	.read = fsl_indirect_read_config,
 108	.write = indirect_write_config,
 109};
 110
 111static u64 pci64_dma_offset;
 112
 113#ifdef CONFIG_SWIOTLB
 114static void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
 115{
 116	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
 117
 118	pdev->dev.bus_dma_mask =
 119		hose->dma_window_base_cur + hose->dma_window_size;
 120}
 121
 122static void setup_swiotlb_ops(struct pci_controller *hose)
 123{
 124	if (ppc_swiotlb_enable)
 125		hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
 126}
 127#else
 128static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
 129#endif
 130
 131static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
 132{
 133	/*
 134	 * Fix up PCI devices that are able to DMA to the large inbound
 135	 * mapping that allows addressing any RAM address from across PCI.
 136	 */
 137	if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
 138		dev->bus_dma_mask = 0;
 139		dev->archdata.dma_offset = pci64_dma_offset;
 140	}
 141}
 142
 143static int setup_one_atmu(struct ccsr_pci __iomem *pci,
 144	unsigned int index, const struct resource *res,
 145	resource_size_t offset)
 146{
 147	resource_size_t pci_addr = res->start - offset;
 148	resource_size_t phys_addr = res->start;
 149	resource_size_t size = resource_size(res);
 150	u32 flags = 0x80044000; /* enable & mem R/W */
 151	unsigned int i;
 152
 153	pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
 154		(u64)res->start, (u64)size);
 155
 156	if (res->flags & IORESOURCE_PREFETCH)
 157		flags |= 0x10000000; /* enable relaxed ordering */
 158
 159	for (i = 0; size > 0; i++) {
 160		unsigned int bits = min_t(u32, ilog2(size),
 161					__ffs(pci_addr | phys_addr));
 162
 163		if (index + i >= 5)
 164			return -1;
 165
 166		out_be32(&pci->pow[index + i].potar, pci_addr >> 12);
 167		out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44);
 168		out_be32(&pci->pow[index + i].powbar, phys_addr >> 12);
 169		out_be32(&pci->pow[index + i].powar, flags | (bits - 1));
 170
 171		pci_addr += (resource_size_t)1U << bits;
 172		phys_addr += (resource_size_t)1U << bits;
 173		size -= (resource_size_t)1U << bits;
 174	}
 175
 176	return i;
 177}
 178
 179static bool is_kdump(void)
 180{
 181	struct device_node *node;
 
 182
 183	node = of_find_node_by_type(NULL, "memory");
 184	if (!node) {
 185		WARN_ON_ONCE(1);
 186		return false;
 187	}
 188
 189	return of_property_read_bool(node, "linux,usable-memory");
 
 
 
 190}
 191
 192/* atmu setup for fsl pci/pcie controller */
 193static void setup_pci_atmu(struct pci_controller *hose)
 194{
 195	struct ccsr_pci __iomem *pci = hose->private_data;
 196	int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
 197	u64 mem, sz, paddr_hi = 0;
 198	u64 offset = 0, paddr_lo = ULLONG_MAX;
 199	u32 pcicsrbar = 0, pcicsrbar_sz;
 200	u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
 201			PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
 202	const u64 *reg;
 203	int len;
 204	bool setup_inbound;
 205
 206	/*
 207	 * If this is kdump, we don't want to trigger a bunch of PCI
 208	 * errors by closing the window on in-flight DMA.
 209	 *
 210	 * We still run most of the function's logic so that things like
 211	 * hose->dma_window_size still get set.
 212	 */
 213	setup_inbound = !is_kdump();
 214
 215	if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
 216		/*
 217		 * BSC9132 Rev1.0 has an issue where all the PEX inbound
 218		 * windows have implemented the default target value as 0xf
 219		 * for CCSR space.In all Freescale legacy devices the target
 220		 * of 0xf is reserved for local memory space. 9132 Rev1.0
 221		 * now has local mempry space mapped to target 0x0 instead of
 222		 * 0xf. Hence adding a workaround to remove the target 0xf
 223		 * defined for memory space from Inbound window attributes.
 224		 */
 225		piwar &= ~PIWAR_TGI_LOCAL;
 226	}
 227
 228	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 229		if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
 230			win_idx = 2;
 231			start_idx = 0;
 232			end_idx = 3;
 233		}
 234	}
 235
 236	/* Disable all windows (except powar0 since it's ignored) */
 237	for(i = 1; i < 5; i++)
 238		out_be32(&pci->pow[i].powar, 0);
 239
 240	if (setup_inbound) {
 241		for (i = start_idx; i < end_idx; i++)
 242			out_be32(&pci->piw[i].piwar, 0);
 243	}
 244
 245	/* Setup outbound MEM window */
 246	for(i = 0, j = 1; i < 3; i++) {
 247		if (!(hose->mem_resources[i].flags & IORESOURCE_MEM))
 248			continue;
 249
 250		paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start);
 251		paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end);
 252
 253		/* We assume all memory resources have the same offset */
 254		offset = hose->mem_offset[i];
 255		n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset);
 256
 257		if (n < 0 || j >= 5) {
 258			pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
 259			hose->mem_resources[i].flags |= IORESOURCE_DISABLED;
 260		} else
 261			j += n;
 262	}
 263
 264	/* Setup outbound IO window */
 265	if (hose->io_resource.flags & IORESOURCE_IO) {
 266		if (j >= 5) {
 267			pr_err("Ran out of outbound PCI ATMUs for IO resource\n");
 268		} else {
 269			pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
 270				 "phy base 0x%016llx.\n",
 271				 (u64)hose->io_resource.start,
 272				 (u64)resource_size(&hose->io_resource),
 273				 (u64)hose->io_base_phys);
 274			out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
 275			out_be32(&pci->pow[j].potear, 0);
 276			out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
 277			/* Enable, IO R/W */
 278			out_be32(&pci->pow[j].powar, 0x80088000
 279				| (ilog2(hose->io_resource.end
 280				- hose->io_resource.start + 1) - 1));
 281		}
 282	}
 283
 284	/* convert to pci address space */
 285	paddr_hi -= offset;
 286	paddr_lo -= offset;
 287
 288	if (paddr_hi == paddr_lo) {
 289		pr_err("%pOF: No outbound window space\n", hose->dn);
 290		return;
 291	}
 292
 293	if (paddr_lo == 0) {
 294		pr_err("%pOF: No space for inbound window\n", hose->dn);
 295		return;
 296	}
 297
 298	/* setup PCSRBAR/PEXCSRBAR */
 299	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
 300	early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz);
 301	pcicsrbar_sz = ~pcicsrbar_sz + 1;
 302
 303	if (paddr_hi < (0x100000000ull - pcicsrbar_sz) ||
 304		(paddr_lo > 0x100000000ull))
 305		pcicsrbar = 0x100000000ull - pcicsrbar_sz;
 306	else
 307		pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz;
 308	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar);
 309
 310	paddr_lo = min(paddr_lo, (u64)pcicsrbar);
 311
 312	pr_info("%pOF: PCICSRBAR @ 0x%x\n", hose->dn, pcicsrbar);
 313
 314	/* Setup inbound mem window */
 315	mem = memblock_end_of_DRAM();
 316	pr_info("%s: end of DRAM %llx\n", __func__, mem);
 317
 318	/*
 319	 * The msi-address-64 property, if it exists, indicates the physical
 320	 * address of the MSIIR register.  Normally, this register is located
 321	 * inside CCSR, so the ATMU that covers all of CCSR is used. But if
 322	 * this property exists, then we normally need to create a new ATMU
 323	 * for it.  For now, however, we cheat.  The only entity that creates
 324	 * this property is the Freescale hypervisor, and the address is
 325	 * specified in the partition configuration.  Typically, the address
 326	 * is located in the page immediately after the end of DDR.  If so, we
 327	 * can avoid allocating a new ATMU by extending the DDR ATMU by one
 328	 * page.
 329	 */
 330	reg = of_get_property(hose->dn, "msi-address-64", &len);
 331	if (reg && (len == sizeof(u64))) {
 332		u64 address = be64_to_cpup(reg);
 333
 334		if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
 335			pr_info("%pOF: extending DDR ATMU to cover MSIIR", hose->dn);
 336			mem += PAGE_SIZE;
 337		} else {
 338			/* TODO: Create a new ATMU for MSIIR */
 339			pr_warn("%pOF: msi-address-64 address of %llx is "
 340				"unsupported\n", hose->dn, address);
 341		}
 342	}
 343
 344	sz = min(mem, paddr_lo);
 345	mem_log = ilog2(sz);
 346
 347	/* PCIe can overmap inbound & outbound since RX & TX are separated */
 348	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 349		/* Size window to exact size if power-of-two or one size up */
 350		if ((1ull << mem_log) != mem) {
 351			mem_log++;
 352			if ((1ull << mem_log) > mem)
 353				pr_info("%pOF: Setting PCI inbound window "
 354					"greater than memory size\n", hose->dn);
 355		}
 356
 357		piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);
 358
 359		if (setup_inbound) {
 360			/* Setup inbound memory window */
 361			out_be32(&pci->piw[win_idx].pitar,  0x00000000);
 362			out_be32(&pci->piw[win_idx].piwbar, 0x00000000);
 363			out_be32(&pci->piw[win_idx].piwar,  piwar);
 364		}
 365
 366		win_idx--;
 367		hose->dma_window_base_cur = 0x00000000;
 368		hose->dma_window_size = (resource_size_t)sz;
 369
 370		/*
 371		 * if we have >4G of memory setup second PCI inbound window to
 372		 * let devices that are 64-bit address capable to work w/o
 373		 * SWIOTLB and access the full range of memory
 374		 */
 375		if (sz != mem) {
 376			mem_log = ilog2(mem);
 377
 378			/* Size window up if we dont fit in exact power-of-2 */
 379			if ((1ull << mem_log) != mem)
 380				mem_log++;
 381
 382			piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
 383			pci64_dma_offset = 1ULL << mem_log;
 384
 385			if (setup_inbound) {
 386				/* Setup inbound memory window */
 387				out_be32(&pci->piw[win_idx].pitar,  0x00000000);
 388				out_be32(&pci->piw[win_idx].piwbear,
 389						pci64_dma_offset >> 44);
 390				out_be32(&pci->piw[win_idx].piwbar,
 391						pci64_dma_offset >> 12);
 392				out_be32(&pci->piw[win_idx].piwar,  piwar);
 393			}
 394
 395			/*
 396			 * install our own dma_set_mask handler to fixup dma_ops
 397			 * and dma_offset
 398			 */
 399			ppc_md.dma_set_mask = fsl_pci_dma_set_mask;
 400
 401			pr_info("%pOF: Setup 64-bit PCI DMA window\n", hose->dn);
 402		}
 403	} else {
 404		u64 paddr = 0;
 405
 406		if (setup_inbound) {
 407			/* Setup inbound memory window */
 408			out_be32(&pci->piw[win_idx].pitar,  paddr >> 12);
 409			out_be32(&pci->piw[win_idx].piwbar, paddr >> 12);
 410			out_be32(&pci->piw[win_idx].piwar,
 411				 (piwar | (mem_log - 1)));
 412		}
 413
 414		win_idx--;
 415		paddr += 1ull << mem_log;
 416		sz -= 1ull << mem_log;
 417
 418		if (sz) {
 419			mem_log = ilog2(sz);
 420			piwar |= (mem_log - 1);
 421
 422			if (setup_inbound) {
 423				out_be32(&pci->piw[win_idx].pitar,
 424					 paddr >> 12);
 425				out_be32(&pci->piw[win_idx].piwbar,
 426					 paddr >> 12);
 427				out_be32(&pci->piw[win_idx].piwar, piwar);
 428			}
 429
 430			win_idx--;
 431			paddr += 1ull << mem_log;
 432		}
 433
 434		hose->dma_window_base_cur = 0x00000000;
 435		hose->dma_window_size = (resource_size_t)paddr;
 436	}
 437
 438	if (hose->dma_window_size < mem) {
 439#ifdef CONFIG_SWIOTLB
 440		ppc_swiotlb_enable = 1;
 441#else
 442		pr_err("%pOF: ERROR: Memory size exceeds PCI ATMU ability to "
 443			"map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
 444			 hose->dn);
 445#endif
 446		/* adjusting outbound windows could reclaim space in mem map */
 447		if (paddr_hi < 0xffffffffull)
 448			pr_warn("%pOF: WARNING: Outbound window cfg leaves "
 449				"gaps in memory map. Adjusting the memory map "
 450				"could reduce unnecessary bounce buffering.\n",
 451				hose->dn);
 452
 453		pr_info("%pOF: DMA window size is 0x%llx\n", hose->dn,
 454			(u64)hose->dma_window_size);
 455	}
 456}
 457
 458static void __init setup_pci_cmd(struct pci_controller *hose)
 459{
 460	u16 cmd;
 461	int cap_x;
 462
 463	early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
 464	cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
 465		| PCI_COMMAND_IO;
 466	early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);
 467
 468	cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX);
 469	if (cap_x) {
 470		int pci_x_cmd = cap_x + PCI_X_CMD;
 471		cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
 472			| PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
 473		early_write_config_word(hose, 0, 0, pci_x_cmd, cmd);
 474	} else {
 475		early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
 476	}
 477}
 478
 479void fsl_pcibios_fixup_bus(struct pci_bus *bus)
 480{
 481	struct pci_controller *hose = pci_bus_to_host(bus);
 482	int i, is_pcie = 0, no_link;
 483
 484	/* The root complex bridge comes up with bogus resources,
 485	 * we copy the PHB ones in.
 486	 *
 487	 * With the current generic PCI code, the PHB bus no longer
 488	 * has bus->resource[0..4] set, so things are a bit more
 489	 * tricky.
 490	 */
 491
 492	if (fsl_pcie_bus_fixup)
 493		is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
 494	no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);
 495
 496	if (bus->parent == hose->bus && (is_pcie || no_link)) {
 497		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
 498			struct resource *res = bus->resource[i];
 499			struct resource *par;
 500
 501			if (!res)
 502				continue;
 503			if (i == 0)
 504				par = &hose->io_resource;
 505			else if (i < 4)
 506				par = &hose->mem_resources[i-1];
 507			else par = NULL;
 508
 509			res->start = par ? par->start : 0;
 510			res->end   = par ? par->end   : 0;
 511			res->flags = par ? par->flags : 0;
 512		}
 513	}
 514}
 515
 516int fsl_add_bridge(struct platform_device *pdev, int is_primary)
 517{
 518	int len;
 519	struct pci_controller *hose;
 520	struct resource rsrc;
 521	const int *bus_range;
 522	u8 hdr_type, progif;
 523	struct device_node *dev;
 524	struct ccsr_pci __iomem *pci;
 525	u16 temp;
 526	u32 svr = mfspr(SPRN_SVR);
 527
 528	dev = pdev->dev.of_node;
 529
 530	if (!of_device_is_available(dev)) {
 531		pr_warn("%pOF: disabled\n", dev);
 532		return -ENODEV;
 533	}
 534
 535	pr_debug("Adding PCI host bridge %pOF\n", dev);
 536
 537	/* Fetch host bridge registers address */
 538	if (of_address_to_resource(dev, 0, &rsrc)) {
 539		printk(KERN_WARNING "Can't get pci register base!");
 540		return -ENOMEM;
 541	}
 542
 543	/* Get bus range if any */
 544	bus_range = of_get_property(dev, "bus-range", &len);
 545	if (bus_range == NULL || len < 2 * sizeof(int))
 546		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
 547			" bus 0\n", dev);
 548
 549	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 550	hose = pcibios_alloc_controller(dev);
 551	if (!hose)
 552		return -ENOMEM;
 553
 554	/* set platform device as the parent */
 555	hose->parent = &pdev->dev;
 556	hose->first_busno = bus_range ? bus_range[0] : 0x0;
 557	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 558
 559	pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
 560		 (u64)rsrc.start, (u64)resource_size(&rsrc));
 561
 562	pci = hose->private_data = ioremap(rsrc.start, resource_size(&rsrc));
 563	if (!hose->private_data)
 564		goto no_bridge;
 565
 566	setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
 567			   PPC_INDIRECT_TYPE_BIG_ENDIAN);
 568
 569	if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
 570		hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 571
 572	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 573		/* use fsl_indirect_read_config for PCIe */
 574		hose->ops = &fsl_indirect_pcie_ops;
 575		/* For PCIE read HEADER_TYPE to identify controller mode */
 576		early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
 577		if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
 578			goto no_bridge;
 579
 580	} else {
 581		/* For PCI read PROG to identify controller mode */
 582		early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
 583		if ((progif & 1) &&
 584		    !of_property_read_bool(dev, "fsl,pci-agent-force-enum"))
 585			goto no_bridge;
 586	}
 587
 588	setup_pci_cmd(hose);
 589
 590	/* check PCI express link status */
 591	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 592		hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG |
 593			PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
 594		if (fsl_pcie_check_link(hose))
 595			hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 596	} else {
 597		/*
 598		 * Set PBFR (PCI Bus Function Register)[10] = 1 to
 599		 * disable combining of requests that cross a cacheline
 600		 * boundary into one burst transaction.
 601		 * PCI-X operation is not affected.
 602		 * Works around erratum PCI 5 on MPC8548.
 603		 */
 604#define PCI_BUS_FUNCTION 0x44
 605#define PCI_BUS_FUNCTION_MDS 0x400	/* Master disable streaming */
 606		if (((SVR_SOC_VER(svr) == SVR_8543) ||
 607		     (SVR_SOC_VER(svr) == SVR_8545) ||
 608		     (SVR_SOC_VER(svr) == SVR_8547) ||
 609		     (SVR_SOC_VER(svr) == SVR_8548)) &&
 610		    !early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX)) {
 611			early_read_config_word(hose, 0, 0,
 612					PCI_BUS_FUNCTION, &temp);
 613			temp |= PCI_BUS_FUNCTION_MDS;
 614			early_write_config_word(hose, 0, 0,
 615					PCI_BUS_FUNCTION, temp);
 616		}
 617	}
 618
 619	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
 620		"Firmware bus number: %d->%d\n",
 621		(unsigned long long)rsrc.start, hose->first_busno,
 622		hose->last_busno);
 623
 624	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
 625		hose, hose->cfg_addr, hose->cfg_data);
 626
 627	/* Interpret the "ranges" property */
 628	/* This also maps the I/O region and sets isa_io/mem_base */
 629	pci_process_bridge_OF_ranges(hose, dev, is_primary);
 630
 631	/* Setup PEX window registers */
 632	setup_pci_atmu(hose);
 633
 634	/* Set up controller operations */
 635	setup_swiotlb_ops(hose);
 636
 637	return 0;
 638
 639no_bridge:
 640	iounmap(hose->private_data);
 641	/* unmap cfg_data & cfg_addr separately if not on same page */
 642	if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
 643	    ((unsigned long)hose->cfg_addr & PAGE_MASK))
 644		iounmap(hose->cfg_data);
 645	iounmap(hose->cfg_addr);
 646	pcibios_free_controller(hose);
 647	return -ENODEV;
 648}
 649#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
 650
 651DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID,
 652			quirk_fsl_pcie_early);
 653
 654#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
 655struct mpc83xx_pcie_priv {
 656	void __iomem *cfg_type0;
 657	void __iomem *cfg_type1;
 658	u32 dev_base;
 659};
 660
 661struct pex_inbound_window {
 662	u32 ar;
 663	u32 tar;
 664	u32 barl;
 665	u32 barh;
 666};
 667
 668/*
 669 * Following the U-Boot convention, PCIe outbound window 0 is used
 670 * for configuration transactions.
 671 */
 672#define PEX_OUTWIN0_BAR		0xCA4
 673#define PEX_OUTWIN0_TAL		0xCA8
 674#define PEX_OUTWIN0_TAH		0xCAC
 675#define PEX_RC_INWIN_BASE	0xE60
 676#define PEX_RCIWARn_EN		0x1
 677
 678static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn)
 679{
 680	struct pci_controller *hose = pci_bus_to_host(bus);
 681
 682	if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK)
 683		return PCIBIOS_DEVICE_NOT_FOUND;
 684	/*
 685	 * Workaround for a HW bug: for Type 0 configuration transactions the
 686	 * PCI-E controller does not check the device number bits and just
 687	 * assumes that the device number bits are 0.
 688	 */
 689	if (bus->number == hose->first_busno ||
 690			bus->primary == hose->first_busno) {
 691		if (devfn & 0xf8)
 692			return PCIBIOS_DEVICE_NOT_FOUND;
 693	}
 694
 695	if (ppc_md.pci_exclude_device) {
 696		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
 697			return PCIBIOS_DEVICE_NOT_FOUND;
 698	}
 699
 700	return PCIBIOS_SUCCESSFUL;
 701}
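/*
 * Example of the device-number filtering above (a sketch, not from the
 * original sources): on the root bus only device 0 is reachable because
 * of the Type 0 hardware quirk, so e.g.:
 *
 *	PCI_DEVFN(0, 3) == 0x03 -> (0x03 & 0xf8) == 0 -> access allowed
 *	PCI_DEVFN(1, 0) == 0x08 -> (0x08 & 0xf8) != 0 -> PCIBIOS_DEVICE_NOT_FOUND
 *
 * The devfn values follow the standard PCI_DEVFN(slot, func) encoding
 * (slot in bits 7:3, function in bits 2:0).
 */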
 702
 703static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus,
 704					    unsigned int devfn, int offset)
 705{
 706	struct pci_controller *hose = pci_bus_to_host(bus);
 707	struct mpc83xx_pcie_priv *pcie = hose->dn->data;
 708	u32 dev_base = bus->number << 24 | devfn << 16;
 709	int ret;
 710
 711	ret = mpc83xx_pcie_exclude_device(bus, devfn);
 712	if (ret)
 713		return NULL;
 714
 715	offset &= 0xfff;
 716
 717	/* Type 0 */
 718	if (bus->number == hose->first_busno)
 719		return pcie->cfg_type0 + offset;
 720
 721	if (pcie->dev_base == dev_base)
 722		goto mapped;
 723
 724	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, dev_base);
 725
 726	pcie->dev_base = dev_base;
 727mapped:
 728	return pcie->cfg_type1 + offset;
 729}
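/*
 * Worked example of the dev_base encoding used above (illustrative only):
 * for a Type 1 access to bus 1, device 2, function 0,
 *
 *	devfn    = PCI_DEVFN(2, 0)      = 0x10
 *	dev_base = 1 << 24 | 0x10 << 16 = 0x01100000
 *
 * which is written to outbound window 0's target address low register
 * (PEX_OUTWIN0_TAL) so that subsequent accesses through cfg_type1 hit
 * that device's configuration space.
 */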
 730
 731static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
 732				     int offset, int len, u32 val)
 733{
 734	struct pci_controller *hose = pci_bus_to_host(bus);
 735
 736	/* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
 737	if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
 738		val &= 0xffffff00;
 739
 740	return pci_generic_config_write(bus, devfn, offset, len, val);
 741}
 742
 743static struct pci_ops mpc83xx_pcie_ops = {
 744	.map_bus = mpc83xx_pcie_remap_cfg,
 745	.read = pci_generic_config_read,
 746	.write = mpc83xx_pcie_write_config,
 747};
 748
 749static int __init mpc83xx_pcie_setup(struct pci_controller *hose,
 750				     struct resource *reg)
 751{
 752	struct mpc83xx_pcie_priv *pcie;
 753	u32 cfg_bar;
 754	int ret = -ENOMEM;
 755
 756	pcie = zalloc_maybe_bootmem(sizeof(*pcie), GFP_KERNEL);
 757	if (!pcie)
 758		return ret;
 759
 760	pcie->cfg_type0 = ioremap(reg->start, resource_size(reg));
 761	if (!pcie->cfg_type0)
 762		goto err0;
 763
 764	cfg_bar = in_le32(pcie->cfg_type0 + PEX_OUTWIN0_BAR);
 765	if (!cfg_bar) {
 766		/* PCI-E isn't configured. */
 767		ret = -ENODEV;
 768		goto err1;
 769	}
 770
 771	pcie->cfg_type1 = ioremap(cfg_bar, 0x1000);
 772	if (!pcie->cfg_type1)
 773		goto err1;
 774
 775	WARN_ON(hose->dn->data);
 776	hose->dn->data = pcie;
 777	hose->ops = &mpc83xx_pcie_ops;
 778	hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 779
 780	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0);
 781	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0);
 782
 783	if (fsl_pcie_check_link(hose))
 784		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 785
 786	return 0;
 787err1:
 788	iounmap(pcie->cfg_type0);
 789err0:
 790	kfree(pcie);
 791	return ret;
 792
 793}
 794
 795int __init mpc83xx_add_bridge(struct device_node *dev)
 796{
 797	int ret;
 798	int len;
 799	struct pci_controller *hose;
 800	struct resource rsrc_reg;
 801	struct resource rsrc_cfg;
 802	const int *bus_range;
 803	int primary;
 804
 805	is_mpc83xx_pci = 1;
 806
 807	if (!of_device_is_available(dev)) {
 808		pr_warn("%pOF: disabled by the firmware.\n",
 809			dev);
 810		return -ENODEV;
 811	}
 812	pr_debug("Adding PCI host bridge %pOF\n", dev);
 813
 814	/* Fetch host bridge registers address */
 815	if (of_address_to_resource(dev, 0, &rsrc_reg)) {
 816		printk(KERN_WARNING "Can't get pci register base!\n");
 817		return -ENOMEM;
 818	}
 819
 820	memset(&rsrc_cfg, 0, sizeof(rsrc_cfg));
 821
 822	if (of_address_to_resource(dev, 1, &rsrc_cfg)) {
 823		printk(KERN_WARNING
 824			"No pci config register base in dev tree, "
 825			"using default\n");
 826		/*
 827		 * MPC83xx supports up to two host controllers
 828		 * 	one at 0x8500 has config space registers at 0x8300
 829		 * 	one at 0x8600 has config space registers at 0x8380
 830		 */
 831		if ((rsrc_reg.start & 0xfffff) == 0x8500)
 832			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300;
 833		else if ((rsrc_reg.start & 0xfffff) == 0x8600)
 834			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380;
 835	}
 836	/*
 837	 * Controller at offset 0x8500 is primary
 838	 */
 839	if ((rsrc_reg.start & 0xfffff) == 0x8500)
 840		primary = 1;
 841	else
 842		primary = 0;
 843
 844	/* Get bus range if any */
 845	bus_range = of_get_property(dev, "bus-range", &len);
 846	if (bus_range == NULL || len < 2 * sizeof(int)) {
 847		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
 848		       " bus 0\n", dev);
 849	}
 850
 851	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 852	hose = pcibios_alloc_controller(dev);
 853	if (!hose)
 854		return -ENOMEM;
 855
 856	hose->first_busno = bus_range ? bus_range[0] : 0;
 857	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 858
 859	if (of_device_is_compatible(dev, "fsl,mpc8314-pcie")) {
 860		ret = mpc83xx_pcie_setup(hose, &rsrc_reg);
 861		if (ret)
 862			goto err0;
 863	} else {
 864		setup_indirect_pci(hose, rsrc_cfg.start,
 865				   rsrc_cfg.start + 4, 0);
 866	}
 867
 868	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
 869	       "Firmware bus number: %d->%d\n",
 870	       (unsigned long long)rsrc_reg.start, hose->first_busno,
 871	       hose->last_busno);
 872
 873	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
 874	    hose, hose->cfg_addr, hose->cfg_data);
 875
 876	/* Interpret the "ranges" property */
 877	/* This also maps the I/O region and sets isa_io/mem_base */
 878	pci_process_bridge_OF_ranges(hose, dev, primary);
 879
 880	return 0;
 881err0:
 882	pcibios_free_controller(hose);
 883	return ret;
 884}
 885#endif /* CONFIG_PPC_83xx */
 886
 887u64 fsl_pci_immrbar_base(struct pci_controller *hose)
 888{
 889#ifdef CONFIG_PPC_83xx
 890	if (is_mpc83xx_pci) {
 891		struct mpc83xx_pcie_priv *pcie = hose->dn->data;
 892		struct pex_inbound_window *in;
 893		int i;
 894
 895		/* Walk the Root Complex Inbound windows to match IMMR base */
 896		in = pcie->cfg_type0 + PEX_RC_INWIN_BASE;
 897		for (i = 0; i < 4; i++) {
 898			/* not enabled, skip */
 899			if (!(in_le32(&in[i].ar) & PEX_RCIWARn_EN))
 900				continue;
 901
 902			if (get_immrbase() == in_le32(&in[i].tar))
 903				return (u64)in_le32(&in[i].barh) << 32 |
 904					    in_le32(&in[i].barl);
 905		}
 906
 907		printk(KERN_WARNING "could not find PCI BAR matching IMMR\n");
 908	}
 909#endif
 910
 911#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 912	if (!is_mpc83xx_pci) {
 913		u32 base;
 914
 915		pci_bus_read_config_dword(hose->bus,
 916			PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base);
 917
 918		/*
 919		 * For PEXCSRBAR, bits 3-0 indicate the prefetchable and
 920		 * address type attributes, so they must be masked off
 921		 * when reading the base address.
 922		 */
 923		base &= PCI_BASE_ADDRESS_MEM_MASK;
 924
 925		return base;
 926	}
 927#endif
 928
 929	return 0;
 930}
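/*
 * Masking example for the PEXCSRBAR read above (illustrative, the value
 * is hypothetical): if BAR0 of the root complex reads back as 0xffe0000c,
 * the low attribute bits are stripped with
 *
 *	0xffe0000c & PCI_BASE_ADDRESS_MEM_MASK == 0xffe00000
 *
 * leaving the base address that the inbound window points at.
 */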
 931
 932#ifdef CONFIG_E500
 933static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
 934{
 935	unsigned int rd, ra, rb, d;
 936
 937	rd = get_rt(inst);
 938	ra = get_ra(inst);
 939	rb = get_rb(inst);
 940	d = get_d(inst);
 941
 942	switch (get_op(inst)) {
 943	case 31:
 944		switch (get_xop(inst)) {
 945		case OP_31_XOP_LWZX:
 946		case OP_31_XOP_LWBRX:
 947			regs->gpr[rd] = 0xffffffff;
 948			break;
 949
 950		case OP_31_XOP_LWZUX:
 951			regs->gpr[rd] = 0xffffffff;
 952			regs->gpr[ra] += regs->gpr[rb];
 953			break;
 954
 955		case OP_31_XOP_LBZX:
 956			regs->gpr[rd] = 0xff;
 957			break;
 958
 959		case OP_31_XOP_LBZUX:
 960			regs->gpr[rd] = 0xff;
 961			regs->gpr[ra] += regs->gpr[rb];
 962			break;
 963
 964		case OP_31_XOP_LHZX:
 965		case OP_31_XOP_LHBRX:
 966			regs->gpr[rd] = 0xffff;
 967			break;
 968
 969		case OP_31_XOP_LHZUX:
 970			regs->gpr[rd] = 0xffff;
 971			regs->gpr[ra] += regs->gpr[rb];
 972			break;
 973
 974		case OP_31_XOP_LHAX:
 975			regs->gpr[rd] = ~0UL;
 976			break;
 977
 978		case OP_31_XOP_LHAUX:
 979			regs->gpr[rd] = ~0UL;
 980			regs->gpr[ra] += regs->gpr[rb];
 981			break;
 982
 983		default:
 984			return 0;
 985		}
 986		break;
 987
 988	case OP_LWZ:
 989		regs->gpr[rd] = 0xffffffff;
 990		break;
 991
 992	case OP_LWZU:
 993		regs->gpr[rd] = 0xffffffff;
 994		regs->gpr[ra] += (s16)d;
 995		break;
 996
 997	case OP_LBZ:
 998		regs->gpr[rd] = 0xff;
 999		break;
1000
1001	case OP_LBZU:
1002		regs->gpr[rd] = 0xff;
1003		regs->gpr[ra] += (s16)d;
1004		break;
1005
1006	case OP_LHZ:
1007		regs->gpr[rd] = 0xffff;
1008		break;
1009
1010	case OP_LHZU:
1011		regs->gpr[rd] = 0xffff;
1012		regs->gpr[ra] += (s16)d;
1013		break;
1014
1015	case OP_LHA:
1016		regs->gpr[rd] = ~0UL;
1017		break;
1018
1019	case OP_LHAU:
1020		regs->gpr[rd] = ~0UL;
1021		regs->gpr[ra] += (s16)d;
1022		break;
1023
1024	default:
1025		return 0;
1026	}
1027
1028	return 1;
1029}
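/*
 * Example of the fixup above (a sketch, not from the original sources):
 * if a load from a dead PCIe link machine-checks while executing
 *
 *	lwzu r3, 4(r5)
 *
 * the OP_LWZU case forces r3 to 0xffffffff (the all-ones value a master
 * abort would return) and still performs the base-register update,
 * r5 += 4, before the caller skips the faulting instruction.
 */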
1030
1031static int is_in_pci_mem_space(phys_addr_t addr)
1032{
1033	struct pci_controller *hose;
1034	struct resource *res;
1035	int i;
1036
1037	list_for_each_entry(hose, &hose_list, list_node) {
1038		if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG))
1039			continue;
1040
1041		for (i = 0; i < 3; i++) {
1042			res = &hose->mem_resources[i];
1043			if ((res->flags & IORESOURCE_MEM) &&
1044				addr >= res->start && addr <= res->end)
1045				return 1;
1046		}
1047	}
1048	return 0;
1049}
1050
1051int fsl_pci_mcheck_exception(struct pt_regs *regs)
1052{
1053	u32 inst;
1054	int ret;
1055	phys_addr_t addr = 0;
1056
1057	/* Let KVM/QEMU deal with the exception */
1058	if (regs->msr & MSR_GS)
1059		return 0;
1060
1061#ifdef CONFIG_PHYS_64BIT
1062	addr = mfspr(SPRN_MCARU);
1063	addr <<= 32;
1064#endif
1065	addr += mfspr(SPRN_MCAR);
1066
1067	if (is_in_pci_mem_space(addr)) {
1068		if (user_mode(regs)) {
1069			pagefault_disable();
1070			ret = get_user(inst, (__u32 __user *)regs->nip);
1071			pagefault_enable();
1072		} else {
 1073			ret = get_kernel_nofault(inst, (void *)regs->nip);
1074		}
1075
1076		if (!ret && mcheck_handle_load(regs, inst)) {
1077			regs->nip += 4;
1078			return 1;
1079		}
1080	}
1081
1082	return 0;
1083}
1084#endif
1085
1086#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
1087static const struct of_device_id pci_ids[] = {
1088	{ .compatible = "fsl,mpc8540-pci", },
1089	{ .compatible = "fsl,mpc8548-pcie", },
1090	{ .compatible = "fsl,mpc8610-pci", },
1091	{ .compatible = "fsl,mpc8641-pcie", },
1092	{ .compatible = "fsl,qoriq-pcie", },
1093	{ .compatible = "fsl,qoriq-pcie-v2.1", },
1094	{ .compatible = "fsl,qoriq-pcie-v2.2", },
1095	{ .compatible = "fsl,qoriq-pcie-v2.3", },
1096	{ .compatible = "fsl,qoriq-pcie-v2.4", },
1097	{ .compatible = "fsl,qoriq-pcie-v3.0", },
1098
1099	/*
1100	 * The following entries are for compatibility with older device
1101	 * trees.
1102	 */
1103	{ .compatible = "fsl,p1022-pcie", },
1104	{ .compatible = "fsl,p4080-pcie", },
1105
1106	{},
1107};
1108
1109struct device_node *fsl_pci_primary;
1110
1111void fsl_pci_assign_primary(void)
1112{
1113	struct device_node *np;
1114
1115	/* Callers can specify the primary bus using other means. */
1116	if (fsl_pci_primary)
1117		return;
1118
1119	/* If a PCI host bridge contains an ISA node, it's primary. */
1120	np = of_find_node_by_type(NULL, "isa");
1121	while ((fsl_pci_primary = of_get_parent(np))) {
1122		of_node_put(np);
1123		np = fsl_pci_primary;
1124
1125		if (of_match_node(pci_ids, np) && of_device_is_available(np))
1126			return;
1127	}
1128
 1129	/*
 1130	 * If there's no PCI host bridge with ISA, arbitrarily
1131	 * designate one as primary.  This can go away once
1132	 * various bugs with primary-less systems are fixed.
1133	 */
1134	for_each_matching_node(np, pci_ids) {
1135		if (of_device_is_available(np)) {
1136			fsl_pci_primary = np;
1137			of_node_put(np);
1138			return;
1139		}
1140	}
1141}
1142
1143#ifdef CONFIG_PM_SLEEP
1144static irqreturn_t fsl_pci_pme_handle(int irq, void *dev_id)
1145{
1146	struct pci_controller *hose = dev_id;
1147	struct ccsr_pci __iomem *pci = hose->private_data;
1148	u32 dr;
1149
1150	dr = in_be32(&pci->pex_pme_mes_dr);
1151	if (!dr)
1152		return IRQ_NONE;
1153
1154	out_be32(&pci->pex_pme_mes_dr, dr);
1155
1156	return IRQ_HANDLED;
1157}
1158
1159static int fsl_pci_pme_probe(struct pci_controller *hose)
1160{
1161	struct ccsr_pci __iomem *pci;
1162	struct pci_dev *dev;
1163	int pme_irq;
1164	int res;
1165	u16 pms;
1166
1167	/* Get hose's pci_dev */
1168	dev = list_first_entry(&hose->bus->devices, typeof(*dev), bus_list);
1169
1170	/* PME Disable */
1171	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1172	pms &= ~PCI_PM_CTRL_PME_ENABLE;
1173	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1174
1175	pme_irq = irq_of_parse_and_map(hose->dn, 0);
1176	if (!pme_irq) {
1177		dev_err(&dev->dev, "Failed to map PME interrupt.\n");
1178
1179		return -ENXIO;
1180	}
1181
1182	res = devm_request_irq(hose->parent, pme_irq,
1183			fsl_pci_pme_handle,
1184			IRQF_SHARED,
1185			"[PCI] PME", hose);
1186	if (res < 0) {
1187		dev_err(&dev->dev, "Unable to request irq %d for PME\n", pme_irq);
1188		irq_dispose_mapping(pme_irq);
1189
1190		return -ENODEV;
1191	}
1192
1193	pci = hose->private_data;
1194
1195	/* Enable PTOD, ENL23D & EXL23D */
1196	clrbits32(&pci->pex_pme_mes_disr,
1197		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1198
1199	out_be32(&pci->pex_pme_mes_ier, 0);
1200	setbits32(&pci->pex_pme_mes_ier,
1201		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1202
1203	/* PME Enable */
1204	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1205	pms |= PCI_PM_CTRL_PME_ENABLE;
1206	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1207
1208	return 0;
1209}
1210
1211static void send_pme_turnoff_message(struct pci_controller *hose)
1212{
1213	struct ccsr_pci __iomem *pci = hose->private_data;
1214	u32 dr;
1215	int i;
1216
1217	/* Send PME_Turn_Off Message Request */
1218	setbits32(&pci->pex_pmcr, PEX_PMCR_PTOMR);
1219
 1220	/* Wait for turn off to complete */
1221	for (i = 0; i < 150; i++) {
1222		dr = in_be32(&pci->pex_pme_mes_dr);
1223		if (dr) {
1224			out_be32(&pci->pex_pme_mes_dr, dr);
1225			break;
1226		}
1227
1228		udelay(1000);
1229	}
1230}
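/*
 * Timing note (illustrative): the polling loop above waits for the
 * PME_Turn_Off handshake to be acknowledged in pex_pme_mes_dr for at
 * most 150 iterations of udelay(1000), i.e. roughly 150 ms, before
 * giving up silently.
 */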
1231
1232static void fsl_pci_syscore_do_suspend(struct pci_controller *hose)
1233{
1234	send_pme_turnoff_message(hose);
1235}
1236
1237static int fsl_pci_syscore_suspend(void)
1238{
1239	struct pci_controller *hose, *tmp;
1240
1241	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1242		fsl_pci_syscore_do_suspend(hose);
1243
1244	return 0;
1245}
1246
1247static void fsl_pci_syscore_do_resume(struct pci_controller *hose)
1248{
1249	struct ccsr_pci __iomem *pci = hose->private_data;
1250	u32 dr;
1251	int i;
1252
1253	/* Send Exit L2 State Message */
1254	setbits32(&pci->pex_pmcr, PEX_PMCR_EXL2S);
1255
 1256	/* Wait for exit to complete */
1257	for (i = 0; i < 150; i++) {
1258		dr = in_be32(&pci->pex_pme_mes_dr);
1259		if (dr) {
1260			out_be32(&pci->pex_pme_mes_dr, dr);
1261			break;
1262		}
1263
1264		udelay(1000);
1265	}
1266
1267	setup_pci_atmu(hose);
1268}
1269
1270static void fsl_pci_syscore_resume(void)
1271{
1272	struct pci_controller *hose, *tmp;
1273
1274	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1275		fsl_pci_syscore_do_resume(hose);
1276}
1277
1278static struct syscore_ops pci_syscore_pm_ops = {
1279	.suspend = fsl_pci_syscore_suspend,
1280	.resume = fsl_pci_syscore_resume,
1281};
1282#endif
1283
1284void fsl_pcibios_fixup_phb(struct pci_controller *phb)
1285{
1286#ifdef CONFIG_PM_SLEEP
1287	fsl_pci_pme_probe(phb);
1288#endif
1289}
1290
1291static int add_err_dev(struct platform_device *pdev)
1292{
1293	struct platform_device *errdev;
1294	struct mpc85xx_edac_pci_plat_data pd = {
1295		.of_node = pdev->dev.of_node
1296	};
1297
1298	errdev = platform_device_register_resndata(&pdev->dev,
1299						   "mpc85xx-pci-edac",
1300						   PLATFORM_DEVID_AUTO,
1301						   pdev->resource,
1302						   pdev->num_resources,
1303						   &pd, sizeof(pd));
1304
1305	return PTR_ERR_OR_ZERO(errdev);
1306}
1307
1308static int fsl_pci_probe(struct platform_device *pdev)
1309{
1310	struct device_node *node;
1311	int ret;
1312
1313	node = pdev->dev.of_node;
1314	ret = fsl_add_bridge(pdev, fsl_pci_primary == node);
1315	if (ret)
1316		return ret;
1317
1318	ret = add_err_dev(pdev);
1319	if (ret)
1320		dev_err(&pdev->dev, "couldn't register error device: %d\n",
1321			ret);
1322
1323	return 0;
1324}
1325
1326static struct platform_driver fsl_pci_driver = {
1327	.driver = {
1328		.name = "fsl-pci",
1329		.of_match_table = pci_ids,
1330	},
 1331	.probe = fsl_pci_probe,
 1332};
1333
1334static int __init fsl_pci_init(void)
1335{
1336#ifdef CONFIG_PM_SLEEP
1337	register_syscore_ops(&pci_syscore_pm_ops);
1338#endif
1339	return platform_driver_register(&fsl_pci_driver);
1340}
1341arch_initcall(fsl_pci_init);
1342#endif