   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * MPC83xx/85xx/86xx PCI/PCIE support routing.
   4 *
   5 * Copyright 2007-2012 Freescale Semiconductor, Inc.
   6 * Copyright 2008-2009 MontaVista Software, Inc.
   7 *
   8 * Initial author: Xianghua Xiao <x.xiao@freescale.com>
   9 * Recode: ZHANG WEI <wei.zhang@freescale.com>
   10 * Rewrite the routing for Freescale PCI and PCI Express
  11 * 	Roy Zang <tie-fei.zang@freescale.com>
  12 * MPC83xx PCI-Express support:
  13 * 	Tony Li <tony.li@freescale.com>
   14 * 	Anton Vorontsov <avorontsov@ru.mvista.com>
  15 */
  16#include <linux/kernel.h>
  17#include <linux/pci.h>
  18#include <linux/delay.h>
  19#include <linux/string.h>
  20#include <linux/fsl/edac.h>
  21#include <linux/init.h>
  22#include <linux/interrupt.h>
  23#include <linux/memblock.h>
  24#include <linux/log2.h>
  25#include <linux/of_address.h>
  26#include <linux/of_irq.h>
  27#include <linux/platform_device.h>
  28#include <linux/slab.h>
  29#include <linux/suspend.h>
  30#include <linux/syscore_ops.h>
  31#include <linux/uaccess.h>
  32
   33#include <asm/io.h>
  34#include <asm/pci-bridge.h>
  35#include <asm/ppc-pci.h>
  36#include <asm/machdep.h>
  37#include <asm/mpc85xx.h>
  38#include <asm/disassemble.h>
  39#include <asm/ppc-opcode.h>
  40#include <asm/swiotlb.h>
  41#include <asm/setup.h>
  42#include <sysdev/fsl_soc.h>
  43#include <sysdev/fsl_pci.h>
  44
  45static int fsl_pcie_bus_fixup, is_mpc83xx_pci;
  46
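/*
 * On these Freescale PCIe controllers the root complex typically advertises
 * a non-bridge class code (a processor class on many parts), so the generic
 * PCI code would not scan behind it.  The early quirk below forces the class
 * to PCI-PCI bridge for host-mode controllers and records that the fixup ran
 * so fsl_pcibios_fixup_bus() knows to repair the root bus resources.
 */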
  47static void quirk_fsl_pcie_early(struct pci_dev *dev)
  48{
  49	u8 hdr_type;
  50
  51	/* if we aren't a PCIe don't bother */
  52	if (!pci_is_pcie(dev))
  53		return;
  54
  55	/* if we aren't in host mode don't bother */
  56	pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
  57	if ((hdr_type & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_BRIDGE)
  58		return;
  59
  60	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
  61	fsl_pcie_bus_fixup = 1;
  62	return;
  63}
  64
  65static int fsl_indirect_read_config(struct pci_bus *, unsigned int,
  66				    int, int, u32 *);
  67
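/*
 * Returns nonzero when the PCIe link is not up.  Pre-3.0 controller IP
 * (PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK set) exposes the LTSSM state through a
 * config-space register, and the link is considered down while that state is
 * below L0; rev 3.0 and later report the LTSSM state in CSR0 instead.
 */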
  68static int fsl_pcie_check_link(struct pci_controller *hose)
  69{
  70	u32 val = 0;
  71
  72	if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
  73		if (hose->ops->read == fsl_indirect_read_config)
  74			__indirect_read_config(hose, hose->first_busno, 0,
  75					       PCIE_LTSSM, 4, &val);
  76		else
  77			early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
  78		if (val < PCIE_LTSSM_L0)
  79			return 1;
  80	} else {
  81		struct ccsr_pci __iomem *pci = hose->private_data;
  82		/* for PCIe IP rev 3.0 or greater use CSR0 for link state */
  83		val = (in_be32(&pci->pex_csr0) & PEX_CSR0_LTSSM_MASK)
  84				>> PEX_CSR0_LTSSM_SHIFT;
  85		if (val != PEX_CSR0_LTSSM_L0)
  86			return 1;
  87	}
  88
  89	return 0;
  90}
  91
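/*
 * Config-read wrapper used for PCIe: the link state is re-sampled before
 * every read and PPC_INDIRECT_TYPE_NO_PCIE_LINK is updated accordingly, so
 * indirect_read_config() can fail accesses to devices behind a downed link
 * quickly instead of issuing config cycles that cannot complete.
 */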
  92static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
  93				    int offset, int len, u32 *val)
  94{
  95	struct pci_controller *hose = pci_bus_to_host(bus);
  96
  97	if (fsl_pcie_check_link(hose))
  98		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
  99	else
 100		hose->indirect_type &= ~PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 101
 102	return indirect_read_config(bus, devfn, offset, len, val);
 103}
 104
 105#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 106
 107static struct pci_ops fsl_indirect_pcie_ops =
 108{
 109	.read = fsl_indirect_read_config,
 110	.write = indirect_write_config,
 111};
 112
 113static u64 pci64_dma_offset;
 114
 115#ifdef CONFIG_SWIOTLB
 116static void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
 117{
 118	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
 119
 120	pdev->dev.bus_dma_limit =
 121		hose->dma_window_base_cur + hose->dma_window_size - 1;
 122}
 123
 124static void setup_swiotlb_ops(struct pci_controller *hose)
 125{
 126	if (ppc_swiotlb_enable)
  127		hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
 128}
 129#else
 130static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
 131#endif
 132
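/*
 * dma_set_mask() hook installed by setup_pci_atmu() when a second, large
 * inbound window covering all of RAM is programmed at PCI address
 * pci64_dma_offset.  A rough sketch of the effect: a PCI device whose DMA
 * mask spans that window gets archdata.dma_offset = pci64_dma_offset and no
 * bus_dma_limit, so its DMA addresses land in the high window and swiotlb
 * bounce buffering is avoided.
 */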
 133static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
  134{
 135	/*
 136	 * Fix up PCI devices that are able to DMA to the large inbound
 137	 * mapping that allows addressing any RAM address from across PCI.
 138	 */
 139	if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
 140		dev->bus_dma_limit = 0;
 141		dev->archdata.dma_offset = pci64_dma_offset;
  142	}
 143}
 144
 145static int setup_one_atmu(struct ccsr_pci __iomem *pci,
 146	unsigned int index, const struct resource *res,
 147	resource_size_t offset)
 148{
 149	resource_size_t pci_addr = res->start - offset;
 150	resource_size_t phys_addr = res->start;
 151	resource_size_t size = resource_size(res);
 152	u32 flags = 0x80044000; /* enable & mem R/W */
 153	unsigned int i;
 154
 155	pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
 156		(u64)res->start, (u64)size);
 157
 158	if (res->flags & IORESOURCE_PREFETCH)
 159		flags |= 0x10000000; /* enable relaxed ordering */
 160
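	/*
	 * Each outbound window must be a naturally aligned power-of-two, so
	 * the resource is split greedily: every pass uses the largest
	 * power-of-two chunk that both fits the remaining size and respects
	 * the alignment of the current PCI/physical address.  As a worked
	 * example (assuming these addresses), a 0x30000000-byte region at
	 * 0x80000000 would be covered by a 0x20000000 window followed by a
	 * 0x10000000 window.
	 */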
 161	for (i = 0; size > 0; i++) {
 162		unsigned int bits = min_t(u32, ilog2(size),
 163					__ffs(pci_addr | phys_addr));
 164
 165		if (index + i >= 5)
 166			return -1;
 167
 168		out_be32(&pci->pow[index + i].potar, pci_addr >> 12);
 169		out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44);
 170		out_be32(&pci->pow[index + i].powbar, phys_addr >> 12);
 171		out_be32(&pci->pow[index + i].powar, flags | (bits - 1));
 172
 173		pci_addr += (resource_size_t)1U << bits;
 174		phys_addr += (resource_size_t)1U << bits;
 175		size -= (resource_size_t)1U << bits;
 176	}
 177
 178	return i;
 179}
 180
 181static bool is_kdump(void)
 182{
 183	struct device_node *node;
 184	bool ret;
 185
 186	node = of_find_node_by_type(NULL, "memory");
 187	if (!node) {
 188		WARN_ON_ONCE(1);
 189		return false;
 190	}
 191
 192	ret = of_property_read_bool(node, "linux,usable-memory");
 193	of_node_put(node);
 194
 195	return ret;
 196}
 197
 198/* atmu setup for fsl pci/pcie controller */
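/*
 * The ATMUs (Address Translation and Mapping Units) define the windows
 * through which the controller forwards traffic: outbound windows
 * (potar/powbar/powar) translate CPU physical addresses to PCI memory/IO
 * addresses, while inbound windows (pitar/piwbar/piwar) translate PCI
 * addresses from device DMA back to local memory.  This routine programs
 * the outbound windows from the hose resources, places PEXCSRBAR, and
 * sizes the inbound window(s) to cover RAM.
 */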
 199static void setup_pci_atmu(struct pci_controller *hose)
 200{
 201	struct ccsr_pci __iomem *pci = hose->private_data;
 202	int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
 203	u64 mem, sz, paddr_hi = 0;
 204	u64 offset = 0, paddr_lo = ULLONG_MAX;
 205	u32 pcicsrbar = 0, pcicsrbar_sz;
 206	u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
 207			PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
 208	const u64 *reg;
 209	int len;
 210	bool setup_inbound;
 211
 212	/*
 213	 * If this is kdump, we don't want to trigger a bunch of PCI
 214	 * errors by closing the window on in-flight DMA.
 215	 *
 216	 * We still run most of the function's logic so that things like
 217	 * hose->dma_window_size still get set.
 218	 */
 219	setup_inbound = !is_kdump();
 220
 221	if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
 222		/*
 223		 * BSC9132 Rev1.0 has an issue where all the PEX inbound
 224		 * windows have implemented the default target value as 0xf
  225		 * for CCSR space. In all Freescale legacy devices the target
 226		 * of 0xf is reserved for local memory space. 9132 Rev1.0
 227		 * now has local memory space mapped to target 0x0 instead of
 228		 * 0xf. Hence adding a workaround to remove the target 0xf
 229		 * defined for memory space from Inbound window attributes.
 230		 */
 231		piwar &= ~PIWAR_TGI_LOCAL;
 232	}
 233
 234	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 235		if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
 236			win_idx = 2;
 237			start_idx = 0;
 238			end_idx = 3;
 239		}
 240	}
 241
 242	/* Disable all windows (except powar0 since it's ignored) */
 243	for(i = 1; i < 5; i++)
 244		out_be32(&pci->pow[i].powar, 0);
 245
 246	if (setup_inbound) {
 247		for (i = start_idx; i < end_idx; i++)
 248			out_be32(&pci->piw[i].piwar, 0);
 249	}
 250
 251	/* Setup outbound MEM window */
 252	for(i = 0, j = 1; i < 3; i++) {
 253		if (!(hose->mem_resources[i].flags & IORESOURCE_MEM))
 254			continue;
 255
 256		paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start);
 257		paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end);
 258
 259		/* We assume all memory resources have the same offset */
 260		offset = hose->mem_offset[i];
 261		n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset);
 262
 263		if (n < 0 || j >= 5) {
 264			pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
 265			hose->mem_resources[i].flags |= IORESOURCE_DISABLED;
 266		} else
 267			j += n;
 268	}
 269
 270	/* Setup outbound IO window */
 271	if (hose->io_resource.flags & IORESOURCE_IO) {
 272		if (j >= 5) {
 273			pr_err("Ran out of outbound PCI ATMUs for IO resource\n");
 274		} else {
 275			pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
 276				 "phy base 0x%016llx.\n",
 277				 (u64)hose->io_resource.start,
 278				 (u64)resource_size(&hose->io_resource),
 279				 (u64)hose->io_base_phys);
 280			out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
 281			out_be32(&pci->pow[j].potear, 0);
 282			out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
 283			/* Enable, IO R/W */
 284			out_be32(&pci->pow[j].powar, 0x80088000
 285				| (ilog2(hose->io_resource.end
 286				- hose->io_resource.start + 1) - 1));
 287		}
 288	}
 289
 290	/* convert to pci address space */
 291	paddr_hi -= offset;
 292	paddr_lo -= offset;
 293
 294	if (paddr_hi == paddr_lo) {
 295		pr_err("%pOF: No outbound window space\n", hose->dn);
 296		return;
 297	}
 298
 299	if (paddr_lo == 0) {
 300		pr_err("%pOF: No space for inbound window\n", hose->dn);
 301		return;
 302	}
 303
 304	/* setup PCSRBAR/PEXCSRBAR */
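	/*
	 * Standard BAR sizing probe: writing all-ones and reading the value
	 * back yields the size mask, and (~mask + 1) is the BAR size - e.g.
	 * a read-back of 0xfff00000 means a 1 MiB PEXCSRBAR.  The BAR is
	 * then placed just below 4 GiB when the outbound windows do not
	 * occupy that area, otherwise immediately below the lowest outbound
	 * window, aligned to its own size.
	 */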
 305	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
 306	early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz);
 307	pcicsrbar_sz = ~pcicsrbar_sz + 1;
 308
 309	if (paddr_hi < (0x100000000ull - pcicsrbar_sz) ||
 310		(paddr_lo > 0x100000000ull))
 311		pcicsrbar = 0x100000000ull - pcicsrbar_sz;
 312	else
 313		pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz;
 314	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar);
 315
 316	paddr_lo = min(paddr_lo, (u64)pcicsrbar);
 317
 318	pr_info("%pOF: PCICSRBAR @ 0x%x\n", hose->dn, pcicsrbar);
 319
 320	/* Setup inbound mem window */
 321	mem = memblock_end_of_DRAM();
 322	pr_info("%s: end of DRAM %llx\n", __func__, mem);
 323
 324	/*
 325	 * The msi-address-64 property, if it exists, indicates the physical
 326	 * address of the MSIIR register.  Normally, this register is located
 327	 * inside CCSR, so the ATMU that covers all of CCSR is used. But if
 328	 * this property exists, then we normally need to create a new ATMU
 329	 * for it.  For now, however, we cheat.  The only entity that creates
 330	 * this property is the Freescale hypervisor, and the address is
 331	 * specified in the partition configuration.  Typically, the address
 332	 * is located in the page immediately after the end of DDR.  If so, we
 333	 * can avoid allocating a new ATMU by extending the DDR ATMU by one
 334	 * page.
 335	 */
 336	reg = of_get_property(hose->dn, "msi-address-64", &len);
 337	if (reg && (len == sizeof(u64))) {
 338		u64 address = be64_to_cpup(reg);
 339
 340		if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
 341			pr_info("%pOF: extending DDR ATMU to cover MSIIR", hose->dn);
 342			mem += PAGE_SIZE;
 343		} else {
 344			/* TODO: Create a new ATMU for MSIIR */
 345			pr_warn("%pOF: msi-address-64 address of %llx is "
 346				"unsupported\n", hose->dn, address);
 347		}
 348	}
 349
 350	sz = min(mem, paddr_lo);
 351	mem_log = ilog2(sz);
 352
 353	/* PCIe can overmap inbound & outbound since RX & TX are separated */
 354	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 355		/* Size window to exact size if power-of-two or one size up */
 356		if ((1ull << mem_log) != mem) {
 357			mem_log++;
 358			if ((1ull << mem_log) > mem)
 359				pr_info("%pOF: Setting PCI inbound window "
 360					"greater than memory size\n", hose->dn);
 361		}
 362
 363		piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);
 364
 365		if (setup_inbound) {
 366			/* Setup inbound memory window */
 367			out_be32(&pci->piw[win_idx].pitar,  0x00000000);
 368			out_be32(&pci->piw[win_idx].piwbar, 0x00000000);
 369			out_be32(&pci->piw[win_idx].piwar,  piwar);
 370		}
 371
 372		win_idx--;
 373		hose->dma_window_base_cur = 0x00000000;
 374		hose->dma_window_size = (resource_size_t)sz;
 375
 376		/*
 377		 * if we have >4G of memory setup second PCI inbound window to
 378		 * let devices that are 64-bit address capable to work w/o
 379		 * SWIOTLB and access the full range of memory
 380		 */
 381		if (sz != mem) {
 382			mem_log = ilog2(mem);
 383
  384			/* Size window up if we don't fit in exact power-of-2 */
 385			if ((1ull << mem_log) != mem)
 386				mem_log++;
 387
 388			piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
 389			pci64_dma_offset = 1ULL << mem_log;
 390
 391			if (setup_inbound) {
 392				/* Setup inbound memory window */
 393				out_be32(&pci->piw[win_idx].pitar,  0x00000000);
 394				out_be32(&pci->piw[win_idx].piwbear,
 395						pci64_dma_offset >> 44);
 396				out_be32(&pci->piw[win_idx].piwbar,
 397						pci64_dma_offset >> 12);
 398				out_be32(&pci->piw[win_idx].piwar,  piwar);
 399			}
 400
 401			/*
 402			 * install our own dma_set_mask handler to fixup dma_ops
 403			 * and dma_offset
 404			 */
 405			ppc_md.dma_set_mask = fsl_pci_dma_set_mask;
 406
 407			pr_info("%pOF: Setup 64-bit PCI DMA window\n", hose->dn);
 408		}
 409	} else {
 410		u64 paddr = 0;
 411
 412		if (setup_inbound) {
 413			/* Setup inbound memory window */
 414			out_be32(&pci->piw[win_idx].pitar,  paddr >> 12);
 415			out_be32(&pci->piw[win_idx].piwbar, paddr >> 12);
 416			out_be32(&pci->piw[win_idx].piwar,
 417				 (piwar | (mem_log - 1)));
 418		}
 419
 420		win_idx--;
 421		paddr += 1ull << mem_log;
 422		sz -= 1ull << mem_log;
 423
 424		if (sz) {
 425			mem_log = ilog2(sz);
 426			piwar |= (mem_log - 1);
 427
 428			if (setup_inbound) {
 429				out_be32(&pci->piw[win_idx].pitar,
 430					 paddr >> 12);
 431				out_be32(&pci->piw[win_idx].piwbar,
 432					 paddr >> 12);
 433				out_be32(&pci->piw[win_idx].piwar, piwar);
 434			}
 435
 436			win_idx--;
 437			paddr += 1ull << mem_log;
 438		}
 439
 440		hose->dma_window_base_cur = 0x00000000;
 441		hose->dma_window_size = (resource_size_t)paddr;
 442	}
 443
 444	if (hose->dma_window_size < mem) {
 445#ifdef CONFIG_SWIOTLB
 446		ppc_swiotlb_enable = 1;
 447#else
 448		pr_err("%pOF: ERROR: Memory size exceeds PCI ATMU ability to "
 449			"map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
 450			 hose->dn);
 451#endif
 452		/* adjusting outbound windows could reclaim space in mem map */
 453		if (paddr_hi < 0xffffffffull)
 454			pr_warn("%pOF: WARNING: Outbound window cfg leaves "
 455				"gaps in memory map. Adjusting the memory map "
 456				"could reduce unnecessary bounce buffering.\n",
 457				hose->dn);
 458
 459		pr_info("%pOF: DMA window size is 0x%llx\n", hose->dn,
 460			(u64)hose->dma_window_size);
 461	}
 462}
 463
 464static void setup_pci_cmd(struct pci_controller *hose)
 465{
 466	u16 cmd;
 467	int cap_x;
 468
 469	early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
 470	cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
 471		| PCI_COMMAND_IO;
 472	early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);
 473
 474	cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX);
 475	if (cap_x) {
 476		int pci_x_cmd = cap_x + PCI_X_CMD;
 477		cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
 478			| PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
 479		early_write_config_word(hose, 0, 0, pci_x_cmd, cmd);
 480	} else {
 481		early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
 482	}
 483}
 484
 485void fsl_pcibios_fixup_bus(struct pci_bus *bus)
 486{
 487	struct pci_controller *hose = pci_bus_to_host(bus);
 488	int i, is_pcie = 0, no_link;
 489
 490	/* The root complex bridge comes up with bogus resources,
 491	 * we copy the PHB ones in.
 492	 *
 493	 * With the current generic PCI code, the PHB bus no longer
 494	 * has bus->resource[0..4] set, so things are a bit more
 495	 * tricky.
 496	 */
 497
 498	if (fsl_pcie_bus_fixup)
 499		is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
 500	no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);
 501
 502	if (bus->parent == hose->bus && (is_pcie || no_link)) {
 503		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
 504			struct resource *res = bus->resource[i];
 505			struct resource *par;
 506
 507			if (!res)
 508				continue;
 509			if (i == 0)
 510				par = &hose->io_resource;
 511			else if (i < 4)
 512				par = &hose->mem_resources[i-1];
 513			else par = NULL;
 514
 515			res->start = par ? par->start : 0;
 516			res->end   = par ? par->end   : 0;
 517			res->flags = par ? par->flags : 0;
 518		}
 519	}
 520}
 521
 522static int fsl_add_bridge(struct platform_device *pdev, int is_primary)
 523{
 524	int len;
 525	struct pci_controller *hose;
 526	struct resource rsrc;
 527	const int *bus_range;
 528	u8 hdr_type, progif;
 529	u32 class_code;
 530	struct device_node *dev;
 531	struct ccsr_pci __iomem *pci;
 532	u16 temp;
 533	u32 svr = mfspr(SPRN_SVR);
 534
 535	dev = pdev->dev.of_node;
 536
 537	if (!of_device_is_available(dev)) {
 538		pr_warn("%pOF: disabled\n", dev);
 539		return -ENODEV;
 540	}
 541
 542	pr_debug("Adding PCI host bridge %pOF\n", dev);
 543
 544	/* Fetch host bridge registers address */
 545	if (of_address_to_resource(dev, 0, &rsrc)) {
 546		printk(KERN_WARNING "Can't get pci register base!");
 547		return -ENOMEM;
 548	}
 549
 550	/* Get bus range if any */
 551	bus_range = of_get_property(dev, "bus-range", &len);
 552	if (bus_range == NULL || len < 2 * sizeof(int))
 553		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
 554			" bus 0\n", dev);
 555
 556	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 557	hose = pcibios_alloc_controller(dev);
 558	if (!hose)
 559		return -ENOMEM;
 560
 561	/* set platform device as the parent */
 562	hose->parent = &pdev->dev;
 563	hose->first_busno = bus_range ? bus_range[0] : 0x0;
 564	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 565
 566	pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
 567		 (u64)rsrc.start, (u64)resource_size(&rsrc));
 568
 569	pci = hose->private_data = ioremap(rsrc.start, resource_size(&rsrc));
 570	if (!hose->private_data)
 571		goto no_bridge;
 572
 573	setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
 574			   PPC_INDIRECT_TYPE_BIG_ENDIAN);
 575
 576	if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
 577		hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 578
 579	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 580		/* use fsl_indirect_read_config for PCIe */
 581		hose->ops = &fsl_indirect_pcie_ops;
 582		/* For PCIE read HEADER_TYPE to identify controller mode */
 583		early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
 584		if ((hdr_type & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_BRIDGE)
 585			goto no_bridge;
 586
 587	} else {
 588		/* For PCI read PROG to identify controller mode */
 589		early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
 590		if ((progif & 1) &&
 591		    !of_property_read_bool(dev, "fsl,pci-agent-force-enum"))
 592			goto no_bridge;
 593	}
 594
 595	setup_pci_cmd(hose);
 596
 597	/* check PCI express link status */
 598	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 599		hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG |
 600			PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
 601		if (fsl_pcie_check_link(hose))
 602			hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 603		/* Fix Class Code to PCI_CLASS_BRIDGE_PCI_NORMAL for pre-3.0 controller */
 604		if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0) {
 605			early_read_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, &class_code);
 606			class_code &= 0xff;
 607			class_code |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
 608			early_write_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, class_code);
 609		}
 610	} else {
 611		/*
 612		 * Set PBFR(PCI Bus Function Register)[10] = 1 to
 613		 * disable the combining of crossing cacheline
 614		 * boundary requests into one burst transaction.
 615		 * PCI-X operation is not affected.
 616		 * Fix erratum PCI 5 on MPC8548
 617		 */
 618#define PCI_BUS_FUNCTION 0x44
 619#define PCI_BUS_FUNCTION_MDS 0x400	/* Master disable streaming */
 620		if (((SVR_SOC_VER(svr) == SVR_8543) ||
 621		     (SVR_SOC_VER(svr) == SVR_8545) ||
 622		     (SVR_SOC_VER(svr) == SVR_8547) ||
 623		     (SVR_SOC_VER(svr) == SVR_8548)) &&
 624		    !early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX)) {
 625			early_read_config_word(hose, 0, 0,
 626					PCI_BUS_FUNCTION, &temp);
 627			temp |= PCI_BUS_FUNCTION_MDS;
 628			early_write_config_word(hose, 0, 0,
 629					PCI_BUS_FUNCTION, temp);
 630		}
 631	}
 632
 633	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
 634		"Firmware bus number: %d->%d\n",
 635		(unsigned long long)rsrc.start, hose->first_busno,
 636		hose->last_busno);
 637
 638	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
 639		hose, hose->cfg_addr, hose->cfg_data);
 640
 641	/* Interpret the "ranges" property */
 642	/* This also maps the I/O region and sets isa_io/mem_base */
 643	pci_process_bridge_OF_ranges(hose, dev, is_primary);
 644
 645	/* Setup PEX window registers */
 646	setup_pci_atmu(hose);
 647
 648	/* Set up controller operations */
 649	setup_swiotlb_ops(hose);
 650
 651	return 0;
 652
 653no_bridge:
 654	iounmap(hose->private_data);
 655	/* unmap cfg_data & cfg_addr separately if not on same page */
 656	if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
 657	    ((unsigned long)hose->cfg_addr & PAGE_MASK))
 658		iounmap(hose->cfg_data);
 659	iounmap(hose->cfg_addr);
 660	pcibios_free_controller(hose);
 661	return -ENODEV;
 662}
 663#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
 664
 665DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID,
 666			quirk_fsl_pcie_early);
 667
 668#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
 669struct mpc83xx_pcie_priv {
 670	void __iomem *cfg_type0;
 671	void __iomem *cfg_type1;
 672	u32 dev_base;
 673};
 674
 675struct pex_inbound_window {
 676	u32 ar;
 677	u32 tar;
 678	u32 barl;
 679	u32 barh;
 680};
 681
 682/*
 683 * With the convention of u-boot, the PCIE outbound window 0 serves
 684 * as configuration transactions outbound.
 685 */
 686#define PEX_OUTWIN0_BAR		0xCA4
 687#define PEX_OUTWIN0_TAL		0xCA8
 688#define PEX_OUTWIN0_TAH		0xCAC
 689#define PEX_RC_INWIN_BASE	0xE60
 690#define PEX_RCIWARn_EN		0x1
 691
 692static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn)
 693{
 694	struct pci_controller *hose = pci_bus_to_host(bus);
 695
 696	if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK)
 697		return PCIBIOS_DEVICE_NOT_FOUND;
 698	/*
 699	 * Workaround for the HW bug: for Type 0 configure transactions the
 700	 * PCI-E controller does not check the device number bits and just
 701	 * assumes that the device number bits are 0.
 702	 */
 703	if (bus->number == hose->first_busno ||
 704			bus->primary == hose->first_busno) {
 705		if (devfn & 0xf8)
 706			return PCIBIOS_DEVICE_NOT_FOUND;
 707	}
 708
 709	if (ppc_md.pci_exclude_device) {
 710		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
 711			return PCIBIOS_DEVICE_NOT_FOUND;
 712	}
 713
 714	return PCIBIOS_SUCCESSFUL;
 715}
 716
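/*
 * Map a config access to a virtual address: accesses to the root bus go
 * straight through the Type 0 region, while accesses to downstream buses
 * reuse outbound window 0 (see the u-boot convention noted above) - the bus
 * and devfn are encoded into the window's target address via
 * PEX_OUTWIN0_TAL, which is only rewritten when the target device changes,
 * and the access then goes through the Type 1 region.
 */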
 717static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus,
 718					    unsigned int devfn, int offset)
 719{
 720	struct pci_controller *hose = pci_bus_to_host(bus);
 721	struct mpc83xx_pcie_priv *pcie = hose->dn->data;
 722	u32 dev_base = bus->number << 24 | devfn << 16;
 723	int ret;
 724
 725	ret = mpc83xx_pcie_exclude_device(bus, devfn);
 726	if (ret)
 727		return NULL;
 728
 729	offset &= 0xfff;
 730
 731	/* Type 0 */
 732	if (bus->number == hose->first_busno)
 733		return pcie->cfg_type0 + offset;
 734
 735	if (pcie->dev_base == dev_base)
 736		goto mapped;
 737
 738	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, dev_base);
 739
 740	pcie->dev_base = dev_base;
 741mapped:
 742	return pcie->cfg_type1 + offset;
 743}
 744
 745static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
 746				     int offset, int len, u32 val)
 747{
 748	struct pci_controller *hose = pci_bus_to_host(bus);
 749
 750	/* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
 751	if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
 752		val &= 0xffffff00;
 753
 754	return pci_generic_config_write(bus, devfn, offset, len, val);
 755}
 756
 757static struct pci_ops mpc83xx_pcie_ops = {
 758	.map_bus = mpc83xx_pcie_remap_cfg,
 759	.read = pci_generic_config_read,
 760	.write = mpc83xx_pcie_write_config,
 761};
 762
 763static int __init mpc83xx_pcie_setup(struct pci_controller *hose,
 764				     struct resource *reg)
 765{
 766	struct mpc83xx_pcie_priv *pcie;
 767	u32 cfg_bar;
 768	int ret = -ENOMEM;
 769
 770	pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
 771	if (!pcie)
 772		return ret;
 773
 774	pcie->cfg_type0 = ioremap(reg->start, resource_size(reg));
 775	if (!pcie->cfg_type0)
 776		goto err0;
 777
 778	cfg_bar = in_le32(pcie->cfg_type0 + PEX_OUTWIN0_BAR);
 779	if (!cfg_bar) {
 780		/* PCI-E isn't configured. */
 781		ret = -ENODEV;
 782		goto err1;
 783	}
 784
 785	pcie->cfg_type1 = ioremap(cfg_bar, 0x1000);
 786	if (!pcie->cfg_type1)
 787		goto err1;
 788
 789	WARN_ON(hose->dn->data);
 790	hose->dn->data = pcie;
 791	hose->ops = &mpc83xx_pcie_ops;
 792	hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 793
 794	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0);
 795	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0);
 796
 797	if (fsl_pcie_check_link(hose))
 798		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 799
 800	return 0;
 801err1:
 802	iounmap(pcie->cfg_type0);
 803err0:
 804	kfree(pcie);
 805	return ret;
 806
 807}
 808
 809int __init mpc83xx_add_bridge(struct device_node *dev)
 810{
 811	int ret;
 812	int len;
 813	struct pci_controller *hose;
 814	struct resource rsrc_reg;
 815	struct resource rsrc_cfg;
 816	const int *bus_range;
 817	int primary;
 818
 819	is_mpc83xx_pci = 1;
 820
 821	if (!of_device_is_available(dev)) {
 822		pr_warn("%pOF: disabled by the firmware.\n",
 823			dev);
 824		return -ENODEV;
 825	}
 826	pr_debug("Adding PCI host bridge %pOF\n", dev);
 827
 828	/* Fetch host bridge registers address */
 829	if (of_address_to_resource(dev, 0, &rsrc_reg)) {
 830		printk(KERN_WARNING "Can't get pci register base!\n");
 831		return -ENOMEM;
 832	}
 833
 834	memset(&rsrc_cfg, 0, sizeof(rsrc_cfg));
 835
 836	if (of_address_to_resource(dev, 1, &rsrc_cfg)) {
 837		printk(KERN_WARNING
 838			"No pci config register base in dev tree, "
 839			"using default\n");
 840		/*
 841		 * MPC83xx supports up to two host controllers
 842		 * 	one at 0x8500 has config space registers at 0x8300
 843		 * 	one at 0x8600 has config space registers at 0x8380
 844		 */
 845		if ((rsrc_reg.start & 0xfffff) == 0x8500)
 846			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300;
 847		else if ((rsrc_reg.start & 0xfffff) == 0x8600)
 848			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380;
 849	}
 850	/*
 851	 * Controller at offset 0x8500 is primary
 852	 */
 853	if ((rsrc_reg.start & 0xfffff) == 0x8500)
 854		primary = 1;
 855	else
 856		primary = 0;
 857
 858	/* Get bus range if any */
 859	bus_range = of_get_property(dev, "bus-range", &len);
 860	if (bus_range == NULL || len < 2 * sizeof(int)) {
 861		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
 862		       " bus 0\n", dev);
 863	}
 864
 865	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 866	hose = pcibios_alloc_controller(dev);
 867	if (!hose)
 868		return -ENOMEM;
 869
 870	hose->first_busno = bus_range ? bus_range[0] : 0;
 871	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 872
 873	if (of_device_is_compatible(dev, "fsl,mpc8314-pcie")) {
 874		ret = mpc83xx_pcie_setup(hose, &rsrc_reg);
 875		if (ret)
 876			goto err0;
 877	} else {
 878		setup_indirect_pci(hose, rsrc_cfg.start,
 879				   rsrc_cfg.start + 4, 0);
 880	}
 881
 882	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
 883	       "Firmware bus number: %d->%d\n",
 884	       (unsigned long long)rsrc_reg.start, hose->first_busno,
 885	       hose->last_busno);
 886
 887	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
 888	    hose, hose->cfg_addr, hose->cfg_data);
 889
 890	/* Interpret the "ranges" property */
 891	/* This also maps the I/O region and sets isa_io/mem_base */
 892	pci_process_bridge_OF_ranges(hose, dev, primary);
 893
 894	return 0;
 895err0:
 896	pcibios_free_controller(hose);
 897	return ret;
 898}
 899#endif /* CONFIG_PPC_83xx */
 900
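/*
 * Return the PCI address at which the controller's IMMR/CCSR space is
 * visible to PCI masters (used, for example, when pointing MSI writes at
 * the MSIIR register).  On 83xx this walks the root-complex inbound windows
 * for one targeting the IMMR base; on 85xx/86xx it simply reads PEXCSRBAR
 * (BAR0 of the host bridge) and masks off the low flag bits.
 */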
 901u64 fsl_pci_immrbar_base(struct pci_controller *hose)
 902{
 903#ifdef CONFIG_PPC_83xx
 904	if (is_mpc83xx_pci) {
 905		struct mpc83xx_pcie_priv *pcie = hose->dn->data;
 906		struct pex_inbound_window *in;
 907		int i;
 908
 909		/* Walk the Root Complex Inbound windows to match IMMR base */
 910		in = pcie->cfg_type0 + PEX_RC_INWIN_BASE;
 911		for (i = 0; i < 4; i++) {
 912			/* not enabled, skip */
 913			if (!(in_le32(&in[i].ar) & PEX_RCIWARn_EN))
 914				continue;
 915
 916			if (get_immrbase() == in_le32(&in[i].tar))
 917				return (u64)in_le32(&in[i].barh) << 32 |
 918					    in_le32(&in[i].barl);
 919		}
 920
 921		printk(KERN_WARNING "could not find PCI BAR matching IMMR\n");
 922	}
 923#endif
 924
 925#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 926	if (!is_mpc83xx_pci) {
 927		u32 base;
 928
 929		pci_bus_read_config_dword(hose->bus,
 930			PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base);
 931
 932		/*
 933		 * For PEXCSRBAR, bit 3-0 indicate prefetchable and
 934		 * address type. So when getting base address, these
 935		 * bits should be masked
 936		 */
 937		base &= PCI_BASE_ADDRESS_MEM_MASK;
 938
 939		return base;
 940	}
 941#endif
 942
 943	return 0;
 944}
 945
 946#ifdef CONFIG_PPC_E500
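/*
 * Machine-check fixup for loads from PCI memory space: when a read is
 * terminated with an error (e.g. a master abort because no device responds),
 * the handler below emulates the faulting load by stuffing the all-ones
 * value a PCI read would normally return into the destination register
 * (sign-extended for the algebraic forms) and applying the address update
 * for the "update" forms; the caller then advances the return address past
 * the instruction.
 */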
 947static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
 948{
 949	unsigned int rd, ra, rb, d;
 950
 951	rd = get_rt(inst);
 952	ra = get_ra(inst);
 953	rb = get_rb(inst);
 954	d = get_d(inst);
 955
 956	switch (get_op(inst)) {
 957	case 31:
 958		switch (get_xop(inst)) {
 959		case OP_31_XOP_LWZX:
 960		case OP_31_XOP_LWBRX:
 961			regs->gpr[rd] = 0xffffffff;
 962			break;
 963
 964		case OP_31_XOP_LWZUX:
 965			regs->gpr[rd] = 0xffffffff;
 966			regs->gpr[ra] += regs->gpr[rb];
 967			break;
 968
 969		case OP_31_XOP_LBZX:
 970			regs->gpr[rd] = 0xff;
 971			break;
 972
 973		case OP_31_XOP_LBZUX:
 974			regs->gpr[rd] = 0xff;
 975			regs->gpr[ra] += regs->gpr[rb];
 976			break;
 977
 978		case OP_31_XOP_LHZX:
 979		case OP_31_XOP_LHBRX:
 980			regs->gpr[rd] = 0xffff;
 981			break;
 982
 983		case OP_31_XOP_LHZUX:
 984			regs->gpr[rd] = 0xffff;
 985			regs->gpr[ra] += regs->gpr[rb];
 986			break;
 987
 988		case OP_31_XOP_LHAX:
 989			regs->gpr[rd] = ~0UL;
 990			break;
 991
 992		case OP_31_XOP_LHAUX:
 993			regs->gpr[rd] = ~0UL;
 994			regs->gpr[ra] += regs->gpr[rb];
 995			break;
 996
 997		default:
 998			return 0;
 999		}
1000		break;
1001
1002	case OP_LWZ:
1003		regs->gpr[rd] = 0xffffffff;
1004		break;
1005
1006	case OP_LWZU:
1007		regs->gpr[rd] = 0xffffffff;
1008		regs->gpr[ra] += (s16)d;
1009		break;
1010
1011	case OP_LBZ:
1012		regs->gpr[rd] = 0xff;
1013		break;
1014
1015	case OP_LBZU:
1016		regs->gpr[rd] = 0xff;
1017		regs->gpr[ra] += (s16)d;
1018		break;
1019
1020	case OP_LHZ:
1021		regs->gpr[rd] = 0xffff;
1022		break;
1023
1024	case OP_LHZU:
1025		regs->gpr[rd] = 0xffff;
1026		regs->gpr[ra] += (s16)d;
1027		break;
1028
1029	case OP_LHA:
1030		regs->gpr[rd] = ~0UL;
1031		break;
1032
1033	case OP_LHAU:
1034		regs->gpr[rd] = ~0UL;
1035		regs->gpr[ra] += (s16)d;
1036		break;
1037
1038	default:
1039		return 0;
1040	}
1041
1042	return 1;
1043}
1044
1045static int is_in_pci_mem_space(phys_addr_t addr)
1046{
1047	struct pci_controller *hose;
1048	struct resource *res;
1049	int i;
1050
1051	list_for_each_entry(hose, &hose_list, list_node) {
1052		if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG))
1053			continue;
1054
1055		for (i = 0; i < 3; i++) {
1056			res = &hose->mem_resources[i];
1057			if ((res->flags & IORESOURCE_MEM) &&
1058				addr >= res->start && addr <= res->end)
1059				return 1;
1060		}
1061	}
1062	return 0;
1063}
1064
1065int fsl_pci_mcheck_exception(struct pt_regs *regs)
1066{
1067	u32 inst;
1068	int ret;
1069	phys_addr_t addr = 0;
1070
1071	/* Let KVM/QEMU deal with the exception */
1072	if (regs->msr & MSR_GS)
1073		return 0;
1074
1075#ifdef CONFIG_PHYS_64BIT
1076	addr = mfspr(SPRN_MCARU);
1077	addr <<= 32;
1078#endif
1079	addr += mfspr(SPRN_MCAR);
1080
1081	if (is_in_pci_mem_space(addr)) {
1082		if (user_mode(regs))
1083			ret = copy_from_user_nofault(&inst,
1084					(void __user *)regs->nip, sizeof(inst));
1085		else
 1086			ret = get_kernel_nofault(inst, (void *)regs->nip);
1087
1088		if (!ret && mcheck_handle_load(regs, inst)) {
1089			regs_add_return_ip(regs, 4);
1090			return 1;
1091		}
1092	}
1093
1094	return 0;
1095}
1096#endif
1097
1098#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
1099static const struct of_device_id pci_ids[] = {
1100	{ .compatible = "fsl,mpc8540-pci", },
1101	{ .compatible = "fsl,mpc8548-pcie", },
1102	{ .compatible = "fsl,mpc8610-pci", },
1103	{ .compatible = "fsl,mpc8641-pcie", },
1104	{ .compatible = "fsl,qoriq-pcie", },
1105	{ .compatible = "fsl,qoriq-pcie-v2.1", },
1106	{ .compatible = "fsl,qoriq-pcie-v2.2", },
1107	{ .compatible = "fsl,qoriq-pcie-v2.3", },
1108	{ .compatible = "fsl,qoriq-pcie-v2.4", },
1109	{ .compatible = "fsl,qoriq-pcie-v3.0", },
1110
1111	/*
1112	 * The following entries are for compatibility with older device
1113	 * trees.
1114	 */
1115	{ .compatible = "fsl,p1022-pcie", },
1116	{ .compatible = "fsl,p4080-pcie", },
1117
1118	{},
1119};
1120
1121struct device_node *fsl_pci_primary;
1122
1123void __init fsl_pci_assign_primary(void)
1124{
1125	struct device_node *np;
1126
1127	/* Callers can specify the primary bus using other means. */
1128	if (fsl_pci_primary)
1129		return;
1130
1131	/* If a PCI host bridge contains an ISA node, it's primary. */
1132	np = of_find_node_by_type(NULL, "isa");
1133	while ((fsl_pci_primary = of_get_parent(np))) {
1134		of_node_put(np);
1135		np = fsl_pci_primary;
1136
1137		if (of_match_node(pci_ids, np) && of_device_is_available(np))
1138			return;
1139	}
1140
1141	/*
1142	 * If there's no PCI host bridge with ISA then check for
1143	 * PCI host bridge with alias "pci0" (first PCI host bridge).
1144	 */
1145	np = of_find_node_by_path("pci0");
1146	if (np && of_match_node(pci_ids, np) && of_device_is_available(np)) {
1147		fsl_pci_primary = np;
1148		of_node_put(np);
1149		return;
1150	}
1151	if (np)
1152		of_node_put(np);
1153
1154	/*
1155	 * If there's no PCI host bridge with ISA, arbitrarily
1156	 * designate one as primary.  This can go away once
1157	 * various bugs with primary-less systems are fixed.
1158	 */
1159	for_each_matching_node(np, pci_ids) {
1160		if (of_device_is_available(np)) {
 1161			fsl_pci_primary = np;
1162			return;
1163		}
1164	}
1165}
1166
1167#ifdef CONFIG_PM_SLEEP
1168static irqreturn_t fsl_pci_pme_handle(int irq, void *dev_id)
1169{
1170	struct pci_controller *hose = dev_id;
1171	struct ccsr_pci __iomem *pci = hose->private_data;
1172	u32 dr;
1173
1174	dr = in_be32(&pci->pex_pme_mes_dr);
1175	if (!dr)
1176		return IRQ_NONE;
1177
1178	out_be32(&pci->pex_pme_mes_dr, dr);
1179
1180	return IRQ_HANDLED;
1181}
1182
1183static int fsl_pci_pme_probe(struct pci_controller *hose)
1184{
1185	struct ccsr_pci __iomem *pci;
1186	struct pci_dev *dev;
1187	int pme_irq;
1188	int res;
1189	u16 pms;
1190
1191	/* Get hose's pci_dev */
1192	dev = list_first_entry(&hose->bus->devices, typeof(*dev), bus_list);
1193
1194	/* PME Disable */
1195	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1196	pms &= ~PCI_PM_CTRL_PME_ENABLE;
1197	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1198
1199	pme_irq = irq_of_parse_and_map(hose->dn, 0);
1200	if (!pme_irq) {
1201		dev_err(&dev->dev, "Failed to map PME interrupt.\n");
1202
1203		return -ENXIO;
1204	}
1205
1206	res = devm_request_irq(hose->parent, pme_irq,
1207			fsl_pci_pme_handle,
1208			IRQF_SHARED,
1209			"[PCI] PME", hose);
1210	if (res < 0) {
1211		dev_err(&dev->dev, "Unable to request irq %d for PME\n", pme_irq);
1212		irq_dispose_mapping(pme_irq);
1213
1214		return -ENODEV;
1215	}
1216
1217	pci = hose->private_data;
1218
1219	/* Enable PTOD, ENL23D & EXL23D */
1220	clrbits32(&pci->pex_pme_mes_disr,
1221		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1222
1223	out_be32(&pci->pex_pme_mes_ier, 0);
1224	setbits32(&pci->pex_pme_mes_ier,
1225		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1226
1227	/* PME Enable */
1228	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1229	pms |= PCI_PM_CTRL_PME_ENABLE;
1230	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1231
1232	return 0;
1233}
1234
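/*
 * Broadcast a PME_Turn_Off message (PEX_PMCR[PTOMR]) and poll the PME/
 * message detect register for up to roughly 150 ms for the resulting event
 * (the turn-off acknowledge), clearing it by writing it back.
 */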
1235static void send_pme_turnoff_message(struct pci_controller *hose)
1236{
1237	struct ccsr_pci __iomem *pci = hose->private_data;
1238	u32 dr;
1239	int i;
1240
1241	/* Send PME_Turn_Off Message Request */
1242	setbits32(&pci->pex_pmcr, PEX_PMCR_PTOMR);
1243
 1244	/* Wait for the turn-off to complete */
1245	for (i = 0; i < 150; i++) {
1246		dr = in_be32(&pci->pex_pme_mes_dr);
1247		if (dr) {
1248			out_be32(&pci->pex_pme_mes_dr, dr);
1249			break;
1250		}
1251
1252		udelay(1000);
1253	}
1254}
1255
1256static void fsl_pci_syscore_do_suspend(struct pci_controller *hose)
1257{
1258	send_pme_turnoff_message(hose);
1259}
1260
1261static int fsl_pci_syscore_suspend(void)
1262{
1263	struct pci_controller *hose, *tmp;
1264
1265	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1266		fsl_pci_syscore_do_suspend(hose);
1267
1268	return 0;
1269}
1270
1271static void fsl_pci_syscore_do_resume(struct pci_controller *hose)
1272{
1273	struct ccsr_pci __iomem *pci = hose->private_data;
1274	u32 dr;
1275	int i;
1276
1277	/* Send Exit L2 State Message */
1278	setbits32(&pci->pex_pmcr, PEX_PMCR_EXL2S);
1279
 1280	/* Wait for the L2 state exit to complete */
1281	for (i = 0; i < 150; i++) {
1282		dr = in_be32(&pci->pex_pme_mes_dr);
1283		if (dr) {
1284			out_be32(&pci->pex_pme_mes_dr, dr);
1285			break;
1286		}
1287
1288		udelay(1000);
1289	}
1290
1291	setup_pci_atmu(hose);
1292}
1293
1294static void fsl_pci_syscore_resume(void)
1295{
1296	struct pci_controller *hose, *tmp;
1297
1298	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1299		fsl_pci_syscore_do_resume(hose);
1300}
1301
1302static struct syscore_ops pci_syscore_pm_ops = {
1303	.suspend = fsl_pci_syscore_suspend,
1304	.resume = fsl_pci_syscore_resume,
1305};
1306#endif
1307
1308void fsl_pcibios_fixup_phb(struct pci_controller *phb)
1309{
1310#ifdef CONFIG_PM_SLEEP
1311	fsl_pci_pme_probe(phb);
1312#endif
1313}
1314
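/*
 * Register a companion "mpc85xx-pci-edac" platform device that inherits
 * this controller's resources and of_node, so the EDAC PCI error driver can
 * attach and handle PCI error reporting for the bridge.
 */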
1315static int add_err_dev(struct platform_device *pdev)
1316{
1317	struct platform_device *errdev;
1318	struct mpc85xx_edac_pci_plat_data pd = {
1319		.of_node = pdev->dev.of_node
1320	};
1321
1322	errdev = platform_device_register_resndata(&pdev->dev,
1323						   "mpc85xx-pci-edac",
1324						   PLATFORM_DEVID_AUTO,
1325						   pdev->resource,
1326						   pdev->num_resources,
1327						   &pd, sizeof(pd));
1328
1329	return PTR_ERR_OR_ZERO(errdev);
1330}
1331
1332static int fsl_pci_probe(struct platform_device *pdev)
1333{
1334	struct device_node *node;
1335	int ret;
1336
1337	node = pdev->dev.of_node;
1338	ret = fsl_add_bridge(pdev, fsl_pci_primary == node);
1339	if (ret)
1340		return ret;
1341
1342	ret = add_err_dev(pdev);
1343	if (ret)
1344		dev_err(&pdev->dev, "couldn't register error device: %d\n",
1345			ret);
1346
1347	return 0;
1348}
1349
1350static struct platform_driver fsl_pci_driver = {
1351	.driver = {
1352		.name = "fsl-pci",
1353		.of_match_table = pci_ids,
1354	},
1355	.probe = fsl_pci_probe,
1356	.driver_managed_dma = true,
1357};
1358
1359static int __init fsl_pci_init(void)
1360{
1361#ifdef CONFIG_PM_SLEEP
1362	register_syscore_ops(&pci_syscore_pm_ops);
1363#endif
1364	return platform_driver_register(&fsl_pci_driver);
1365}
1366arch_initcall(fsl_pci_init);
1367#endif
v4.17
 
   1/*
   2 * MPC83xx/85xx/86xx PCI/PCIE support routing.
   3 *
   4 * Copyright 2007-2012 Freescale Semiconductor, Inc.
   5 * Copyright 2008-2009 MontaVista Software, Inc.
   6 *
   7 * Initial author: Xianghua Xiao <x.xiao@freescale.com>
   8 * Recode: ZHANG WEI <wei.zhang@freescale.com>
   9 * Rewrite the routing for Frescale PCI and PCI Express
  10 * 	Roy Zang <tie-fei.zang@freescale.com>
  11 * MPC83xx PCI-Express support:
  12 * 	Tony Li <tony.li@freescale.com>
  13 * 	Anton Vorontsov <avorontsov@ru.mvista.com>
  14 *
  15 * This program is free software; you can redistribute  it and/or modify it
  16 * under  the terms of  the GNU General  Public License as published by the
  17 * Free Software Foundation;  either version 2 of the  License, or (at your
  18 * option) any later version.
  19 */
  20#include <linux/kernel.h>
  21#include <linux/pci.h>
  22#include <linux/delay.h>
  23#include <linux/string.h>
  24#include <linux/fsl/edac.h>
  25#include <linux/init.h>
  26#include <linux/interrupt.h>
  27#include <linux/memblock.h>
  28#include <linux/log2.h>
 
 
  29#include <linux/platform_device.h>
  30#include <linux/slab.h>
  31#include <linux/suspend.h>
  32#include <linux/syscore_ops.h>
  33#include <linux/uaccess.h>
  34
  35#include <asm/io.h>
  36#include <asm/prom.h>
  37#include <asm/pci-bridge.h>
  38#include <asm/ppc-pci.h>
  39#include <asm/machdep.h>
  40#include <asm/mpc85xx.h>
  41#include <asm/disassemble.h>
  42#include <asm/ppc-opcode.h>
 
 
  43#include <sysdev/fsl_soc.h>
  44#include <sysdev/fsl_pci.h>
  45
  46static int fsl_pcie_bus_fixup, is_mpc83xx_pci;
  47
  48static void quirk_fsl_pcie_early(struct pci_dev *dev)
  49{
  50	u8 hdr_type;
  51
  52	/* if we aren't a PCIe don't bother */
  53	if (!pci_is_pcie(dev))
  54		return;
  55
  56	/* if we aren't in host mode don't bother */
  57	pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
  58	if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
  59		return;
  60
  61	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
  62	fsl_pcie_bus_fixup = 1;
  63	return;
  64}
  65
  66static int fsl_indirect_read_config(struct pci_bus *, unsigned int,
  67				    int, int, u32 *);
  68
  69static int fsl_pcie_check_link(struct pci_controller *hose)
  70{
  71	u32 val = 0;
  72
  73	if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
  74		if (hose->ops->read == fsl_indirect_read_config)
  75			__indirect_read_config(hose, hose->first_busno, 0,
  76					       PCIE_LTSSM, 4, &val);
  77		else
  78			early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
  79		if (val < PCIE_LTSSM_L0)
  80			return 1;
  81	} else {
  82		struct ccsr_pci __iomem *pci = hose->private_data;
  83		/* for PCIe IP rev 3.0 or greater use CSR0 for link state */
  84		val = (in_be32(&pci->pex_csr0) & PEX_CSR0_LTSSM_MASK)
  85				>> PEX_CSR0_LTSSM_SHIFT;
  86		if (val != PEX_CSR0_LTSSM_L0)
  87			return 1;
  88	}
  89
  90	return 0;
  91}
  92
  93static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
  94				    int offset, int len, u32 *val)
  95{
  96	struct pci_controller *hose = pci_bus_to_host(bus);
  97
  98	if (fsl_pcie_check_link(hose))
  99		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 100	else
 101		hose->indirect_type &= ~PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 102
 103	return indirect_read_config(bus, devfn, offset, len, val);
 104}
 105
 106#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 107
 108static struct pci_ops fsl_indirect_pcie_ops =
 109{
 110	.read = fsl_indirect_read_config,
 111	.write = indirect_write_config,
 112};
 113
 114static u64 pci64_dma_offset;
 115
 116#ifdef CONFIG_SWIOTLB
 
 
 
 
 
 
 
 
 117static void setup_swiotlb_ops(struct pci_controller *hose)
 118{
 119	if (ppc_swiotlb_enable) {
 120		hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
 121		set_pci_dma_ops(&powerpc_swiotlb_dma_ops);
 122	}
 123}
 124#else
 125static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
 126#endif
 127
 128static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
 129{
 130	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
 131		return -EIO;
 132
 133	/*
 134	 * Fix up PCI devices that are able to DMA to the large inbound
 135	 * mapping that allows addressing any RAM address from across PCI.
 136	 */
 137	if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
 138		set_dma_ops(dev, &dma_nommu_ops);
 139		set_dma_offset(dev, pci64_dma_offset);
 140	}
 141
 142	*dev->dma_mask = dma_mask;
 143	return 0;
 144}
 145
 146static int setup_one_atmu(struct ccsr_pci __iomem *pci,
 147	unsigned int index, const struct resource *res,
 148	resource_size_t offset)
 149{
 150	resource_size_t pci_addr = res->start - offset;
 151	resource_size_t phys_addr = res->start;
 152	resource_size_t size = resource_size(res);
 153	u32 flags = 0x80044000; /* enable & mem R/W */
 154	unsigned int i;
 155
 156	pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
 157		(u64)res->start, (u64)size);
 158
 159	if (res->flags & IORESOURCE_PREFETCH)
 160		flags |= 0x10000000; /* enable relaxed ordering */
 161
 162	for (i = 0; size > 0; i++) {
 163		unsigned int bits = min_t(u32, ilog2(size),
 164					__ffs(pci_addr | phys_addr));
 165
 166		if (index + i >= 5)
 167			return -1;
 168
 169		out_be32(&pci->pow[index + i].potar, pci_addr >> 12);
 170		out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44);
 171		out_be32(&pci->pow[index + i].powbar, phys_addr >> 12);
 172		out_be32(&pci->pow[index + i].powar, flags | (bits - 1));
 173
 174		pci_addr += (resource_size_t)1U << bits;
 175		phys_addr += (resource_size_t)1U << bits;
 176		size -= (resource_size_t)1U << bits;
 177	}
 178
 179	return i;
 180}
 181
 182static bool is_kdump(void)
 183{
 184	struct device_node *node;
 
 185
 186	node = of_find_node_by_type(NULL, "memory");
 187	if (!node) {
 188		WARN_ON_ONCE(1);
 189		return false;
 190	}
 191
 192	return of_property_read_bool(node, "linux,usable-memory");
 
 
 
 193}
 194
 195/* atmu setup for fsl pci/pcie controller */
 196static void setup_pci_atmu(struct pci_controller *hose)
 197{
 198	struct ccsr_pci __iomem *pci = hose->private_data;
 199	int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
 200	u64 mem, sz, paddr_hi = 0;
 201	u64 offset = 0, paddr_lo = ULLONG_MAX;
 202	u32 pcicsrbar = 0, pcicsrbar_sz;
 203	u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
 204			PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
 205	const u64 *reg;
 206	int len;
 207	bool setup_inbound;
 208
 209	/*
 210	 * If this is kdump, we don't want to trigger a bunch of PCI
 211	 * errors by closing the window on in-flight DMA.
 212	 *
 213	 * We still run most of the function's logic so that things like
 214	 * hose->dma_window_size still get set.
 215	 */
 216	setup_inbound = !is_kdump();
 217
 218	if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
 219		/*
 220		 * BSC9132 Rev1.0 has an issue where all the PEX inbound
 221		 * windows have implemented the default target value as 0xf
 222		 * for CCSR space.In all Freescale legacy devices the target
 223		 * of 0xf is reserved for local memory space. 9132 Rev1.0
 224		 * now has local mempry space mapped to target 0x0 instead of
 225		 * 0xf. Hence adding a workaround to remove the target 0xf
 226		 * defined for memory space from Inbound window attributes.
 227		 */
 228		piwar &= ~PIWAR_TGI_LOCAL;
 229	}
 230
 231	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 232		if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
 233			win_idx = 2;
 234			start_idx = 0;
 235			end_idx = 3;
 236		}
 237	}
 238
 239	/* Disable all windows (except powar0 since it's ignored) */
 240	for(i = 1; i < 5; i++)
 241		out_be32(&pci->pow[i].powar, 0);
 242
 243	if (setup_inbound) {
 244		for (i = start_idx; i < end_idx; i++)
 245			out_be32(&pci->piw[i].piwar, 0);
 246	}
 247
 248	/* Setup outbound MEM window */
 249	for(i = 0, j = 1; i < 3; i++) {
 250		if (!(hose->mem_resources[i].flags & IORESOURCE_MEM))
 251			continue;
 252
 253		paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start);
 254		paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end);
 255
 256		/* We assume all memory resources have the same offset */
 257		offset = hose->mem_offset[i];
 258		n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset);
 259
 260		if (n < 0 || j >= 5) {
 261			pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
 262			hose->mem_resources[i].flags |= IORESOURCE_DISABLED;
 263		} else
 264			j += n;
 265	}
 266
 267	/* Setup outbound IO window */
 268	if (hose->io_resource.flags & IORESOURCE_IO) {
 269		if (j >= 5) {
 270			pr_err("Ran out of outbound PCI ATMUs for IO resource\n");
 271		} else {
 272			pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
 273				 "phy base 0x%016llx.\n",
 274				 (u64)hose->io_resource.start,
 275				 (u64)resource_size(&hose->io_resource),
 276				 (u64)hose->io_base_phys);
 277			out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
 278			out_be32(&pci->pow[j].potear, 0);
 279			out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
 280			/* Enable, IO R/W */
 281			out_be32(&pci->pow[j].powar, 0x80088000
 282				| (ilog2(hose->io_resource.end
 283				- hose->io_resource.start + 1) - 1));
 284		}
 285	}
 286
 287	/* convert to pci address space */
 288	paddr_hi -= offset;
 289	paddr_lo -= offset;
 290
 291	if (paddr_hi == paddr_lo) {
 292		pr_err("%pOF: No outbound window space\n", hose->dn);
 293		return;
 294	}
 295
 296	if (paddr_lo == 0) {
 297		pr_err("%pOF: No space for inbound window\n", hose->dn);
 298		return;
 299	}
 300
 301	/* setup PCSRBAR/PEXCSRBAR */
 302	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
 303	early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz);
 304	pcicsrbar_sz = ~pcicsrbar_sz + 1;
 305
 306	if (paddr_hi < (0x100000000ull - pcicsrbar_sz) ||
 307		(paddr_lo > 0x100000000ull))
 308		pcicsrbar = 0x100000000ull - pcicsrbar_sz;
 309	else
 310		pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz;
 311	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar);
 312
 313	paddr_lo = min(paddr_lo, (u64)pcicsrbar);
 314
 315	pr_info("%pOF: PCICSRBAR @ 0x%x\n", hose->dn, pcicsrbar);
 316
 317	/* Setup inbound mem window */
 318	mem = memblock_end_of_DRAM();
 319	pr_info("%s: end of DRAM %llx\n", __func__, mem);
 320
 321	/*
 322	 * The msi-address-64 property, if it exists, indicates the physical
 323	 * address of the MSIIR register.  Normally, this register is located
 324	 * inside CCSR, so the ATMU that covers all of CCSR is used. But if
 325	 * this property exists, then we normally need to create a new ATMU
 326	 * for it.  For now, however, we cheat.  The only entity that creates
 327	 * this property is the Freescale hypervisor, and the address is
 328	 * specified in the partition configuration.  Typically, the address
 329	 * is located in the page immediately after the end of DDR.  If so, we
 330	 * can avoid allocating a new ATMU by extending the DDR ATMU by one
 331	 * page.
 332	 */
 333	reg = of_get_property(hose->dn, "msi-address-64", &len);
 334	if (reg && (len == sizeof(u64))) {
 335		u64 address = be64_to_cpup(reg);
 336
 337		if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
 338			pr_info("%pOF: extending DDR ATMU to cover MSIIR", hose->dn);
 339			mem += PAGE_SIZE;
 340		} else {
 341			/* TODO: Create a new ATMU for MSIIR */
 342			pr_warn("%pOF: msi-address-64 address of %llx is "
 343				"unsupported\n", hose->dn, address);
 344		}
 345	}
 346
 347	sz = min(mem, paddr_lo);
 348	mem_log = ilog2(sz);
 349
 350	/* PCIe can overmap inbound & outbound since RX & TX are separated */
 351	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 352		/* Size window to exact size if power-of-two or one size up */
 353		if ((1ull << mem_log) != mem) {
 354			mem_log++;
 355			if ((1ull << mem_log) > mem)
 356				pr_info("%pOF: Setting PCI inbound window "
 357					"greater than memory size\n", hose->dn);
 358		}
 359
 360		piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);
 361
 362		if (setup_inbound) {
 363			/* Setup inbound memory window */
 364			out_be32(&pci->piw[win_idx].pitar,  0x00000000);
 365			out_be32(&pci->piw[win_idx].piwbar, 0x00000000);
 366			out_be32(&pci->piw[win_idx].piwar,  piwar);
 367		}
 368
 369		win_idx--;
 370		hose->dma_window_base_cur = 0x00000000;
 371		hose->dma_window_size = (resource_size_t)sz;
 372
 373		/*
 374		 * if we have >4G of memory setup second PCI inbound window to
 375		 * let devices that are 64-bit address capable to work w/o
 376		 * SWIOTLB and access the full range of memory
 377		 */
 378		if (sz != mem) {
 379			mem_log = ilog2(mem);
 380
 381			/* Size window up if we dont fit in exact power-of-2 */
 382			if ((1ull << mem_log) != mem)
 383				mem_log++;
 384
 385			piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
 386			pci64_dma_offset = 1ULL << mem_log;
 387
 388			if (setup_inbound) {
 389				/* Setup inbound memory window */
 390				out_be32(&pci->piw[win_idx].pitar,  0x00000000);
 391				out_be32(&pci->piw[win_idx].piwbear,
 392						pci64_dma_offset >> 44);
 393				out_be32(&pci->piw[win_idx].piwbar,
 394						pci64_dma_offset >> 12);
 395				out_be32(&pci->piw[win_idx].piwar,  piwar);
 396			}
 397
 398			/*
 399			 * install our own dma_set_mask handler to fixup dma_ops
 400			 * and dma_offset
 401			 */
 402			ppc_md.dma_set_mask = fsl_pci_dma_set_mask;
 403
 404			pr_info("%pOF: Setup 64-bit PCI DMA window\n", hose->dn);
 405		}
 406	} else {
 407		u64 paddr = 0;
 408
 409		if (setup_inbound) {
 410			/* Setup inbound memory window */
 411			out_be32(&pci->piw[win_idx].pitar,  paddr >> 12);
 412			out_be32(&pci->piw[win_idx].piwbar, paddr >> 12);
 413			out_be32(&pci->piw[win_idx].piwar,
 414				 (piwar | (mem_log - 1)));
 415		}
 416
 417		win_idx--;
 418		paddr += 1ull << mem_log;
 419		sz -= 1ull << mem_log;
 420
 421		if (sz) {
 422			mem_log = ilog2(sz);
 423			piwar |= (mem_log - 1);
 424
 425			if (setup_inbound) {
 426				out_be32(&pci->piw[win_idx].pitar,
 427					 paddr >> 12);
 428				out_be32(&pci->piw[win_idx].piwbar,
 429					 paddr >> 12);
 430				out_be32(&pci->piw[win_idx].piwar, piwar);
 431			}
 432
 433			win_idx--;
 434			paddr += 1ull << mem_log;
 435		}
 436
 437		hose->dma_window_base_cur = 0x00000000;
 438		hose->dma_window_size = (resource_size_t)paddr;
 439	}
 440
 441	if (hose->dma_window_size < mem) {
 442#ifdef CONFIG_SWIOTLB
 443		ppc_swiotlb_enable = 1;
 444#else
 445		pr_err("%pOF: ERROR: Memory size exceeds PCI ATMU ability to "
 446			"map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
 447			 hose->dn);
 448#endif
 449		/* adjusting outbound windows could reclaim space in mem map */
 450		if (paddr_hi < 0xffffffffull)
 451			pr_warn("%pOF: WARNING: Outbound window cfg leaves "
 452				"gaps in memory map. Adjusting the memory map "
 453				"could reduce unnecessary bounce buffering.\n",
 454				hose->dn);
 455
 456		pr_info("%pOF: DMA window size is 0x%llx\n", hose->dn,
 457			(u64)hose->dma_window_size);
 458	}
 459}
 460
 461static void __init setup_pci_cmd(struct pci_controller *hose)
 462{
 463	u16 cmd;
 464	int cap_x;
 465
 466	early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
 467	cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
 468		| PCI_COMMAND_IO;
 469	early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);
 470
 471	cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX);
 472	if (cap_x) {
 473		int pci_x_cmd = cap_x + PCI_X_CMD;
 474		cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
 475			| PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
 476		early_write_config_word(hose, 0, 0, pci_x_cmd, cmd);
 477	} else {
 478		early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
 479	}
 480}
 481
 482void fsl_pcibios_fixup_bus(struct pci_bus *bus)
 483{
 484	struct pci_controller *hose = pci_bus_to_host(bus);
 485	int i, is_pcie = 0, no_link;
 486
 487	/* The root complex bridge comes up with bogus resources,
 488	 * we copy the PHB ones in.
 489	 *
 490	 * With the current generic PCI code, the PHB bus no longer
 491	 * has bus->resource[0..4] set, so things are a bit more
 492	 * tricky.
 493	 */
 494
 495	if (fsl_pcie_bus_fixup)
 496		is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
 497	no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);
 498
 499	if (bus->parent == hose->bus && (is_pcie || no_link)) {
 500		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
 501			struct resource *res = bus->resource[i];
 502			struct resource *par;
 503
 504			if (!res)
 505				continue;
 506			if (i == 0)
 507				par = &hose->io_resource;
 508			else if (i < 4)
 509				par = &hose->mem_resources[i-1];
 510			else
				par = NULL;
 511
 512			res->start = par ? par->start : 0;
 513			res->end   = par ? par->end   : 0;
 514			res->flags = par ? par->flags : 0;
 515		}
 516	}
 517}
 518
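/*
 * Probe one 85xx/86xx PCI/PCIe host controller: map its register block,
 * set up indirect config accesses, detect host vs. agent mode, program
 * the ATMU windows and, for PCIe, check the link state.
 *
 * A node for such a controller typically carries at least "reg",
 * "bus-range" and "ranges" properties, for example (illustrative values
 * only, not taken from any particular board):
 *
 *	pcie@ffe0a000 {
 *		compatible = "fsl,mpc8548-pcie";
 *		reg = <0xffe0a000 0x1000>;
 *		bus-range = <0x0 0xff>;
 *		ranges = <...>;
 *	};
 */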
 519int fsl_add_bridge(struct platform_device *pdev, int is_primary)
 520{
 521	int len;
 522	struct pci_controller *hose;
 523	struct resource rsrc;
 524	const int *bus_range;
 525	u8 hdr_type, progif;
 526	struct device_node *dev;
 527	struct ccsr_pci __iomem *pci;
 528	u16 temp;
 529	u32 svr = mfspr(SPRN_SVR);
 530
 531	dev = pdev->dev.of_node;
 532
 533	if (!of_device_is_available(dev)) {
 534		pr_warn("%pOF: disabled\n", dev);
 535		return -ENODEV;
 536	}
 537
 538	pr_debug("Adding PCI host bridge %pOF\n", dev);
 539
 540	/* Fetch host bridge registers address */
 541	if (of_address_to_resource(dev, 0, &rsrc)) {
 542		printk(KERN_WARNING "Can't get pci register base!\n");
 543		return -ENOMEM;
 544	}
 545
 546	/* Get bus range if any */
 547	bus_range = of_get_property(dev, "bus-range", &len);
 548	if (bus_range == NULL || len < 2 * sizeof(int))
 549		printk(KERN_WARNING "Can't get bus-range for %pOF, assuming"
 550			" bus 0\n", dev);
 551
 552	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 553	hose = pcibios_alloc_controller(dev);
 554	if (!hose)
 555		return -ENOMEM;
 556
 557	/* set platform device as the parent */
 558	hose->parent = &pdev->dev;
 559	hose->first_busno = bus_range ? bus_range[0] : 0x0;
 560	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 561
 562	pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
 563		 (u64)rsrc.start, (u64)resource_size(&rsrc));
 564
 565	pci = hose->private_data = ioremap(rsrc.start, resource_size(&rsrc));
 566	if (!hose->private_data)
 567		goto no_bridge;
 568
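	/*
	 * Config cycles go through the CFG_ADDR/CFG_DATA register pair at
	 * the very start of the controller's register block (CFG_DATA is
	 * 4 bytes above CFG_ADDR); both are big-endian.
	 */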
 569	setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
 570			   PPC_INDIRECT_TYPE_BIG_ENDIAN);
 571
 572	if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
 573		hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 574
 575	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 576		/* use fsl_indirect_read_config for PCIe */
 577		hose->ops = &fsl_indirect_pcie_ops;
 578		/* For PCIe, read HEADER_TYPE to identify host vs. agent mode */
 579		early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
 580		if ((hdr_type & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_BRIDGE)
 581			goto no_bridge;
 582
 583	} else {
 584		/* For PCI, read PCI_CLASS_PROG to identify host vs. agent mode */
 585		early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
 586		if ((progif & 1) &&
 587		    !of_property_read_bool(dev, "fsl,pci-agent-force-enum"))
 588			goto no_bridge;
 589	}
 590
 591	setup_pci_cmd(hose);
 592
 593	/* check PCI express link status */
 594	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
 595		hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG |
 596			PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
 597		if (fsl_pcie_check_link(hose))
 598			hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 599	} else {
 600		/*
 601		 * Set PBFR (PCI Bus Function Register)[10] = 1 to disable
 602		 * the combining of requests that cross a cacheline boundary
 603		 * into one burst transaction.  PCI-X operation is not
 604		 * affected.  This works around erratum PCI 5 on the MPC8548.
 605		 */
 607#define PCI_BUS_FUNCTION 0x44
 608#define PCI_BUS_FUNCTION_MDS 0x400	/* Master disable streaming */
 609		if (((SVR_SOC_VER(svr) == SVR_8543) ||
 610		     (SVR_SOC_VER(svr) == SVR_8545) ||
 611		     (SVR_SOC_VER(svr) == SVR_8547) ||
 612		     (SVR_SOC_VER(svr) == SVR_8548)) &&
 613		    !early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX)) {
 614			early_read_config_word(hose, 0, 0,
 615					PCI_BUS_FUNCTION, &temp);
 616			temp |= PCI_BUS_FUNCTION_MDS;
 617			early_write_config_word(hose, 0, 0,
 618					PCI_BUS_FUNCTION, temp);
 619		}
 620	}
 621
 622	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
 623		"Firmware bus number: %d->%d\n",
 624		(unsigned long long)rsrc.start, hose->first_busno,
 625		hose->last_busno);
 626
 627	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
 628		hose, hose->cfg_addr, hose->cfg_data);
 629
 630	/* Interpret the "ranges" property */
 631	/* This also maps the I/O region and sets isa_io/mem_base */
 632	pci_process_bridge_OF_ranges(hose, dev, is_primary);
 633
 634	/* Setup PEX window registers */
 635	setup_pci_atmu(hose);
 636
 637	/* Set up controller operations */
 638	setup_swiotlb_ops(hose);
 639
 640	return 0;
 641
 642no_bridge:
 643	iounmap(hose->private_data);
 644	/* unmap cfg_data & cfg_addr separately if not on same page */
 645	if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
 646	    ((unsigned long)hose->cfg_addr & PAGE_MASK))
 647		iounmap(hose->cfg_data);
 648	iounmap(hose->cfg_addr);
 649	pcibios_free_controller(hose);
 650	return -ENODEV;
 651}
 652#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
 653
 654DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID,
 655			quirk_fsl_pcie_early);
 656
 657#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
 658struct mpc83xx_pcie_priv {
 659	void __iomem *cfg_type0;
 660	void __iomem *cfg_type1;
 661	u32 dev_base;
 662};
 663
 664struct pex_inbound_window {
 665	u32 ar;
 666	u32 tar;
 667	u32 barl;
 668	u32 barh;
 669};
 670
 671/*
 672 * Following the U-Boot convention, PCIe outbound window 0 is used for
 673 * outbound configuration transactions.
 674 */
 675#define PEX_OUTWIN0_BAR		0xCA4
 676#define PEX_OUTWIN0_TAL		0xCA8
 677#define PEX_OUTWIN0_TAH		0xCAC
 678#define PEX_RC_INWIN_BASE	0xE60
 679#define PEX_RCIWARn_EN		0x1
 680
 681static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn)
 682{
 683	struct pci_controller *hose = pci_bus_to_host(bus);
 684
 685	if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK)
 686		return PCIBIOS_DEVICE_NOT_FOUND;
 687	/*
 688	 * Work around a hardware bug: for Type 0 configuration transactions
 689	 * the PCIe controller does not check the device number bits and
 690	 * simply assumes they are 0.
 691	 */
 692	if (bus->number == hose->first_busno ||
 693			bus->primary == hose->first_busno) {
 694		if (devfn & 0xf8)
 695			return PCIBIOS_DEVICE_NOT_FOUND;
 696	}
 697
 698	if (ppc_md.pci_exclude_device) {
 699		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
 700			return PCIBIOS_DEVICE_NOT_FOUND;
 701	}
 702
 703	return PCIBIOS_SUCCESSFUL;
 704}
 705
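/*
 * Map a config access onto one of the two windows: accesses to the root
 * bus use the Type 0 space directly, while accesses to downstream buses
 * reprogram outbound window 0's translation address with the target
 * bus/devfn (cached in dev_base) and go through the Type 1 space.
 */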
 706static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus,
 707					    unsigned int devfn, int offset)
 708{
 709	struct pci_controller *hose = pci_bus_to_host(bus);
 710	struct mpc83xx_pcie_priv *pcie = hose->dn->data;
 711	u32 dev_base = bus->number << 24 | devfn << 16;
 712	int ret;
 713
 714	ret = mpc83xx_pcie_exclude_device(bus, devfn);
 715	if (ret)
 716		return NULL;
 717
 718	offset &= 0xfff;
 719
 720	/* Type 0 */
 721	if (bus->number == hose->first_busno)
 722		return pcie->cfg_type0 + offset;
 723
 724	if (pcie->dev_base == dev_base)
 725		goto mapped;
 726
 727	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, dev_base);
 728
 729	pcie->dev_base = dev_base;
 730mapped:
 731	return pcie->cfg_type1 + offset;
 732}
 733
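/*
 * Same as pci_generic_config_write(), except that the root port's
 * primary bus number byte is masked off so the generic code can never
 * change it (see PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS).
 */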
 734static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
 735				     int offset, int len, u32 val)
 736{
 737	struct pci_controller *hose = pci_bus_to_host(bus);
 738
 739	/* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
 740	if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
 741		val &= 0xffffff00;
 742
 743	return pci_generic_config_write(bus, devfn, offset, len, val);
 744}
 745
 746static struct pci_ops mpc83xx_pcie_ops = {
 747	.map_bus = mpc83xx_pcie_remap_cfg,
 748	.read = pci_generic_config_read,
 749	.write = mpc83xx_pcie_write_config,
 750};
 751
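/*
 * Map the Type 0 config space from the "reg" resource and locate the
 * Type 1 config space through the outbound window 0 BAR that the
 * bootloader programmed; bail out if the controller was left
 * unconfigured.
 */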
 752static int __init mpc83xx_pcie_setup(struct pci_controller *hose,
 753				     struct resource *reg)
 754{
 755	struct mpc83xx_pcie_priv *pcie;
 756	u32 cfg_bar;
 757	int ret = -ENOMEM;
 758
 759	pcie = zalloc_maybe_bootmem(sizeof(*pcie), GFP_KERNEL);
 760	if (!pcie)
 761		return ret;
 762
 763	pcie->cfg_type0 = ioremap(reg->start, resource_size(reg));
 764	if (!pcie->cfg_type0)
 765		goto err0;
 766
 767	cfg_bar = in_le32(pcie->cfg_type0 + PEX_OUTWIN0_BAR);
 768	if (!cfg_bar) {
 769		/* PCI-E isn't configured. */
 770		ret = -ENODEV;
 771		goto err1;
 772	}
 773
 774	pcie->cfg_type1 = ioremap(cfg_bar, 0x1000);
 775	if (!pcie->cfg_type1)
 776		goto err1;
 777
 778	WARN_ON(hose->dn->data);
 779	hose->dn->data = pcie;
 780	hose->ops = &mpc83xx_pcie_ops;
 781	hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 782
 783	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0);
 784	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0);
 785
 786	if (fsl_pcie_check_link(hose))
 787		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
 788
 789	return 0;
 790err1:
 791	iounmap(pcie->cfg_type0);
 792err0:
 793	kfree(pcie);
 794	return ret;
 796}
 797
 798int __init mpc83xx_add_bridge(struct device_node *dev)
 799{
 800	int ret;
 801	int len;
 802	struct pci_controller *hose;
 803	struct resource rsrc_reg;
 804	struct resource rsrc_cfg;
 805	const int *bus_range;
 806	int primary;
 807
 808	is_mpc83xx_pci = 1;
 809
 810	if (!of_device_is_available(dev)) {
 811		pr_warn("%pOF: disabled by the firmware.\n",
 812			dev);
 813		return -ENODEV;
 814	}
 815	pr_debug("Adding PCI host bridge %pOF\n", dev);
 816
 817	/* Fetch host bridge registers address */
 818	if (of_address_to_resource(dev, 0, &rsrc_reg)) {
 819		printk(KERN_WARNING "Can't get pci register base!\n");
 820		return -ENOMEM;
 821	}
 822
 823	memset(&rsrc_cfg, 0, sizeof(rsrc_cfg));
 824
 825	if (of_address_to_resource(dev, 1, &rsrc_cfg)) {
 826		printk(KERN_WARNING
 827			"No pci config register base in dev tree, "
 828			"using default\n");
 829		/*
 830		 * MPC83xx supports up to two host controllers
 831		 * 	one at 0x8500 has config space registers at 0x8300
 832		 * 	one at 0x8600 has config space registers at 0x8380
 833		 */
 834		if ((rsrc_reg.start & 0xfffff) == 0x8500)
 835			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300;
 836		else if ((rsrc_reg.start & 0xfffff) == 0x8600)
 837			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380;
 838	}
 839	/*
 840	 * Controller at offset 0x8500 is primary
 841	 */
 842	if ((rsrc_reg.start & 0xfffff) == 0x8500)
 843		primary = 1;
 844	else
 845		primary = 0;
 846
 847	/* Get bus range if any */
 848	bus_range = of_get_property(dev, "bus-range", &len);
 849	if (bus_range == NULL || len < 2 * sizeof(int)) {
 850		printk(KERN_WARNING "Can't get bus-range for %pOF, assuming"
 851		       " bus 0\n", dev);
 852	}
 853
 854	pci_add_flags(PCI_REASSIGN_ALL_BUS);
 855	hose = pcibios_alloc_controller(dev);
 856	if (!hose)
 857		return -ENOMEM;
 858
 859	hose->first_busno = bus_range ? bus_range[0] : 0;
 860	hose->last_busno = bus_range ? bus_range[1] : 0xff;
 861
 862	if (of_device_is_compatible(dev, "fsl,mpc8314-pcie")) {
 863		ret = mpc83xx_pcie_setup(hose, &rsrc_reg);
 864		if (ret)
 865			goto err0;
 866	} else {
 867		setup_indirect_pci(hose, rsrc_cfg.start,
 868				   rsrc_cfg.start + 4, 0);
 869	}
 870
 871	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
 872	       "Firmware bus number: %d->%d\n",
 873	       (unsigned long long)rsrc_reg.start, hose->first_busno,
 874	       hose->last_busno);
 875
 876	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
 877	    hose, hose->cfg_addr, hose->cfg_data);
 878
 879	/* Interpret the "ranges" property */
 880	/* This also maps the I/O region and sets isa_io/mem_base */
 881	pci_process_bridge_OF_ranges(hose, dev, primary);
 882
 883	return 0;
 884err0:
 885	pcibios_free_controller(hose);
 886	return ret;
 887}
 888#endif /* CONFIG_PPC_83xx */
 889
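/*
 * Return the PCI address at which the SoC's IMMR/CCSR space is visible
 * to inbound transactions: on MPC83xx by walking the root complex
 * inbound windows, on 85xx/86xx by reading PEXCSRBAR (BAR0 of the host
 * bridge).
 */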
 890u64 fsl_pci_immrbar_base(struct pci_controller *hose)
 891{
 892#ifdef CONFIG_PPC_83xx
 893	if (is_mpc83xx_pci) {
 894		struct mpc83xx_pcie_priv *pcie = hose->dn->data;
 895		struct pex_inbound_window *in;
 896		int i;
 897
 898		/* Walk the Root Complex Inbound windows to match IMMR base */
 899		in = pcie->cfg_type0 + PEX_RC_INWIN_BASE;
 900		for (i = 0; i < 4; i++) {
 901			/* not enabled, skip */
 902			if (!(in_le32(&in[i].ar) & PEX_RCIWARn_EN))
 903				continue;
 904
 905			if (get_immrbase() == in_le32(&in[i].tar))
 906				return (u64)in_le32(&in[i].barh) << 32 |
 907					    in_le32(&in[i].barl);
 908		}
 909
 910		printk(KERN_WARNING "could not find PCI BAR matching IMMR\n");
 911	}
 912#endif
 913
 914#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 915	if (!is_mpc83xx_pci) {
 916		u32 base;
 917
 918		pci_bus_read_config_dword(hose->bus,
 919			PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base);
 920
 921		/*
 922		 * For PEXCSRBAR, bits 3-0 indicate the prefetchable and
 923		 * address type fields, so they must be masked off when
 924		 * extracting the base address.
 925		 */
 926		base &= PCI_BASE_ADDRESS_MEM_MASK;
 927
 928		return base;
 929	}
 930#endif
 931
 932	return 0;
 933}
 934
 935#ifdef CONFIG_E500
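/*
 * Emulate the load that triggered the machine check: return all-ones of
 * the access size in the destination register (the usual result of a
 * master abort) and apply the update-form side effect to the base
 * register, so that the faulting instruction can simply be skipped.
 */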
 936static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
 937{
 938	unsigned int rd, ra, rb, d;
 939
 940	rd = get_rt(inst);
 941	ra = get_ra(inst);
 942	rb = get_rb(inst);
 943	d = get_d(inst);
 944
 945	switch (get_op(inst)) {
 946	case 31:
 947		switch (get_xop(inst)) {
 948		case OP_31_XOP_LWZX:
 949		case OP_31_XOP_LWBRX:
 950			regs->gpr[rd] = 0xffffffff;
 951			break;
 952
 953		case OP_31_XOP_LWZUX:
 954			regs->gpr[rd] = 0xffffffff;
 955			regs->gpr[ra] += regs->gpr[rb];
 956			break;
 957
 958		case OP_31_XOP_LBZX:
 959			regs->gpr[rd] = 0xff;
 960			break;
 961
 962		case OP_31_XOP_LBZUX:
 963			regs->gpr[rd] = 0xff;
 964			regs->gpr[ra] += regs->gpr[rb];
 965			break;
 966
 967		case OP_31_XOP_LHZX:
 968		case OP_31_XOP_LHBRX:
 969			regs->gpr[rd] = 0xffff;
 970			break;
 971
 972		case OP_31_XOP_LHZUX:
 973			regs->gpr[rd] = 0xffff;
 974			regs->gpr[ra] += regs->gpr[rb];
 975			break;
 976
 977		case OP_31_XOP_LHAX:
 978			regs->gpr[rd] = ~0UL;
 979			break;
 980
 981		case OP_31_XOP_LHAUX:
 982			regs->gpr[rd] = ~0UL;
 983			regs->gpr[ra] += regs->gpr[rb];
 984			break;
 985
 986		default:
 987			return 0;
 988		}
 989		break;
 990
 991	case OP_LWZ:
 992		regs->gpr[rd] = 0xffffffff;
 993		break;
 994
 995	case OP_LWZU:
 996		regs->gpr[rd] = 0xffffffff;
 997		regs->gpr[ra] += (s16)d;
 998		break;
 999
1000	case OP_LBZ:
1001		regs->gpr[rd] = 0xff;
1002		break;
1003
1004	case OP_LBZU:
1005		regs->gpr[rd] = 0xff;
1006		regs->gpr[ra] += (s16)d;
1007		break;
1008
1009	case OP_LHZ:
1010		regs->gpr[rd] = 0xffff;
1011		break;
1012
1013	case OP_LHZU:
1014		regs->gpr[rd] = 0xffff;
1015		regs->gpr[ra] += (s16)d;
1016		break;
1017
1018	case OP_LHA:
1019		regs->gpr[rd] = ~0UL;
1020		break;
1021
1022	case OP_LHAU:
1023		regs->gpr[rd] = ~0UL;
1024		regs->gpr[ra] += (s16)d;
1025		break;
1026
1027	default:
1028		return 0;
1029	}
1030
1031	return 1;
1032}
1033
1034static int is_in_pci_mem_space(phys_addr_t addr)
1035{
1036	struct pci_controller *hose;
1037	struct resource *res;
1038	int i;
1039
1040	list_for_each_entry(hose, &hose_list, list_node) {
1041		if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG))
1042			continue;
1043
1044		for (i = 0; i < 3; i++) {
1045			res = &hose->mem_resources[i];
1046			if ((res->flags & IORESOURCE_MEM) &&
1047				addr >= res->start && addr <= res->end)
1048				return 1;
1049		}
1050	}
1051	return 0;
1052}
1053
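/*
 * Machine check handler: if the faulting address (MCAR/MCARU) falls
 * inside a PCIe memory window, fetch the offending instruction, fake an
 * all-ones load result and step past it instead of treating the access
 * as fatal.
 */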
1054int fsl_pci_mcheck_exception(struct pt_regs *regs)
1055{
1056	u32 inst;
1057	int ret;
1058	phys_addr_t addr = 0;
1059
1060	/* Let KVM/QEMU deal with the exception */
1061	if (regs->msr & MSR_GS)
1062		return 0;
1063
1064#ifdef CONFIG_PHYS_64BIT
1065	addr = mfspr(SPRN_MCARU);
1066	addr <<= 32;
1067#endif
1068	addr += mfspr(SPRN_MCAR);
1069
1070	if (is_in_pci_mem_space(addr)) {
1071		if (user_mode(regs))
1072			ret = copy_from_user_nofault(&inst,
1073					(void __user *)regs->nip, sizeof(inst));
1074		else
1075			ret = get_kernel_nofault(inst, (void *)regs->nip);
1078
1079		if (!ret && mcheck_handle_load(regs, inst)) {
1080			regs->nip += 4;
1081			return 1;
1082		}
1083	}
1084
1085	return 0;
1086}
1087#endif
1088
1089#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
1090static const struct of_device_id pci_ids[] = {
1091	{ .compatible = "fsl,mpc8540-pci", },
1092	{ .compatible = "fsl,mpc8548-pcie", },
1093	{ .compatible = "fsl,mpc8610-pci", },
1094	{ .compatible = "fsl,mpc8641-pcie", },
1095	{ .compatible = "fsl,qoriq-pcie", },
1096	{ .compatible = "fsl,qoriq-pcie-v2.1", },
1097	{ .compatible = "fsl,qoriq-pcie-v2.2", },
1098	{ .compatible = "fsl,qoriq-pcie-v2.3", },
1099	{ .compatible = "fsl,qoriq-pcie-v2.4", },
1100	{ .compatible = "fsl,qoriq-pcie-v3.0", },
1101
1102	/*
1103	 * The following entries are for compatibility with older device
1104	 * trees.
1105	 */
1106	{ .compatible = "fsl,p1022-pcie", },
1107	{ .compatible = "fsl,p4080-pcie", },
1108
1109	{},
1110};
1111
1112struct device_node *fsl_pci_primary;
1113
1114void fsl_pci_assign_primary(void)
1115{
1116	struct device_node *np;
1117
1118	/* Callers can specify the primary bus using other means. */
1119	if (fsl_pci_primary)
1120		return;
1121
1122	/* If a PCI host bridge contains an ISA node, it's primary. */
1123	np = of_find_node_by_type(NULL, "isa");
1124	while ((fsl_pci_primary = of_get_parent(np))) {
1125		of_node_put(np);
1126		np = fsl_pci_primary;
1127
1128		if (of_match_node(pci_ids, np) && of_device_is_available(np))
1129			return;
1130	}
1131
1132	/*
1133	 * If there's no PCI host bridge with ISA, arbitrarily
1134	 * designate one as primary.  This can go away once
1135	 * various bugs with primary-less systems are fixed.
1136	 */
1137	for_each_matching_node(np, pci_ids) {
1138		if (of_device_is_available(np)) {
1139			fsl_pci_primary = np;
1140			of_node_put(np);
1141			return;
1142		}
1143	}
1144}
1145
1146#ifdef CONFIG_PM_SLEEP
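/*
 * PM support: a PME interrupt handler that just acknowledges events,
 * plus syscore suspend/resume hooks that perform the PME_Turn_Off and
 * Exit-L2 handshakes with the link partner.
 */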
1147static irqreturn_t fsl_pci_pme_handle(int irq, void *dev_id)
1148{
1149	struct pci_controller *hose = dev_id;
1150	struct ccsr_pci __iomem *pci = hose->private_data;
1151	u32 dr;
1152
1153	dr = in_be32(&pci->pex_pme_mes_dr);
1154	if (!dr)
1155		return IRQ_NONE;
1156
1157	out_be32(&pci->pex_pme_mes_dr, dr);
1158
1159	return IRQ_HANDLED;
1160}
1161
1162static int fsl_pci_pme_probe(struct pci_controller *hose)
1163{
1164	struct ccsr_pci __iomem *pci;
1165	struct pci_dev *dev;
1166	int pme_irq;
1167	int res;
1168	u16 pms;
1169
1170	/* Get hose's pci_dev */
1171	dev = list_first_entry(&hose->bus->devices, typeof(*dev), bus_list);
1172
1173	/* PME Disable */
1174	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1175	pms &= ~PCI_PM_CTRL_PME_ENABLE;
1176	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1177
1178	pme_irq = irq_of_parse_and_map(hose->dn, 0);
1179	if (!pme_irq) {
1180		dev_err(&dev->dev, "Failed to map PME interrupt.\n");
1181
1182		return -ENXIO;
1183	}
1184
1185	res = devm_request_irq(hose->parent, pme_irq,
1186			fsl_pci_pme_handle,
1187			IRQF_SHARED,
1188			"[PCI] PME", hose);
1189	if (res < 0) {
1190		dev_err(&dev->dev, "Unable to request irq %d for PME\n", pme_irq);
1191		irq_dispose_mapping(pme_irq);
1192
1193		return -ENODEV;
1194	}
1195
1196	pci = hose->private_data;
1197
1198	/* Enable PTOD, ENL23D & EXL23D */
1199	clrbits32(&pci->pex_pme_mes_disr,
1200		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1201
1202	out_be32(&pci->pex_pme_mes_ier, 0);
1203	setbits32(&pci->pex_pme_mes_ier,
1204		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
1205
1206	/* PME Enable */
1207	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
1208	pms |= PCI_PM_CTRL_PME_ENABLE;
1209	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
1210
1211	return 0;
1212}
1213
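/*
 * Broadcast a PME_Turn_Off message and poll the event detect register
 * for up to ~150 ms for the acknowledgement before giving up.
 */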
1214static void send_pme_turnoff_message(struct pci_controller *hose)
1215{
1216	struct ccsr_pci __iomem *pci = hose->private_data;
1217	u32 dr;
1218	int i;
1219
1220	/* Send PME_Turn_Off Message Request */
1221	setbits32(&pci->pex_pmcr, PEX_PMCR_PTOMR);
1222
1223	/* Wait for the turn off to complete */
1224	for (i = 0; i < 150; i++) {
1225		dr = in_be32(&pci->pex_pme_mes_dr);
1226		if (dr) {
1227			out_be32(&pci->pex_pme_mes_dr, dr);
1228			break;
1229		}
1230
1231		udelay(1000);
1232	}
1233}
1234
1235static void fsl_pci_syscore_do_suspend(struct pci_controller *hose)
1236{
1237	send_pme_turnoff_message(hose);
1238}
1239
1240static int fsl_pci_syscore_suspend(void)
1241{
1242	struct pci_controller *hose, *tmp;
1243
1244	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1245		fsl_pci_syscore_do_suspend(hose);
1246
1247	return 0;
1248}
1249
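/*
 * Resume: ask the link partner to exit the L2 state, wait up to ~150 ms
 * for completion, then reprogram the ATMU windows, which may not have
 * been preserved across deep sleep.
 */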
1250static void fsl_pci_syscore_do_resume(struct pci_controller *hose)
1251{
1252	struct ccsr_pci __iomem *pci = hose->private_data;
1253	u32 dr;
1254	int i;
1255
1256	/* Send Exit L2 State Message */
1257	setbits32(&pci->pex_pmcr, PEX_PMCR_EXL2S);
1258
1259	/* Wait for the exit to complete */
1260	for (i = 0; i < 150; i++) {
1261		dr = in_be32(&pci->pex_pme_mes_dr);
1262		if (dr) {
1263			out_be32(&pci->pex_pme_mes_dr, dr);
1264			break;
1265		}
1266
1267		udelay(1000);
1268	}
1269
1270	setup_pci_atmu(hose);
1271}
1272
1273static void fsl_pci_syscore_resume(void)
1274{
1275	struct pci_controller *hose, *tmp;
1276
1277	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1278		fsl_pci_syscore_do_resume(hose);
1279}
1280
1281static struct syscore_ops pci_syscore_pm_ops = {
1282	.suspend = fsl_pci_syscore_suspend,
1283	.resume = fsl_pci_syscore_resume,
1284};
1285#endif
1286
1287void fsl_pcibios_fixup_phb(struct pci_controller *phb)
1288{
1289#ifdef CONFIG_PM_SLEEP
1290	fsl_pci_pme_probe(phb);
1291#endif
1292}
1293
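/*
 * Register an "mpc85xx-pci-edac" child device sharing this bridge's
 * resources so that the EDAC PCI error driver can bind to it.
 */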
1294static int add_err_dev(struct platform_device *pdev)
1295{
1296	struct platform_device *errdev;
1297	struct mpc85xx_edac_pci_plat_data pd = {
1298		.of_node = pdev->dev.of_node
1299	};
1300
1301	errdev = platform_device_register_resndata(&pdev->dev,
1302						   "mpc85xx-pci-edac",
1303						   PLATFORM_DEVID_AUTO,
1304						   pdev->resource,
1305						   pdev->num_resources,
1306						   &pd, sizeof(pd));
1307
1308	return PTR_ERR_OR_ZERO(errdev);
1309}
1310
1311static int fsl_pci_probe(struct platform_device *pdev)
1312{
1313	struct device_node *node;
1314	int ret;
1315
1316	node = pdev->dev.of_node;
1317	ret = fsl_add_bridge(pdev, fsl_pci_primary == node);
1318	if (ret)
1319		return ret;
1320
1321	ret = add_err_dev(pdev);
1322	if (ret)
1323		dev_err(&pdev->dev, "couldn't register error device: %d\n",
1324			ret);
1325
1326	return 0;
1327}
1328
1329static struct platform_driver fsl_pci_driver = {
1330	.driver = {
1331		.name = "fsl-pci",
1332		.of_match_table = pci_ids,
1333	},
1334	.probe = fsl_pci_probe,
1335};
1336
1337static int __init fsl_pci_init(void)
1338{
1339#ifdef CONFIG_PM_SLEEP
1340	register_syscore_ops(&pci_syscore_pm_ops);
1341#endif
1342	return platform_driver_register(&fsl_pci_driver);
1343}
1344arch_initcall(fsl_pci_init);
1345#endif