Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.5.6.
   1/*
   2 * PCIe host controller driver for Tegra SoCs
   3 *
   4 * Copyright (c) 2010, CompuLab, Ltd.
   5 * Author: Mike Rapoport <mike@compulab.co.il>
   6 *
   7 * Based on NVIDIA PCIe driver
   8 * Copyright (c) 2008-2009, NVIDIA Corporation.
   9 *
  10 * Bits taken from arch/arm/mach-dove/pcie.c
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2 of the License, or
  15 * (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful, but WITHOUT
  18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  19 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  20 * more details.
  21 *
  22 * You should have received a copy of the GNU General Public License along
  23 * with this program; if not, write to the Free Software Foundation, Inc.,
  24 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  25 */
  26
  27#include <linux/clk.h>
  28#include <linux/delay.h>
  29#include <linux/export.h>
  30#include <linux/interrupt.h>
  31#include <linux/irq.h>
  32#include <linux/irqdomain.h>
  33#include <linux/kernel.h>
  34#include <linux/module.h>
  35#include <linux/msi.h>
  36#include <linux/of_address.h>
  37#include <linux/of_pci.h>
  38#include <linux/of_platform.h>
  39#include <linux/pci.h>
  40#include <linux/platform_device.h>
  41#include <linux/reset.h>
  42#include <linux/sizes.h>
  43#include <linux/slab.h>
  44#include <linux/tegra-cpuidle.h>
  45#include <linux/tegra-powergate.h>
  46#include <linux/vmalloc.h>
  47#include <linux/regulator/consumer.h>
  48
  49#include <asm/mach/irq.h>
  50#include <asm/mach/map.h>
  51#include <asm/mach/pci.h>
  52
  53#define INT_PCI_MSI_NR (8 * 32)
  54
  55/* register definitions */
  56
  57#define AFI_AXI_BAR0_SZ	0x00
  58#define AFI_AXI_BAR1_SZ	0x04
  59#define AFI_AXI_BAR2_SZ	0x08
  60#define AFI_AXI_BAR3_SZ	0x0c
  61#define AFI_AXI_BAR4_SZ	0x10
  62#define AFI_AXI_BAR5_SZ	0x14
  63
  64#define AFI_AXI_BAR0_START	0x18
  65#define AFI_AXI_BAR1_START	0x1c
  66#define AFI_AXI_BAR2_START	0x20
  67#define AFI_AXI_BAR3_START	0x24
  68#define AFI_AXI_BAR4_START	0x28
  69#define AFI_AXI_BAR5_START	0x2c
  70
  71#define AFI_FPCI_BAR0	0x30
  72#define AFI_FPCI_BAR1	0x34
  73#define AFI_FPCI_BAR2	0x38
  74#define AFI_FPCI_BAR3	0x3c
  75#define AFI_FPCI_BAR4	0x40
  76#define AFI_FPCI_BAR5	0x44
  77
  78#define AFI_CACHE_BAR0_SZ	0x48
  79#define AFI_CACHE_BAR0_ST	0x4c
  80#define AFI_CACHE_BAR1_SZ	0x50
  81#define AFI_CACHE_BAR1_ST	0x54
  82
  83#define AFI_MSI_BAR_SZ		0x60
  84#define AFI_MSI_FPCI_BAR_ST	0x64
  85#define AFI_MSI_AXI_BAR_ST	0x68
  86
  87#define AFI_MSI_VEC0		0x6c
  88#define AFI_MSI_VEC1		0x70
  89#define AFI_MSI_VEC2		0x74
  90#define AFI_MSI_VEC3		0x78
  91#define AFI_MSI_VEC4		0x7c
  92#define AFI_MSI_VEC5		0x80
  93#define AFI_MSI_VEC6		0x84
  94#define AFI_MSI_VEC7		0x88
  95
  96#define AFI_MSI_EN_VEC0		0x8c
  97#define AFI_MSI_EN_VEC1		0x90
  98#define AFI_MSI_EN_VEC2		0x94
  99#define AFI_MSI_EN_VEC3		0x98
 100#define AFI_MSI_EN_VEC4		0x9c
 101#define AFI_MSI_EN_VEC5		0xa0
 102#define AFI_MSI_EN_VEC6		0xa4
 103#define AFI_MSI_EN_VEC7		0xa8
 104
 105#define AFI_CONFIGURATION		0xac
 106#define  AFI_CONFIGURATION_EN_FPCI	(1 << 0)
 107
 108#define AFI_FPCI_ERROR_MASKS	0xb0
 109
 110#define AFI_INTR_MASK		0xb4
 111#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
 112#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)
 113
 114#define AFI_INTR_CODE			0xb8
 115#define  AFI_INTR_CODE_MASK		0xf
 116#define  AFI_INTR_AXI_SLAVE_ERROR	1
 117#define  AFI_INTR_AXI_DECODE_ERROR	2
 118#define  AFI_INTR_TARGET_ABORT		3
 119#define  AFI_INTR_MASTER_ABORT		4
 120#define  AFI_INTR_INVALID_WRITE		5
 121#define  AFI_INTR_LEGACY		6
 122#define  AFI_INTR_FPCI_DECODE_ERROR	7
 123
 124#define AFI_INTR_SIGNATURE	0xbc
 125#define AFI_UPPER_FPCI_ADDRESS	0xc0
 126#define AFI_SM_INTR_ENABLE	0xc4
 127#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
 128#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
 129#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
 130#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
 131#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
 132#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
 133#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
 134#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)
 135
 136#define AFI_AFI_INTR_ENABLE		0xc8
 137#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
 138#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
 139#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
 140#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
 141#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
 142#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
 143#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
 144#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
 145#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)
 146
 147#define AFI_PCIE_CONFIG					0x0f8
 148#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
 149#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
 150#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
 151#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
 152#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
 153#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
 154#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
 155#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
 156
 157#define AFI_FUSE			0x104
 158#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)
 159
 160#define AFI_PEX0_CTRL			0x110
 161#define AFI_PEX1_CTRL			0x118
 162#define AFI_PEX2_CTRL			0x128
 163#define  AFI_PEX_CTRL_RST		(1 << 0)
 164#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
 165#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
 166
 167#define AFI_PEXBIAS_CTRL_0		0x168
 168
 169#define RP_VEND_XP	0x00000F00
 170#define  RP_VEND_XP_DL_UP	(1 << 30)
 171
 172#define RP_LINK_CONTROL_STATUS			0x00000090
 173#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
 174#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000
 175
 176#define PADS_CTL_SEL		0x0000009C
 177
 178#define PADS_CTL		0x000000A0
 179#define  PADS_CTL_IDDQ_1L	(1 << 0)
 180#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
 181#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)
 182
 183#define PADS_PLL_CTL_TEGRA20			0x000000B8
 184#define PADS_PLL_CTL_TEGRA30			0x000000B4
 185#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
 186#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
 187#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
 188#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
 189#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
 190#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
 191#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
 192#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
 193#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
 194#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)
 195
 196#define PADS_REFCLK_CFG0			0x000000C8
 197#define PADS_REFCLK_CFG1			0x000000CC
 198
 199/*
 200 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 201 * entries, one entry per PCIe port. These field definitions and desired
 202 * values aren't in the TRM, but do come from NVIDIA.
 203 */
 204#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
 205#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
 206#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
 207#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
 208
 209/* Default value provided by HW engineering is 0xfa5c */
 210#define PADS_REFCLK_CFG_VALUE \
 211	( \
 212		(0x17 << PADS_REFCLK_CFG_TERM_SHIFT)   | \
 213		(0    << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
 214		(0xa  << PADS_REFCLK_CFG_PREDI_SHIFT)  | \
 215		(0xf  << PADS_REFCLK_CFG_DRVI_SHIFT)     \
 216	)
 217
/* MSI controller state: one shared parent IRQ fanned out into INT_PCI_MSI_NR vectors */
struct tegra_msi {
	struct msi_chip chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);	/* allocation bitmap, one bit per MSI vector */
	struct irq_domain *domain;		/* maps hardware vector numbers to Linux IRQs */
	unsigned long pages;			/* pages backing the MSI target address — TODO confirm against allocator */
	struct mutex lock;			/* protects the 'used' bitmap */
	int irq;				/* parent interrupt raised by the AFI for MSIs */
};
 226
 227/* used to differentiate between Tegra SoC generations */
/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_soc_data {
	unsigned int num_ports;		/* number of root ports this SoC provides */
	unsigned int msi_base_shift;	/* shift for the MSI base address (used outside this chunk — verify) */
	u32 pads_pll_ctl;		/* offset of the PADS PLL control register (varies per SoC) */
	u32 tx_ref_sel;			/* TX clock reference selector (div5/div10) for the PHY PLL */
	bool has_pex_clkreq_en;		/* port CLKREQ enable bit is implemented */
	bool has_pex_bias_ctrl;		/* AFI_PEXBIAS_CTRL_0 register is implemented */
	bool has_intr_prsnt_sense;	/* present-sense interrupt source exists */
	bool has_avdd_supply;		/* a separate AVDD regulator is required */
	bool has_cml_clk;		/* a CML clock must be enabled */
};
 239
/* Convert a generic msi_chip pointer back to its containing tegra_msi */
static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}
 244
/* Per-controller driver state */
struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;	/* mapped PHY pad control registers */
	void __iomem *afi;	/* mapped AXI-to-FPCI bridge registers */
	int irq;		/* controller error/legacy interrupt */

	struct list_head buses;	/* cached per-bus config-space mappings (tegra_pcie_bus) */
	struct resource *cs;	/* configuration space aperture (remapped on demand) */

	struct resource io;		/* downstream I/O window */
	struct resource mem;		/* non-prefetchable memory window */
	struct resource prefetch;	/* prefetchable memory window */
	struct resource busn;		/* bus number range */

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;	/* only looked up when soc_data->has_cml_clk */

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	struct tegra_msi msi;

	struct list_head ports;		/* registered tegra_pcie_port instances */
	unsigned int num_ports;
	u32 xbar_config;		/* lane crossbar setting programmed into AFI_PCIE_CONFIG */

	struct regulator *pex_clk_supply;
	struct regulator *vdd_supply;
	struct regulator *avdd_supply;	/* only used when soc_data->has_avdd_supply */

	const struct tegra_pcie_soc_data *soc_data;
};
 281
/* State for a single PCIe root port */
struct tegra_pcie_port {
	struct tegra_pcie *pcie;	/* owning controller */
	struct list_head list;		/* entry in tegra_pcie::ports */
	struct resource regs;		/* root port register aperture */
	void __iomem *base;		/* mapped root port registers */
	unsigned int index;		/* hardware port number (selects AFI_PEXn_CTRL) */
	unsigned int lanes;		/* number of lanes assigned to this port */
};
 290
/* Cached 1 MiB virtual mapping of one bus's configuration space */
struct tegra_pcie_bus {
	struct vm_struct *area;	/* virtual area built by tegra_pcie_bus_alloc() */
	struct list_head list;	/* entry in tegra_pcie::buses */
	unsigned int nr;	/* bus number this mapping belongs to */
};
 296
/* Retrieve the driver state stashed in the ARM per-controller pci_sys_data */
static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}
 301
/* Write an AFI (AXI-to-FPCI bridge) register */
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}
 307
/* Read an AFI (AXI-to-FPCI bridge) register */
static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}
 312
/* Write a PHY pad control register */
static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}
 318
/* Read a PHY pad control register */
static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}
 323
 324/*
 325 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 326 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
 327 * register accesses are mapped:
 328 *
 329 *    [27:24] extended register number
 330 *    [23:16] bus number
 331 *    [15:11] device number
 332 *    [10: 8] function number
 333 *    [ 7: 0] register number
 334 *
 335 * Mapping the whole extended configuration space would require 256 MiB of
 336 * virtual address space, only a small part of which will actually be used.
 337 * To work around this, a 1 MiB of virtual addresses are allocated per bus
 338 * when the bus is first accessed. When the physical range is mapped, the
 339 * the bus number bits are hidden so that the extended register number bits
 340 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
 341 *
 342 *    [19:16] extended register number
 343 *    [15:11] device number
 344 *    [10: 8] function number
 345 *    [ 7: 0] register number
 346 *
 347 * This is achieved by stitching together 16 chunks of 64 KiB of physical
 348 * address space via the MMU.
 349 */
 350static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
 351{
 352	return ((where & 0xf00) << 8) | (PCI_SLOT(devfn) << 11) |
 353	       (PCI_FUNC(devfn) << 8) | (where & 0xfc);
 354}
 355
 356static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
 357						   unsigned int busnr)
 358{
 359	pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
 360			L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
 361	phys_addr_t cs = pcie->cs->start;
 362	struct tegra_pcie_bus *bus;
 363	unsigned int i;
 364	int err;
 365
 366	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
 367	if (!bus)
 368		return ERR_PTR(-ENOMEM);
 369
 370	INIT_LIST_HEAD(&bus->list);
 371	bus->nr = busnr;
 372
 373	/* allocate 1 MiB of virtual addresses */
 374	bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
 375	if (!bus->area) {
 376		err = -ENOMEM;
 377		goto free;
 378	}
 379
 380	/* map each of the 16 chunks of 64 KiB each */
 381	for (i = 0; i < 16; i++) {
 382		unsigned long virt = (unsigned long)bus->area->addr +
 383				     i * SZ_64K;
 384		phys_addr_t phys = cs + i * SZ_1M + busnr * SZ_64K;
 385
 386		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
 387		if (err < 0) {
 388			dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
 389				err);
 390			goto unmap;
 391		}
 392	}
 393
 394	return bus;
 395
 396unmap:
 397	vunmap(bus->area->addr);
 398free:
 399	kfree(bus);
 400	return ERR_PTR(err);
 401}
 402
 403/*
 404 * Look up a virtual address mapping for the specified bus number. If no such
 405 * mapping exists, try to create one.
 406 */
 407static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
 408					unsigned int busnr)
 409{
 410	struct tegra_pcie_bus *bus;
 411
 412	list_for_each_entry(bus, &pcie->buses, list)
 413		if (bus->nr == busnr)
 414			return (void __iomem *)bus->area->addr;
 415
 416	bus = tegra_pcie_bus_alloc(pcie, busnr);
 417	if (IS_ERR(bus))
 418		return NULL;
 419
 420	list_add_tail(&bus->list, &pcie->buses);
 421
 422	return (void __iomem *)bus->area->addr;
 423}
 424
 425static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
 426					     unsigned int devfn,
 427					     int where)
 428{
 429	struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
 430	void __iomem *addr = NULL;
 431
 432	if (bus->number == 0) {
 433		unsigned int slot = PCI_SLOT(devfn);
 434		struct tegra_pcie_port *port;
 435
 436		list_for_each_entry(port, &pcie->ports, list) {
 437			if (port->index + 1 == slot) {
 438				addr = port->base + (where & ~3);
 439				break;
 440			}
 441		}
 442	} else {
 443		addr = tegra_pcie_bus_map(pcie, bus->number);
 444		if (!addr) {
 445			dev_err(pcie->dev,
 446				"failed to map cfg. space for bus %u\n",
 447				bus->number);
 448			return NULL;
 449		}
 450
 451		addr += tegra_pcie_conf_offset(devfn, where);
 452	}
 453
 454	return addr;
 455}
 456
 457static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
 458				int where, int size, u32 *value)
 459{
 460	void __iomem *addr;
 461
 462	addr = tegra_pcie_conf_address(bus, devfn, where);
 463	if (!addr) {
 464		*value = 0xffffffff;
 465		return PCIBIOS_DEVICE_NOT_FOUND;
 466	}
 467
 468	*value = readl(addr);
 469
 470	if (size == 1)
 471		*value = (*value >> (8 * (where & 3))) & 0xff;
 472	else if (size == 2)
 473		*value = (*value >> (8 * (where & 3))) & 0xffff;
 474
 475	return PCIBIOS_SUCCESSFUL;
 476}
 477
 478static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
 479				 int where, int size, u32 value)
 480{
 481	void __iomem *addr;
 482	u32 mask, tmp;
 483
 484	addr = tegra_pcie_conf_address(bus, devfn, where);
 485	if (!addr)
 486		return PCIBIOS_DEVICE_NOT_FOUND;
 487
 488	if (size == 4) {
 489		writel(value, addr);
 490		return PCIBIOS_SUCCESSFUL;
 491	}
 492
 493	if (size == 2)
 494		mask = ~(0xffff << ((where & 0x3) * 8));
 495	else if (size == 1)
 496		mask = ~(0xff << ((where & 0x3) * 8));
 497	else
 498		return PCIBIOS_BAD_REGISTER_NUMBER;
 499
 500	tmp = readl(addr) & mask;
 501	tmp |= value << ((where & 0x3) * 8);
 502	writel(tmp, addr);
 503
 504	return PCIBIOS_SUCCESSFUL;
 505}
 506
/* Configuration-space accessors for all buses behind this host bridge */
static struct pci_ops tegra_pcie_ops = {
	.read = tegra_pcie_read_conf,
	.write = tegra_pcie_write_conf,
};
 511
 512static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
 513{
 514	unsigned long ret = 0;
 515
 516	switch (port->index) {
 517	case 0:
 518		ret = AFI_PEX0_CTRL;
 519		break;
 520
 521	case 1:
 522		ret = AFI_PEX1_CTRL;
 523		break;
 524
 525	case 2:
 526		ret = AFI_PEX2_CTRL;
 527		break;
 528	}
 529
 530	return ret;
 531}
 532
/* Pulse the port's reset line (the RST bit is active-low) */
static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* hold reset asserted for 1-2 ms before releasing */
	usleep_range(1000, 2000);

	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}
 549
/* Enable a root port: turn on its reference clock, then pulse its reset */
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	/* CLKREQ gating only exists on some SoC generations */
	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);
}
 567
/* Disable a root port: hold it in reset, then gate its reference clock */
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* assert port reset (RST bit is active-low) */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}
 583
/* Release a port's register mapping and memory, and unlink it from the list */
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;

	devm_iounmap(pcie->dev, port->base);
	devm_release_mem_region(pcie->dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(pcie->dev, port);
}
 594
 595static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
 596{
 597	u16 reg;
 598
 599	if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
 600		pci_read_config_word(dev, PCI_COMMAND, &reg);
 601		reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
 602			PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
 603		pci_write_config_word(dev, PCI_COMMAND, reg);
 604	}
 605}
 606DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
 607
/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	/* force the class to PCI-to-PCI bridge so the core treats it as one */
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
 617
/* Tegra PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	/* set the Relaxed Ordering enable bit in Device Control */
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
 624
/*
 * ARM PCI setup callback: register this controller's resource windows
 * with the core and map its I/O space. Returns 1 so the root bus is
 * scanned.
 */
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie *pcie = sys_to_pcie(sys);

	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
				sys->mem_offset);
	pci_add_resource(&sys->resources, &pcie->busn);

	/* each controller gets its own 64 KiB slice of the I/O space */
	pci_ioremap_io(nr * SZ_64K, pcie->io.start);

	return 1;
}
 638
 639static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
 640{
 641	struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
 642	int irq;
 643
 644	tegra_cpuidle_pcie_irqs_in_use();
 645
 646	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
 647	if (!irq)
 648		irq = pcie->irq;
 649
 650	return irq;
 651}
 652
 653static void tegra_pcie_add_bus(struct pci_bus *bus)
 654{
 655	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 656		struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
 657
 658		bus->msi = &pcie->msi.chip;
 659	}
 660}
 661
 662static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
 663{
 664	struct tegra_pcie *pcie = sys_to_pcie(sys);
 665	struct pci_bus *bus;
 666
 667	bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
 668				  &sys->resources);
 669	if (!bus)
 670		return NULL;
 671
 672	pci_scan_child_bus(bus);
 673
 674	return bus;
 675}
 676
 677static irqreturn_t tegra_pcie_isr(int irq, void *arg)
 678{
 679	const char *err_msg[] = {
 680		"Unknown",
 681		"AXI slave error",
 682		"AXI decode error",
 683		"Target abort",
 684		"Master abort",
 685		"Invalid write",
 686		"Response decoding error",
 687		"AXI response decoding error",
 688		"Transaction timeout",
 689	};
 690	struct tegra_pcie *pcie = arg;
 691	u32 code, signature;
 692
 693	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
 694	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
 695	afi_writel(pcie, 0, AFI_INTR_CODE);
 696
 697	if (code == AFI_INTR_LEGACY)
 698		return IRQ_NONE;
 699
 700	if (code >= ARRAY_SIZE(err_msg))
 701		code = 0;
 702
 703	/*
 704	 * do not pollute kernel log with master abort reports since they
 705	 * happen a lot during enumeration
 706	 */
 707	if (code == AFI_INTR_MASTER_ABORT)
 708		dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
 709			signature);
 710	else
 711		dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
 712			signature);
 713
 714	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
 715	    code == AFI_INTR_FPCI_DECODE_ERROR) {
 716		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
 717		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
 718
 719		if (code == AFI_INTR_MASTER_ABORT)
 720			dev_dbg(pcie->dev, "  FPCI address: %10llx\n", address);
 721		else
 722			dev_err(pcie->dev, "  FPCI address: %10llx\n", address);
 723	}
 724
 725	return IRQ_HANDLED;
 726}
 727
 728/*
 729 * FPCI map is as follows:
 730 * - 0xfdfc000000: I/O space
 731 * - 0xfdfe000000: type 0 configuration space
 732 * - 0xfdff000000: type 1 configuration space
 733 * - 0xfe00000000: type 0 extended configuration space
 734 * - 0xfe10000000: type 1 extended configuration space
 735 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	fpci_bar = 0xfe100000;
	size = resource_size(pcie->cs);
	axi_address = pcie->cs->start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);	/* BAR sizes are in 4 KiB units */
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR (FPCI address bits packed into [31:4], bit 0 set) */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	/* NOTE(review): AFI_MSI_BAR_SZ is cleared twice here — confirm intent */
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
 793
/*
 * Bring up the AFI/PADS side of the controller: select the lane crossbar
 * configuration, enable only the registered ports, initialize the PHY
 * PLL and wait for it to lock, then enable FPCI and the AFI error
 * interrupt sources. Returns -EBUSY if the PLL never locks.
 */
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct tegra_pcie_port *port;
	unsigned int timeout;
	unsigned long value;

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	/* ...then clear the disable bit for each port actually registered */
	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	/* disable Gen2 capability of port 0 */
	value = afi_readl(pcie, AFI_FUSE);
	value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
	afi_writel(pcie, value, AFI_FUSE);

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* take PLL out of reset  */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* Configure the reference clock driver (two 16-bit entries per register) */
	value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
	pads_writel(pcie, value, PADS_REFCLK_CFG0);
	if (soc->num_ports > 2)
		pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);

	/* wait for the PLL to lock (up to 300 iterations of 1-2 ms) */
	timeout = 300;
	do {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		usleep_range(1000, 2000);
		if (--timeout == 0) {
			pr_err("Tegra PCIe error: timeout waiting for PLL\n");
			return -EBUSY;
		}
	} while (!(value & PADS_PLL_CTL_LOCKDET));

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* take the PCIe interface module out of reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	/* enable AFI error interrupt sources */
	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}
 894
/*
 * Power the controller down: assert all resets, turn off the PCIe
 * powergate partition, then disable the regulators. Regulator failures
 * are only warned about, since this is a teardown path.
 */
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	int err;

	/* TODO: disable and unprepare clocks? */

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* AVDD only exists on some SoC generations */
	if (soc->has_avdd_supply) {
		err = regulator_disable(pcie->avdd_supply);
		if (err < 0)
			dev_warn(pcie->dev,
				 "failed to disable AVDD regulator: %d\n",
				 err);
	}

	err = regulator_disable(pcie->pex_clk_supply);
	if (err < 0)
		dev_warn(pcie->dev, "failed to disable pex-clk regulator: %d\n",
			 err);

	err = regulator_disable(pcie->vdd_supply);
	if (err < 0)
		dev_warn(pcie->dev, "failed to disable VDD regulator: %d\n",
			 err);
}
 926
/*
 * Power the controller up: force a known-off state (all resets asserted,
 * partition off), enable regulators, run the powergate power-up sequence
 * with the PEX clock/reset, then release the AFI reset and enable the
 * remaining clocks. The PCIe-X reset stays asserted here; it is released
 * later by tegra_pcie_enable_controller().
 *
 * NOTE(review): error paths return without rolling back regulators or
 * clocks enabled earlier in this function — confirm the caller powers
 * the controller off on failure.
 */
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	int err;

	/* start from a known state */
	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_enable(pcie->vdd_supply);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable VDD regulator: %d\n", err);
		return err;
	}

	err = regulator_enable(pcie->pex_clk_supply);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable pex-clk regulator: %d\n",
			err);
		return err;
	}

	/* AVDD only exists on some SoC generations */
	if (soc->has_avdd_supply) {
		err = regulator_enable(pcie->avdd_supply);
		if (err < 0) {
			dev_err(pcie->dev,
				"failed to enable AVDD regulator: %d\n",
				err);
			return err;
		}
	}

	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
						pcie->pex_clk,
						pcie->pex_rst);
	if (err) {
		dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
		return err;
	}

	reset_control_deassert(pcie->afi_rst);

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
		return err;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(pcie->dev, "failed to enable CML clock: %d\n",
				err);
			return err;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
		return err;
	}

	return 0;
}
 995
 996static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
 997{
 998	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
 999
1000	pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
1001	if (IS_ERR(pcie->pex_clk))
1002		return PTR_ERR(pcie->pex_clk);
1003
1004	pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
1005	if (IS_ERR(pcie->afi_clk))
1006		return PTR_ERR(pcie->afi_clk);
1007
1008	pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
1009	if (IS_ERR(pcie->pll_e))
1010		return PTR_ERR(pcie->pll_e);
1011
1012	if (soc->has_cml_clk) {
1013		pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
1014		if (IS_ERR(pcie->cml_clk))
1015			return PTR_ERR(pcie->cml_clk);
1016	}
1017
1018	return 0;
1019}
1020
1021static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1022{
1023	pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
1024	if (IS_ERR(pcie->pex_rst))
1025		return PTR_ERR(pcie->pex_rst);
1026
1027	pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
1028	if (IS_ERR(pcie->afi_rst))
1029		return PTR_ERR(pcie->afi_rst);
1030
1031	pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
1032	if (IS_ERR(pcie->pcie_xrst))
1033		return PTR_ERR(pcie->pcie_xrst);
1034
1035	return 0;
1036}
1037
1038static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
1039{
1040	struct platform_device *pdev = to_platform_device(pcie->dev);
1041	struct resource *pads, *afi, *res;
1042	int err;
1043
1044	err = tegra_pcie_clocks_get(pcie);
1045	if (err) {
1046		dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
1047		return err;
1048	}
1049
1050	err = tegra_pcie_resets_get(pcie);
1051	if (err) {
1052		dev_err(&pdev->dev, "failed to get resets: %d\n", err);
1053		return err;
1054	}
1055
1056	err = tegra_pcie_power_on(pcie);
1057	if (err) {
1058		dev_err(&pdev->dev, "failed to power up: %d\n", err);
1059		return err;
1060	}
1061
1062	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
1063	pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
1064	if (IS_ERR(pcie->pads)) {
1065		err = PTR_ERR(pcie->pads);
1066		goto poweroff;
1067	}
1068
1069	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
1070	pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
1071	if (IS_ERR(pcie->afi)) {
1072		err = PTR_ERR(pcie->afi);
1073		goto poweroff;
1074	}
1075
1076	/* request configuration space, but remap later, on demand */
1077	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
1078	if (!res) {
1079		err = -EADDRNOTAVAIL;
1080		goto poweroff;
1081	}
1082
1083	pcie->cs = devm_request_mem_region(pcie->dev, res->start,
1084					   resource_size(res), res->name);
1085	if (!pcie->cs) {
1086		err = -EADDRNOTAVAIL;
1087		goto poweroff;
1088	}
1089
1090	/* request interrupt */
1091	err = platform_get_irq_byname(pdev, "intr");
1092	if (err < 0) {
1093		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1094		goto poweroff;
1095	}
1096
1097	pcie->irq = err;
1098
1099	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
1100	if (err) {
1101		dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
1102		goto poweroff;
1103	}
1104
1105	return 0;
1106
1107poweroff:
1108	tegra_pcie_power_off(pcie);
1109	return err;
1110}
1111
1112static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1113{
1114	if (pcie->irq > 0)
1115		free_irq(pcie->irq, pcie);
1116
1117	tegra_pcie_power_off(pcie);
1118	return 0;
1119}
1120
1121static int tegra_msi_alloc(struct tegra_msi *chip)
1122{
1123	int msi;
1124
1125	mutex_lock(&chip->lock);
1126
1127	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1128	if (msi < INT_PCI_MSI_NR)
1129		set_bit(msi, chip->used);
1130	else
1131		msi = -ENOSPC;
1132
1133	mutex_unlock(&chip->lock);
1134
1135	return msi;
1136}
1137
1138static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1139{
1140	struct device *dev = chip->chip.dev;
1141
1142	mutex_lock(&chip->lock);
1143
1144	if (!test_bit(irq, chip->used))
1145		dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1146	else
1147		clear_bit(irq, chip->used);
1148
1149	mutex_unlock(&chip->lock);
1150}
1151
1152static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
1153{
1154	struct tegra_pcie *pcie = data;
1155	struct tegra_msi *msi = &pcie->msi;
1156	unsigned int i, processed = 0;
1157
1158	for (i = 0; i < 8; i++) {
1159		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1160
1161		while (reg) {
1162			unsigned int offset = find_first_bit(&reg, 32);
1163			unsigned int index = i * 32 + offset;
1164			unsigned int irq;
1165
1166			/* clear the interrupt */
1167			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
1168
1169			irq = irq_find_mapping(msi->domain, index);
1170			if (irq) {
1171				if (test_bit(index, msi->used))
1172					generic_handle_irq(irq);
1173				else
1174					dev_info(pcie->dev, "unhandled MSI\n");
1175			} else {
1176				/*
1177				 * that's weird who triggered this?
1178				 * just clear it
1179				 */
1180				dev_info(pcie->dev, "unexpected MSI\n");
1181			}
1182
1183			/* see if there's any more pending in this vector */
1184			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1185
1186			processed++;
1187		}
1188	}
1189
1190	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
1191}
1192
1193static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
1194			       struct msi_desc *desc)
1195{
1196	struct tegra_msi *msi = to_tegra_msi(chip);
1197	struct msi_msg msg;
1198	unsigned int irq;
1199	int hwirq;
1200
1201	hwirq = tegra_msi_alloc(msi);
1202	if (hwirq < 0)
1203		return hwirq;
1204
1205	irq = irq_create_mapping(msi->domain, hwirq);
1206	if (!irq)
1207		return -EINVAL;
1208
1209	irq_set_msi_desc(irq, desc);
1210
1211	msg.address_lo = virt_to_phys((void *)msi->pages);
1212	/* 32 bit address only */
1213	msg.address_hi = 0;
1214	msg.data = hwirq;
1215
1216	write_msi_msg(irq, &msg);
1217
1218	return 0;
1219}
1220
1221static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
1222{
1223	struct tegra_msi *msi = to_tegra_msi(chip);
1224	struct irq_data *d = irq_get_irq_data(irq);
1225
1226	tegra_msi_free(msi, d->hwirq);
1227}
1228
/*
 * IRQ chip backing the per-vector MSI interrupts. Masking is done via
 * the standard MSI capability helpers, so enable/disable simply map to
 * unmask/mask.
 */
static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
1236
1237static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
1238			 irq_hw_number_t hwirq)
1239{
1240	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
1241	irq_set_chip_data(irq, domain->host_data);
1242	set_irq_flags(irq, IRQF_VALID);
1243
1244	tegra_cpuidle_pcie_irqs_in_use();
1245
1246	return 0;
1247}
1248
/* operations for the linear MSI IRQ domain created in tegra_pcie_enable_msi() */
static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};
1252
1253static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1254{
1255	struct platform_device *pdev = to_platform_device(pcie->dev);
1256	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1257	struct tegra_msi *msi = &pcie->msi;
1258	unsigned long base;
1259	int err;
1260	u32 reg;
1261
1262	mutex_init(&msi->lock);
1263
1264	msi->chip.dev = pcie->dev;
1265	msi->chip.setup_irq = tegra_msi_setup_irq;
1266	msi->chip.teardown_irq = tegra_msi_teardown_irq;
1267
1268	msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
1269					    &msi_domain_ops, &msi->chip);
1270	if (!msi->domain) {
1271		dev_err(&pdev->dev, "failed to create IRQ domain\n");
1272		return -ENOMEM;
1273	}
1274
1275	err = platform_get_irq_byname(pdev, "msi");
1276	if (err < 0) {
1277		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1278		goto err;
1279	}
1280
1281	msi->irq = err;
1282
1283	err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
1284			  tegra_msi_irq_chip.name, pcie);
1285	if (err < 0) {
1286		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
1287		goto err;
1288	}
1289
1290	/* setup AFI/FPCI range */
1291	msi->pages = __get_free_pages(GFP_KERNEL, 0);
1292	base = virt_to_phys((void *)msi->pages);
1293
1294	afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1295	afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
1296	/* this register is in 4K increments */
1297	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1298
1299	/* enable all MSI vectors */
1300	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1301	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1302	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1303	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1304	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1305	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1306	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1307	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1308
1309	/* and unmask the MSI interrupt */
1310	reg = afi_readl(pcie, AFI_INTR_MASK);
1311	reg |= AFI_INTR_MASK_MSI_MASK;
1312	afi_writel(pcie, reg, AFI_INTR_MASK);
1313
1314	return 0;
1315
1316err:
1317	irq_domain_remove(msi->domain);
1318	return err;
1319}
1320
1321static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1322{
1323	struct tegra_msi *msi = &pcie->msi;
1324	unsigned int i, irq;
1325	u32 value;
1326
1327	/* mask the MSI interrupt */
1328	value = afi_readl(pcie, AFI_INTR_MASK);
1329	value &= ~AFI_INTR_MASK_MSI_MASK;
1330	afi_writel(pcie, value, AFI_INTR_MASK);
1331
1332	/* disable all MSI vectors */
1333	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
1334	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
1335	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
1336	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
1337	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
1338	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
1339	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
1340	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
1341
1342	free_pages(msi->pages, 0);
1343
1344	if (msi->irq > 0)
1345		free_irq(msi->irq, pcie);
1346
1347	for (i = 0; i < INT_PCI_MSI_NR; i++) {
1348		irq = irq_find_mapping(msi->domain, i);
1349		if (irq > 0)
1350			irq_dispose_mapping(irq);
1351	}
1352
1353	irq_domain_remove(msi->domain);
1354
1355	return 0;
1356}
1357
1358static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1359				      u32 *xbar)
1360{
1361	struct device_node *np = pcie->dev->of_node;
1362
1363	if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1364		switch (lanes) {
1365		case 0x00000204:
1366			dev_info(pcie->dev, "4x1, 2x1 configuration\n");
1367			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1368			return 0;
1369
1370		case 0x00020202:
1371			dev_info(pcie->dev, "2x3 configuration\n");
1372			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1373			return 0;
1374
1375		case 0x00010104:
1376			dev_info(pcie->dev, "4x1, 1x2 configuration\n");
1377			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1378			return 0;
1379		}
1380	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1381		switch (lanes) {
1382		case 0x00000004:
1383			dev_info(pcie->dev, "single-mode configuration\n");
1384			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1385			return 0;
1386
1387		case 0x00000202:
1388			dev_info(pcie->dev, "dual-mode configuration\n");
1389			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1390			return 0;
1391		}
1392	}
1393
1394	return -EINVAL;
1395}
1396
/*
 * Parse the controller's device tree node: "ranges" (I/O, prefetchable
 * and non-prefetchable memory apertures), the regulator supplies, the
 * bus-range property and one child node per root port. On success,
 * pcie->io/mem/prefetch/busn, pcie->ports and pcie->xbar_config are
 * populated. Returns 0 on success or a negative error code.
 */
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct device_node *np = pcie->dev->of_node, *port;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	struct resource res;
	u32 lanes = 0;
	int err;

	if (of_pci_range_parser_init(&parser, np)) {
		dev_err(pcie->dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	pcie->vdd_supply = devm_regulator_get(pcie->dev, "vdd");
	if (IS_ERR(pcie->vdd_supply))
		return PTR_ERR(pcie->vdd_supply);

	pcie->pex_clk_supply = devm_regulator_get(pcie->dev, "pex-clk");
	if (IS_ERR(pcie->pex_clk_supply))
		return PTR_ERR(pcie->pex_clk_supply);

	/* the AVDD supply only exists on SoCs that declare it in SoC data */
	if (soc->has_avdd_supply) {
		pcie->avdd_supply = devm_regulator_get(pcie->dev, "avdd");
		if (IS_ERR(pcie->avdd_supply))
			return PTR_ERR(pcie->avdd_supply);
	}

	/* sort the "ranges" entries into the three aperture resources */
	for_each_of_pci_range(&parser, &range) {
		of_pci_range_to_resource(&range, np, &res);

		switch (res.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			memcpy(&pcie->io, &res, sizeof(res));
			pcie->io.name = "I/O";
			break;

		case IORESOURCE_MEM:
			if (res.flags & IORESOURCE_PREFETCH) {
				memcpy(&pcie->prefetch, &res, sizeof(res));
				pcie->prefetch.name = "PREFETCH";
			} else {
				memcpy(&pcie->mem, &res, sizeof(res));
				pcie->mem.name = "MEM";
			}
			break;
		}
	}

	/* fall back to the full 0x00-0xff bus range if the property is bad */
	err = of_pci_parse_bus_range(np, &pcie->busn);
	if (err < 0) {
		dev_err(pcie->dev, "failed to parse ranges property: %d\n",
			err);
		pcie->busn.name = np->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	/* parse root ports */
	for_each_child_of_node(np, port) {
		struct tegra_pcie_port *rp;
		unsigned int index;
		u32 value;

		/* of_pci_get_devfn() returns the devfn encoded in "reg" */
		err = of_pci_get_devfn(port);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		/* root ports are numbered 1..num_ports in the device slot */
		index = PCI_SLOT(err);

		if (index < 1 || index > soc->num_ports) {
			dev_err(pcie->dev, "invalid port number: %d\n", index);
			return -EINVAL;
		}

		index--;

		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
				err);
			return err;
		}

		if (value > 16) {
			dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
			return -EINVAL;
		}

		/* pack this port's lane count into its byte of the lane
		 * configuration word consumed by tegra_pcie_get_xbar_config()
		 */
		lanes |= value << (index << 3);

		/* disabled ports still contribute to the lane configuration,
		 * but no port structure is created for them
		 */
		if (!of_device_is_available(port))
			continue;

		rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;

		err = of_address_to_resource(port, 0, &rp->regs);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		INIT_LIST_HEAD(&rp->list);
		rp->index = index;
		rp->lanes = value;
		rp->pcie = pcie;

		rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
		if (IS_ERR(rp->base))
			return PTR_ERR(rp->base);

		list_add_tail(&rp->list, &pcie->ports);
	}

	/* map the combined lane configuration onto an XBAR setting */
	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
	if (err < 0) {
		dev_err(pcie->dev, "invalid lane configuration\n");
		return err;
	}

	return 0;
}
1527
1528/*
1529 * FIXME: If there are no PCIe cards attached, then calling this function
1530 * can result in the increase of the bootup time as there are big timeout
1531 * loops.
1532 */
1533#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
1534static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
1535{
1536	unsigned int retries = 3;
1537	unsigned long value;
1538
1539	do {
1540		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1541
1542		do {
1543			value = readl(port->base + RP_VEND_XP);
1544
1545			if (value & RP_VEND_XP_DL_UP)
1546				break;
1547
1548			usleep_range(1000, 2000);
1549		} while (--timeout);
1550
1551		if (!timeout) {
1552			dev_err(port->pcie->dev, "link %u down, retrying\n",
1553				port->index);
1554			goto retry;
1555		}
1556
1557		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1558
1559		do {
1560			value = readl(port->base + RP_LINK_CONTROL_STATUS);
1561
1562			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
1563				return true;
1564
1565			usleep_range(1000, 2000);
1566		} while (--timeout);
1567
1568retry:
1569		tegra_pcie_port_reset(port);
1570	} while (--retries);
1571
1572	return false;
1573}
1574
1575static int tegra_pcie_enable(struct tegra_pcie *pcie)
1576{
1577	struct tegra_pcie_port *port, *tmp;
1578	struct hw_pci hw;
1579
1580	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
1581		dev_info(pcie->dev, "probing port %u, using %u lanes\n",
1582			 port->index, port->lanes);
1583
1584		tegra_pcie_port_enable(port);
1585
1586		if (tegra_pcie_port_check_link(port))
1587			continue;
1588
1589		dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
1590
1591		tegra_pcie_port_disable(port);
1592		tegra_pcie_port_free(port);
1593	}
1594
1595	memset(&hw, 0, sizeof(hw));
1596
1597	hw.nr_controllers = 1;
1598	hw.private_data = (void **)&pcie;
1599	hw.setup = tegra_pcie_setup;
1600	hw.map_irq = tegra_pcie_map_irq;
1601	hw.add_bus = tegra_pcie_add_bus;
1602	hw.scan = tegra_pcie_scan_bus;
1603	hw.ops = &tegra_pcie_ops;
1604
1605	pci_common_init_dev(pcie->dev, &hw);
1606
1607	return 0;
1608}
1609
/* SoC data for Tegra20: two root ports, none of the optional features */
static const struct tegra_pcie_soc_data tegra20_pcie_data = {
	.num_ports = 2,
	.msi_base_shift = 0,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
	.has_pex_clkreq_en = false,
	.has_pex_bias_ctrl = false,
	.has_intr_prsnt_sense = false,
	.has_avdd_supply = false,
	.has_cml_clk = false,
};
1621
/* SoC data for Tegra30: three root ports, AVDD supply and CML clock present */
static const struct tegra_pcie_soc_data tegra30_pcie_data = {
	.num_ports = 3,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_avdd_supply = true,
	.has_cml_clk = true,
};
1633
1634static const struct of_device_id tegra_pcie_of_match[] = {
1635	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
1636	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
1637	{ },
1638};
1639MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
1640
1641static int tegra_pcie_probe(struct platform_device *pdev)
1642{
1643	const struct of_device_id *match;
1644	struct tegra_pcie *pcie;
1645	int err;
1646
1647	match = of_match_device(tegra_pcie_of_match, &pdev->dev);
1648	if (!match)
1649		return -ENODEV;
1650
1651	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
1652	if (!pcie)
1653		return -ENOMEM;
1654
1655	INIT_LIST_HEAD(&pcie->buses);
1656	INIT_LIST_HEAD(&pcie->ports);
1657	pcie->soc_data = match->data;
1658	pcie->dev = &pdev->dev;
1659
1660	err = tegra_pcie_parse_dt(pcie);
1661	if (err < 0)
1662		return err;
1663
1664	pcibios_min_mem = 0;
1665
1666	err = tegra_pcie_get_resources(pcie);
1667	if (err < 0) {
1668		dev_err(&pdev->dev, "failed to request resources: %d\n", err);
1669		return err;
1670	}
1671
1672	err = tegra_pcie_enable_controller(pcie);
1673	if (err)
1674		goto put_resources;
1675
1676	/* setup the AFI address translations */
1677	tegra_pcie_setup_translations(pcie);
1678
1679	if (IS_ENABLED(CONFIG_PCI_MSI)) {
1680		err = tegra_pcie_enable_msi(pcie);
1681		if (err < 0) {
1682			dev_err(&pdev->dev,
1683				"failed to enable MSI support: %d\n",
1684				err);
1685			goto put_resources;
1686		}
1687	}
1688
1689	err = tegra_pcie_enable(pcie);
1690	if (err < 0) {
1691		dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
1692		goto disable_msi;
1693	}
1694
1695	platform_set_drvdata(pdev, pcie);
1696	return 0;
1697
1698disable_msi:
1699	if (IS_ENABLED(CONFIG_PCI_MSI))
1700		tegra_pcie_disable_msi(pcie);
1701put_resources:
1702	tegra_pcie_put_resources(pcie);
1703	return err;
1704}
1705
/*
 * Platform driver; bind attributes are suppressed because the driver
 * provides no remove callback and cannot be unbound safely.
 */
static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.owner = THIS_MODULE,
		.of_match_table = tegra_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_pcie_probe,
};
module_platform_driver(tegra_pcie_driver);
1716
1717MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
1718MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
1719MODULE_LICENSE("GPLv2");