// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * Author: Thierry Reding <treding@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>

#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>

#include "../pci.h"

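/* 8 AFI_MSI_VEC registers with 32 vectors each, i.e. 256 MSI vectors */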
#define INT_PCI_MSI_NR (8 * 32)

/* register definitions */

#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_MSI_VEC(x)		(0x6c + ((x) * 4))
#define AFI_MSI_EN_VEC(x)	(0x8c + ((x) * 4))

#define AFI_CONFIGURATION		0xac
#define  AFI_CONFIGURATION_EN_FPCI		(1 << 0)
#define  AFI_CONFIGURATION_CLKEN_OVERRIDE	(1 << 31)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE			0xb8
#define  AFI_INTR_CODE_MASK		0xf
#define  AFI_INTR_INI_SLAVE_ERROR	1
#define  AFI_INTR_INI_DECODE_ERROR	2
#define  AFI_INTR_TARGET_ABORT		3
#define  AFI_INTR_MASTER_ABORT		4
#define  AFI_INTR_INVALID_WRITE		5
#define  AFI_INTR_LEGACY		6
#define  AFI_INTR_FPCI_DECODE_ERROR	7
#define  AFI_INTR_AXI_DECODE_ERROR	8
#define  AFI_INTR_FPCI_TIMEOUT		9
#define  AFI_INTR_PE_PRSNT_SENSE	10
#define  AFI_INTR_PE_CLKREQ_SENSE	11
#define  AFI_INTR_CLKCLAMP_SENSE	12
#define  AFI_INTR_RDY4PD_SENSE		13
#define  AFI_INTR_P2P_ERROR		14

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)

#define AFI_PCIE_PME		0xf0

#define AFI_PCIE_CONFIG					0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111	(0x2 << 20)
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x)		(1 << ((x) + 29))
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL		(0x7 << 29)

#define AFI_FUSE			0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define  AFI_PEX_CTRL_RST		(1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)

#define AFI_PLLE_CONTROL		0x160
#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0		0x168

#define RP_ECTL_2_R1	0x00000e84
#define  RP_ECTL_2_R1_RX_CTLE_1C_MASK		0xffff

#define RP_ECTL_4_R1	0x00000e8c
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R1	0x00000e90
#define  RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R1	0x00000e94
#define  RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_ECTL_2_R2	0x00000ea4
#define  RP_ECTL_2_R2_RX_CTLE_1C_MASK	0xffff

#define RP_ECTL_4_R2	0x00000eac
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R2	0x00000eb0
#define  RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R2	0x00000eb4
#define  RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_VEND_XP	0x00000f00
#define  RP_VEND_XP_DL_UP			(1 << 30)
#define  RP_VEND_XP_OPPORTUNISTIC_ACK		(1 << 27)
#define  RP_VEND_XP_OPPORTUNISTIC_UPDATEFC	(1 << 28)
#define  RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK	(0xff << 18)

#define RP_VEND_CTL0	0x00000f44
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK	(0xf << 12)
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH	(0x9 << 12)

#define RP_VEND_CTL1	0x00000f48
#define  RP_VEND_CTL1_ERPT	(1 << 13)

#define RP_VEND_XP_BIST	0x00000f4c
#define  RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE	(1 << 28)

#define RP_VEND_CTL2 0x00000fa8
#define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)

#define RP_PRIV_MISC	0x00000fe0
#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT		(0xe << 0)
#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT		(0xf << 0)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD		(0xf << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE		(1 << 23)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD		(0xf << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE		(1 << 31)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define RP_LINK_CONTROL_STATUS_2		0x000000b0

#define PADS_CTL_SEL		0x0000009c

#define PADS_CTL		0x000000a0
#define  PADS_CTL_IDDQ_1L	(1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL_TEGRA20			0x000000b8
#define PADS_PLL_CTL_TEGRA30			0x000000b4
#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0			0x000000c8
#define PADS_REFCLK_CFG1			0x000000cc
#define PADS_REFCLK_BIAS			0x000000d0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
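
/*
 * Illustrative sketch (field values here are made up, not taken from any
 * TRM): a single 16-bit per-port entry would be composed as
 *
 *   entry = (drvi << PADS_REFCLK_CFG_DRVI_SHIFT) |
 *           (predi << PADS_REFCLK_CFG_PREDI_SHIFT) |
 *           (e_term << PADS_REFCLK_CFG_E_TERM_SHIFT) |
 *           (term << PADS_REFCLK_CFG_TERM_SHIFT);
 *
 * PADS_REFCLK_CFG0 carries the entries for the first two ports; judging by
 * tegra_pcie_apply_pad_settings(), PADS_REFCLK_CFG1 carries those of any
 * further ports.
 */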

#define PME_ACK_TIMEOUT 10000
#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */

struct tegra_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct mutex map_lock;
	spinlock_t mask_lock;
	void *virt;
	dma_addr_t phys;
	int irq;
};

/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_port_soc {
	struct {
		u8 turnoff_bit;
		u8 ack_bit;
	} pme;
};

struct tegra_pcie_soc {
	unsigned int num_ports;
	const struct tegra_pcie_port_soc *ports;
	unsigned int msi_base_shift;
	unsigned long afi_pex2_ctrl;
	u32 pads_pll_ctl;
	u32 tx_ref_sel;
	u32 pads_refclk_cfg0;
	u32 pads_refclk_cfg1;
	u32 update_fc_threshold;
	bool has_pex_clkreq_en;
	bool has_pex_bias_ctrl;
	bool has_intr_prsnt_sense;
	bool has_cml_clk;
	bool has_gen2;
	bool force_pca_enable;
	bool program_uphy;
	bool update_clamp_threshold;
	bool program_deskew_time;
	bool update_fc_timer;
	bool has_cache_bars;
	struct {
		struct {
			u32 rp_ectl_2_r1;
			u32 rp_ectl_4_r1;
			u32 rp_ectl_5_r1;
			u32 rp_ectl_6_r1;
			u32 rp_ectl_2_r2;
			u32 rp_ectl_4_r2;
			u32 rp_ectl_5_r2;
			u32 rp_ectl_6_r2;
		} regs;
		bool enable;
	} ectl;
};

struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;
	void __iomem *afi;
	void __iomem *cfg;
	int irq;

	struct resource cs;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	bool legacy_phy;
	struct phy *phy;

	struct tegra_msi msi;

	struct list_head ports;
	u32 xbar_config;

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc *soc;
	struct dentry *debugfs;
};

static inline struct tegra_pcie *msi_to_pcie(struct tegra_msi *msi)
{
	return container_of(msi, struct tegra_pcie, msi);
}

struct tegra_pcie_port {
	struct tegra_pcie *pcie;
	struct device_node *np;
	struct list_head list;
	struct resource regs;
	void __iomem *base;
	unsigned int index;
	unsigned int lanes;

	struct phy **phys;

	struct gpio_desc *reset_gpio;
};

static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}

/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However, it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 *
 * To work around this, a 4 KiB region is used to generate the required
 * configuration transaction with the relevant B:D:F and register offset
 * values. This is achieved by dynamically programming the base address and
 * size of the AFI_AXI_BAR used for endpoint config space mapping, to make
 * sure that the address (access to which generates the correct config
 * transaction) falls in this 4 KiB region.
 */
static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
					   unsigned int where)
{
	return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
	       (PCI_FUNC(devfn) << 8) | (where & 0xff);
}
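
/*
 * Worked example (illustrative): a read of extended config register 0x104
 * on bus 1, device 0, function 0 yields
 *
 *   offset = ((0x104 & 0xf00) << 16) | (1 << 16) | (0 << 11) | (0 << 8) |
 *            (0x104 & 0xff)
 *          = 0x01000000 | 0x00010000 | 0x04 = 0x01010004
 *
 * i.e. extended register number 1 in bits [27:24], bus 1 in bits [23:16]
 * and register 0x04 in bits [7:0].
 */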

static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
					unsigned int devfn,
					int where)
{
	struct tegra_pcie *pcie = bus->sysdata;
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		unsigned int offset;
		u32 base;

		offset = tegra_pcie_conf_offset(bus->number, devfn, where);

		/* move 4 KiB window to offset within the FPCI region */
		base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
		afi_writel(pcie, base, AFI_FPCI_BAR0);

		/* move to correct offset within the 4 KiB page */
		addr = pcie->cfg + (offset & (SZ_4K - 1));
	}

	return addr;
}
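
/*
 * Continuing the example above (illustrative): for offset 0x01010004 the
 * window base computed by tegra_pcie_map_bus() is
 *
 *   base = 0xfe100000 + ((0x01010004 & ~0xfff) >> 8)
 *        = 0xfe100000 + 0x00010100 = 0xfe110100
 *
 * so AFI_FPCI_BAR0 is pointed at the 4 KiB page containing the target
 * register, and the access itself goes to pcie->cfg + 0x004.
 */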

static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *value)
{
	if (bus->number == 0)
		return pci_generic_config_read32(bus, devfn, where, size,
						 value);

	return pci_generic_config_read(bus, devfn, where, size, value);
}

static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 value)
{
	if (bus->number == 0)
		return pci_generic_config_write32(bus, devfn, where, size,
						  value);

	return pci_generic_config_write(bus, devfn, where, size, value);
}

static struct pci_ops tegra_pcie_ops = {
	.map_bus = tegra_pcie_map_bus,
	.read = tegra_pcie_config_read,
	.write = tegra_pcie_config_write,
};

static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long ret = 0;

	switch (port->index) {
	case 0:
		ret = AFI_PEX0_CTRL;
		break;

	case 1:
		ret = AFI_PEX1_CTRL;
		break;

	case 2:
		ret = soc->afi_pex2_ctrl;
		break;
	}

	return ret;
}

static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 1);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value &= ~AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}

	usleep_range(1000, 2000);

	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 0);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value |= AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}
}

static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/* Enable AER capability */
	value = readl(port->base + RP_VEND_CTL1);
	value |= RP_VEND_CTL1_ERPT;
	writel(value, port->base + RP_VEND_CTL1);

	/* Optimal settings to enhance bandwidth */
	value = readl(port->base + RP_VEND_XP);
	value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
	value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
	writel(value, port->base + RP_VEND_XP);

	/*
	 * The LTSSM will wait for DLLP to finish before entering L1 or L2,
	 * to avoid truncation of PM messages, which results in receiver
	 * errors.
	 */
	value = readl(port->base + RP_VEND_XP_BIST);
	value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
	writel(value, port->base + RP_VEND_XP_BIST);

	value = readl(port->base + RP_PRIV_MISC);
	value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
	value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;

	if (soc->update_clamp_threshold) {
		value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
				RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
		value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
			RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
	}

	writel(value, port->base + RP_PRIV_MISC);
}

static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	value = readl(port->base + RP_ECTL_2_R1);
	value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r1;
	writel(value, port->base + RP_ECTL_2_R1);

	value = readl(port->base + RP_ECTL_4_R1);
	value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r1 <<
				RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R1);

	value = readl(port->base + RP_ECTL_5_R1);
	value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r1;
	writel(value, port->base + RP_ECTL_5_R1);

	value = readl(port->base + RP_ECTL_6_R1);
	value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r1;
	writel(value, port->base + RP_ECTL_6_R1);

	value = readl(port->base + RP_ECTL_2_R2);
	value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r2;
	writel(value, port->base + RP_ECTL_2_R2);

	value = readl(port->base + RP_ECTL_4_R2);
	value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r2 <<
				RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R2);

	value = readl(port->base + RP_ECTL_5_R2);
	value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r2;
	writel(value, port->base + RP_ECTL_5_R2);

	value = readl(port->base + RP_ECTL_6_R2);
	value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r2;
	writel(value, port->base + RP_ECTL_6_R2);
}

static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/*
	 * Sometimes link speed change from Gen2 to Gen1 fails due to
	 * instability in deskew logic on lane-0. Increase the deskew
	 * retry time to resolve this issue.
	 */
	if (soc->program_deskew_time) {
		value = readl(port->base + RP_VEND_CTL0);
		value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
		value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
		writel(value, port->base + RP_VEND_CTL0);
	}

	if (soc->update_fc_timer) {
		value = readl(port->base + RP_VEND_XP);
		value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
		value |= soc->update_fc_threshold;
		writel(value, port->base + RP_VEND_XP);
	}

	/*
	 * The PCIe link doesn't come up with a few legacy PCIe endpoints if
	 * the root port advertises both Gen-1 and Gen-2 speeds on Tegra.
	 * Hence, the strategy followed here is to initially advertise only
	 * Gen-1 and, after the link is up, retrain it to Gen-2 speed.
	 */
	value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
	value &= ~PCI_EXP_LNKSTA_CLS;
	value |= PCI_EXP_LNKSTA_CLS_2_5GB;
	writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
}

static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);

	if (soc->force_pca_enable) {
		value = readl(port->base + RP_VEND_CTL2);
		value |= RP_VEND_CTL2_PCA_ENABLE;
		writel(value, port->base + RP_VEND_CTL2);
	}

	tegra_pcie_enable_rp_features(port);

	if (soc->ectl.enable)
		tegra_pcie_program_ectl_settings(port);

	tegra_pcie_apply_sw_fixup(port);
}

static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);

	/* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
	value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
}

static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}

/* The Tegra PCIe root complex wrongly reports its device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);

/* Tegra20 and Tegra30 PCIe require relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);

static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = pdev->bus->sysdata;
	int irq;

	tegra_cpuidle_pcie_irqs_in_use();

	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
	if (!irq)
		irq = pcie->irq;

	return irq;
}

static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
	static const char * const err_msg[] = {
		"Unknown",
		"AXI slave error",
		"AXI decode error",
		"Target abort",
		"Master abort",
		"Invalid write",
		"Legacy interrupt",
		"Response decoding error",
		"AXI response decoding error",
		"Transaction timeout",
		"Slot present pin change",
		"Slot clock request change",
		"TMS clock ramp change",
		"TMS ready for power down",
		"Peer2Peer error",
	};
	struct tegra_pcie *pcie = arg;
	struct device *dev = pcie->dev;
	u32 code, signature;

	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
	afi_writel(pcie, 0, AFI_INTR_CODE);

	if (code == AFI_INTR_LEGACY)
		return IRQ_NONE;

	if (code >= ARRAY_SIZE(err_msg))
		code = 0;

	/*
	 * Do not pollute the kernel log with master abort reports since they
	 * happen a lot during enumeration.
	 */
	if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
	else
		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);

	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
	    code == AFI_INTR_FPCI_DECODE_ERROR) {
		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);

		if (code == AFI_INTR_MASTER_ABORT)
			dev_dbg(dev, "  FPCI address: %10llx\n", address);
		else
			dev_err(dev, "  FPCI address: %10llx\n", address);
	}

	return IRQ_HANDLED;
}

/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 size;
	struct resource_entry *entry;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);

	/* Bar 0: type 1 extended configuration space */
	size = resource_size(&pcie->cs);
	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);

	resource_list_for_each_entry(entry, &bridge->windows) {
		u32 fpci_bar, axi_address;
		struct resource *res = entry->res;

		size = resource_size(res);

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			/* Bar 1: downstream IO bar */
			fpci_bar = 0xfdfc0000;
			axi_address = pci_pio_to_address(res->start);
			afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
			afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
			afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
			break;
		case IORESOURCE_MEM:
			fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1;
			axi_address = res->start;

			if (res->flags & IORESOURCE_PREFETCH) {
				/* Bar 2: prefetchable memory BAR */
				afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
				afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

			} else {
				/* Bar 3: non prefetchable memory BAR */
				afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
				afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
			}
			break;
		}
	}

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	if (pcie->soc->has_cache_bars) {
		/* map all upstream transactions as uncached */
		afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
	}

	/* MSI translations are set up only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
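
/*
 * Worked example (hypothetical window, for illustration only): a
 * non-prefetchable memory window starting at AXI address 0x13000000 yields
 *
 *   fpci_bar = (((0x13000000 >> 12) & 0x0fffffff) << 4) | 0x1 = 0x00130001
 *
 * which tegra_pcie_setup_translations() programs into AFI_FPCI_BAR3, while
 * AFI_AXI_BAR3_START receives 0x13000000 and AFI_AXI_BAR3_SZ the window
 * size in 4 KiB units.
 */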

static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		if (value & PADS_PLL_CTL_LOCKDET)
			return 0;
	}

	return -ETIMEDOUT;
}

static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIe lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs: select PLLE output as refclock and set the
	 * TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}

static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* override IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}

static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_on(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_off(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
				err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;

	list_for_each_entry(port, &pcie->ports, list) {
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
		value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	}

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	/* Disable AFI dynamic clock gating and enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
}

static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->afi_rst);

	clk_disable_unprepare(pcie->pll_e);
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
	clk_disable_unprepare(pcie->afi_clk);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}

static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(dev, "failed to enable regulators: %d\n", err);

	if (!dev->pm_domain) {
		err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to power ungate: %d\n", err);
			goto regulator_disable;
		}
		err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to remove clamp: %d\n", err);
			goto powergate;
		}
	}

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(dev, "failed to enable AFI clock: %d\n", err);
		goto powergate;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(dev, "failed to enable CML clock: %d\n", err);
			goto disable_afi_clk;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
		goto disable_cml_clk;
	}

	reset_control_deassert(pcie->afi_rst);

	return 0;

disable_cml_clk:
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
disable_afi_clk:
	clk_disable_unprepare(pcie->afi_clk);
powergate:
	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
regulator_disable:
	regulator_bulk_disable(pcie->num_supplies, pcie->supplies);

	return err;
}

static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	/* Configure the reference clock driver */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
}

static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;

	pcie->pex_clk = devm_clk_get(dev, "pex");
	if (IS_ERR(pcie->pex_clk))
		return PTR_ERR(pcie->pex_clk);

	pcie->afi_clk = devm_clk_get(dev, "afi");
	if (IS_ERR(pcie->afi_clk))
		return PTR_ERR(pcie->afi_clk);

	pcie->pll_e = devm_clk_get(dev, "pll_e");
	if (IS_ERR(pcie->pll_e))
		return PTR_ERR(pcie->pll_e);

	if (soc->has_cml_clk) {
		pcie->cml_clk = devm_clk_get(dev, "cml");
		if (IS_ERR(pcie->cml_clk))
			return PTR_ERR(pcie->cml_clk);
	}

	return 0;
}

static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;

	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
	if (IS_ERR(pcie->pex_rst))
		return PTR_ERR(pcie->pex_rst);

	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
	if (IS_ERR(pcie->afi_rst))
		return PTR_ERR(pcie->afi_rst);

	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
	if (IS_ERR(pcie->pcie_xrst))
		return PTR_ERR(pcie->pcie_xrst);

	return 0;
}

static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	pcie->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	pcie->legacy_phy = true;

	return 0;
}

static struct phy *devm_of_phy_optional_get_index(struct device *dev,
						  struct device_node *np,
						  const char *consumer,
						  unsigned int index)
{
	struct phy *phy;
	char *name;

	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
	if (!name)
		return ERR_PTR(-ENOMEM);

	phy = devm_of_phy_optional_get(dev, np, name);
	kfree(name);

	return phy;
}

static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	struct phy *phy;
	unsigned int i;
	int err;

	port->phys = devm_kcalloc(dev, port->lanes, sizeof(phy), GFP_KERNEL);
	if (!port->phys)
		return -ENOMEM;

	for (i = 0; i < port->lanes; i++) {
		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
		if (IS_ERR(phy)) {
			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
				PTR_ERR(phy));
			return PTR_ERR(phy);
		}

		err = phy_init(phy);
		if (err < 0) {
			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
				err);
			return err;
		}

		port->phys[i] = phy;
	}

	return 0;
}

static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct device_node *np = pcie->dev->of_node;
	struct tegra_pcie_port *port;
	int err;

	if (!soc->has_gen2 || of_property_present(np, "phys"))
		return tegra_pcie_phys_get_legacy(pcie);

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_get_phys(port);
		if (err < 0)
			return err;
	}

	return 0;
}

static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port;
	struct device *dev = pcie->dev;
	int err, i;

	if (pcie->legacy_phy) {
		err = phy_exit(pcie->phy);
		if (err < 0)
			dev_err(dev, "failed to teardown PHY: %d\n", err);
		return;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		for (i = 0; i < port->lanes; i++) {
			err = phy_exit(port->phys[i]);
			if (err < 0)
				dev_err(dev, "failed to teardown PHY#%u: %d\n",
					i, err);
		}
	}
}

static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(dev, "failed to get resets: %d\n", err);
		return err;
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phys_get(pcie);
		if (err < 0) {
			dev_err(dev, "failed to get PHYs: %d\n", err);
			return err;
		}
	}

	pcie->pads = devm_platform_ioremap_resource_byname(pdev, "pads");
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto phys_put;
	}

	pcie->afi = devm_platform_ioremap_resource_byname(pdev, "afi");
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto phys_put;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto phys_put;
	}

	pcie->cs = *res;

	/* constrain configuration space to 4 KiB */
	resource_set_size(&pcie->cs, SZ_4K);

	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
	if (IS_ERR(pcie->cfg)) {
		err = PTR_ERR(pcie->cfg);
		goto phys_put;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0)
		goto phys_put;

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(dev, "failed to register IRQ: %d\n", err);
		goto phys_put;
	}

	return 0;

phys_put:
	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);

	return err;
}

static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);

	return 0;
}

static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;
	u32 val;
	u8 ack_bit;

	val = afi_readl(pcie, AFI_PCIE_PME);
	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);

	ack_bit = soc->ports[port->index].pme.ack_bit;
	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
	if (err)
		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
			port->index);

	usleep_range(10000, 11000);

	val = afi_readl(pcie, AFI_PCIE_PME);
	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);
}

static void tegra_pcie_msi_irq(struct irq_desc *desc)
{
	struct tegra_pcie *pcie = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct tegra_msi *msi = &pcie->msi;
	struct device *dev = pcie->dev;
	unsigned int i;

	chained_irq_enter(chip, desc);

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC(i));

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			int ret;

			ret = generic_handle_domain_irq(msi->domain->parent, index);
			if (ret) {
				/*
				 * That's weird, who triggered this?
				 * Just clear it.
				 */
				dev_info(dev, "unexpected MSI\n");
				afi_writel(pcie, BIT(index % 32), AFI_MSI_VEC(index / 32));
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC(i));
		}
	}

	chained_irq_exit(chip, desc);
}
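
/*
 * For illustration: hardware vector 75 lives in AFI_MSI_VEC(2), bit 11,
 * since 75 = 2 * 32 + 11; the scan above recovers the global index as
 * index = i * 32 + offset.
 */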

static void tegra_msi_top_irq_ack(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void tegra_msi_top_irq_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void tegra_msi_top_irq_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip tegra_msi_top_chip = {
	.name		= "Tegra PCIe MSI",
	.irq_ack	= tegra_msi_top_irq_ack,
	.irq_mask	= tegra_msi_top_irq_mask,
	.irq_unmask	= tegra_msi_top_irq_unmask,
};

static void tegra_msi_irq_ack(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;

	/* clear the interrupt */
	afi_writel(pcie, BIT(d->hwirq % 32), AFI_MSI_VEC(index));
}

static void tegra_msi_irq_mask(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&msi->mask_lock, flags);
	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
	value &= ~BIT(d->hwirq % 32);
	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
	spin_unlock_irqrestore(&msi->mask_lock, flags);
}

static void tegra_msi_irq_unmask(struct irq_data *d)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	unsigned int index = d->hwirq / 32;
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&msi->mask_lock, flags);
	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
	value |= BIT(d->hwirq % 32);
	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
	spin_unlock_irqrestore(&msi->mask_lock, flags);
}

static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct tegra_msi *msi = irq_data_get_irq_chip_data(data);

	msg->address_lo = lower_32_bits(msi->phys);
	msg->address_hi = upper_32_bits(msi->phys);
	msg->data = data->hwirq;
}
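
/*
 * The message composed above makes an endpoint write the vector's hwirq
 * value to the 32-bit DMA address in msi->phys; the AFI window programmed
 * in tegra_pcie_enable_msi() catches that write and the corresponding bit
 * shows up in AFI_MSI_VEC(hwirq / 32), which tegra_pcie_msi_irq() then
 * handles.
 */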

static struct irq_chip tegra_msi_bottom_chip = {
	.name			= "Tegra MSI",
	.irq_ack		= tegra_msi_irq_ack,
	.irq_mask		= tegra_msi_irq_mask,
	.irq_unmask		= tegra_msi_irq_unmask,
	.irq_compose_msi_msg	= tegra_compose_msi_msg,
};

static int tegra_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *args)
{
	struct tegra_msi *msi = domain->host_data;
	unsigned int i;
	int hwirq;

	mutex_lock(&msi->map_lock);

	hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));

	mutex_unlock(&msi->map_lock);

	if (hwirq < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &tegra_msi_bottom_chip, domain->host_data,
				    handle_edge_irq, NULL, NULL);

	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}

static void tegra_msi_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct tegra_msi *msi = domain->host_data;

	mutex_lock(&msi->map_lock);

	bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));

	mutex_unlock(&msi->map_lock);
}

static const struct irq_domain_ops tegra_msi_domain_ops = {
	.alloc = tegra_msi_domain_alloc,
	.free = tegra_msi_domain_free,
};

static struct msi_domain_info tegra_msi_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
	.chip	= &tegra_msi_top_chip,
};

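/*
 * The MSI domains are stacked: the inner (parent) domain allocated below
 * manages the INT_PCI_MSI_NR hardware vectors through tegra_msi_domain_ops,
 * and pci_msi_create_irq_domain() layers the generic PCI MSI domain with
 * tegra_msi_top_chip on top of it.
 */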
static int tegra_allocate_domains(struct tegra_msi *msi)
{
	struct tegra_pcie *pcie = msi_to_pcie(msi);
	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
	struct irq_domain *parent;

	parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
					  &tegra_msi_domain_ops, msi);
	if (!parent) {
		dev_err(pcie->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}
	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);

	msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent);
	if (!msi->domain) {
		dev_err(pcie->dev, "failed to create MSI domain\n");
		irq_domain_remove(parent);
		return -ENOMEM;
	}

	return 0;
}

static void tegra_free_domains(struct tegra_msi *msi)
{
	struct irq_domain *parent = msi->domain->parent;

	irq_domain_remove(msi->domain);
	irq_domain_remove(parent);
}

static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct tegra_msi *msi = &pcie->msi;
	struct device *dev = pcie->dev;
	int err;

	mutex_init(&msi->map_lock);
	spin_lock_init(&msi->mask_lock);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = tegra_allocate_domains(msi);
		if (err)
			return err;
	}

	err = platform_get_irq_byname(pdev, "msi");
	if (err < 0)
		goto free_irq_domain;

	msi->irq = err;

	irq_set_chained_handler_and_data(msi->irq, tegra_pcie_msi_irq, pcie);

	/*
	 * Though the PCIe controller can address a >32-bit address space, to
	 * facilitate endpoints that support only a 32-bit MSI target address,
	 * the mask is set to 32 bits to make sure that the MSI target address
	 * is always a 32-bit address.
	 */
	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (err < 0) {
		dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
		goto free_irq;
	}

	msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
				    DMA_ATTR_NO_KERNEL_MAPPING);
	if (!msi->virt) {
		dev_err(dev, "failed to allocate DMA memory for MSI\n");
		err = -ENOMEM;
		goto free_irq;
	}

	return 0;

free_irq:
	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
free_irq_domain:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_free_domains(msi);

	return err;
}

static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_msi *msi = &pcie->msi;
	u32 reg, msi_state[INT_PCI_MSI_NR / 32];
	int i;

	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
	/* this register is in 4K increments */
	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);

	/* Restore the MSI allocation state */
	bitmap_to_arr32(msi_state, msi->used, INT_PCI_MSI_NR);
	for (i = 0; i < ARRAY_SIZE(msi_state); i++)
		afi_writel(pcie, msi_state[i], AFI_MSI_EN_VEC(i));

	/* and unmask the MSI interrupt */
	reg = afi_readl(pcie, AFI_INTR_MASK);
	reg |= AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, reg, AFI_INTR_MASK);
}

static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
{
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, irq;

	dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
		       DMA_ATTR_NO_KERNEL_MAPPING);

	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_domain_free_irqs(irq, 1);
	}

	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_free_domains(msi);
}

static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	u32 value;

	/* mask the MSI interrupt */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	return 0;
}

static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
{
	u32 value;

	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_INT_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);
}

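/*
 * The lanes value packs one byte per root port, port 0 in the least
 * significant byte. For example (illustrative), 0x010102 describes three
 * ports with 2, 1 and 1 lanes respectively, i.e. the "2x1, 1x1, 1x1"
 * configuration matched below.
 */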
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
				      u32 *xbar)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
		switch (lanes) {
		case 0x010004:
			dev_info(dev, "4x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
			return 0;

		case 0x010102:
			dev_info(dev, "2x1, 1x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
			return 0;

		case 0x010101:
			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
			return 0;

		default:
1873			dev_info(dev, "invalid lane configuration in DT, "
1874				 "falling back to default 2x1, 1x1, 1x1 "
1875				 "configuration\n");
1876			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1877			return 0;
1878		}
1879	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
1880		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1881		switch (lanes) {
1882		case 0x0000104:
1883			dev_info(dev, "4x1, 1x1 configuration\n");
1884			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1885			return 0;
1886
1887		case 0x0000102:
1888			dev_info(dev, "2x1, 1x1 configuration\n");
1889			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1890			return 0;
1891		}
1892	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1893		switch (lanes) {
1894		case 0x00000204:
1895			dev_info(dev, "4x1, 2x1 configuration\n");
1896			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1897			return 0;
1898
1899		case 0x00020202:
1900			dev_info(dev, "2x3 configuration\n");
1901			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1902			return 0;
1903
1904		case 0x00010104:
1905			dev_info(dev, "4x1, 1x2 configuration\n");
1906			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1907			return 0;
1908		}
1909	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1910		switch (lanes) {
1911		case 0x00000004:
1912			dev_info(dev, "single-mode configuration\n");
1913			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1914			return 0;
1915
1916		case 0x00000202:
1917			dev_info(dev, "dual-mode configuration\n");
1918			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1919			return 0;
1920		}
1921	}
1922
1923	return -EINVAL;
1924}
1925
1926/*
1927 * Check whether a given set of supplies is available in a device tree node.
1928 * This is used to check whether the new or the legacy device tree bindings
1929 * should be used.
1930 */
1931static bool of_regulator_bulk_available(struct device_node *np,
1932					struct regulator_bulk_data *supplies,
1933					unsigned int num_supplies)
1934{
1935	char property[32];
1936	unsigned int i;
1937
1938	for (i = 0; i < num_supplies; i++) {
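		/* a regulator named "xyz" maps to an "xyz-supply" property */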
1939		snprintf(property, 32, "%s-supply", supplies[i].supply);
1940
1941		if (!of_property_present(np, property))
1942			return false;
1943	}
1944
1945	return true;
1946}
1947
1948/*
1949 * Old versions of the device tree binding for this device used a set of power
1950 * supplies that didn't match the hardware inputs. This happened to work for a
1951 * number of cases but is not future-proof. However, to preserve backwards-
1952 * compatibility with old device trees, this function will try to use the old
1953 * set of supplies.
1954 */
1955static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1956{
1957	struct device *dev = pcie->dev;
1958	struct device_node *np = dev->of_node;
1959
1960	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1961		pcie->num_supplies = 3;
1962	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1963		pcie->num_supplies = 2;
1964
1965	if (pcie->num_supplies == 0) {
1966		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
1967		return -ENODEV;
1968	}
1969
1970	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1971				      sizeof(*pcie->supplies),
1972				      GFP_KERNEL);
1973	if (!pcie->supplies)
1974		return -ENOMEM;
1975
1976	pcie->supplies[0].supply = "pex-clk";
1977	pcie->supplies[1].supply = "vdd";
1978
1979	if (pcie->num_supplies > 2)
1980		pcie->supplies[2].supply = "avdd";
1981
1982	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
1983}
1984
1985/*
1986 * Obtains the list of regulators required for a particular generation of the
1987 * IP block.
1988 *
1989 * This would've been nice to do simply by providing static tables for use
1990 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1991 * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1992 * and either seems to be optional depending on which ports are being used.
1993 */
1994static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1995{
1996	struct device *dev = pcie->dev;
1997	struct device_node *np = dev->of_node;
1998	unsigned int i = 0;
1999
2000	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
2001		pcie->num_supplies = 4;
2002
2003		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2004					      sizeof(*pcie->supplies),
2005					      GFP_KERNEL);
2006		if (!pcie->supplies)
2007			return -ENOMEM;
2008
2009		pcie->supplies[i++].supply = "dvdd-pex";
2010		pcie->supplies[i++].supply = "hvdd-pex-pll";
2011		pcie->supplies[i++].supply = "hvdd-pex";
2012		pcie->supplies[i++].supply = "vddio-pexctl-aud";
2013	} else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
2014		pcie->num_supplies = 3;
2015
2016		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2017					      sizeof(*pcie->supplies),
2018					      GFP_KERNEL);
2019		if (!pcie->supplies)
2020			return -ENOMEM;
2021
2022		pcie->supplies[i++].supply = "hvddio-pex";
2023		pcie->supplies[i++].supply = "dvddio-pex";
2024		pcie->supplies[i++].supply = "vddio-pex-ctl";
2025	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
2026		pcie->num_supplies = 4;
2027
2028		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2029					      sizeof(*pcie->supplies),
2030					      GFP_KERNEL);
2031		if (!pcie->supplies)
2032			return -ENOMEM;
2033
2034		pcie->supplies[i++].supply = "avddio-pex";
2035		pcie->supplies[i++].supply = "dvddio-pex";
2036		pcie->supplies[i++].supply = "hvdd-pex";
2037		pcie->supplies[i++].supply = "vddio-pex-ctl";
2038	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
2039		bool need_pexa = false, need_pexb = false;
2040
2041		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
2042		if (lane_mask & 0x0f)
2043			need_pexa = true;
2044
2045		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
2046		if (lane_mask & 0x30)
2047			need_pexb = true;
2048
2049		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
2050					 (need_pexb ? 2 : 0);
2051
2052		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2053					      sizeof(*pcie->supplies),
2054					      GFP_KERNEL);
2055		if (!pcie->supplies)
2056			return -ENOMEM;
2057
2058		pcie->supplies[i++].supply = "avdd-pex-pll";
2059		pcie->supplies[i++].supply = "hvdd-pex";
2060		pcie->supplies[i++].supply = "vddio-pex-ctl";
2061		pcie->supplies[i++].supply = "avdd-plle";
2062
2063		if (need_pexa) {
2064			pcie->supplies[i++].supply = "avdd-pexa";
2065			pcie->supplies[i++].supply = "vdd-pexa";
2066		}
2067
2068		if (need_pexb) {
2069			pcie->supplies[i++].supply = "avdd-pexb";
2070			pcie->supplies[i++].supply = "vdd-pexb";
2071		}
2072	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
2073		pcie->num_supplies = 5;
2074
2075		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2076					      sizeof(*pcie->supplies),
2077					      GFP_KERNEL);
2078		if (!pcie->supplies)
2079			return -ENOMEM;
2080
2081		pcie->supplies[0].supply = "avdd-pex";
2082		pcie->supplies[1].supply = "vdd-pex";
2083		pcie->supplies[2].supply = "avdd-pex-pll";
2084		pcie->supplies[3].supply = "avdd-plle";
2085		pcie->supplies[4].supply = "vddio-pex-clk";
2086	}
2087
2088	if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
2089					pcie->num_supplies))
2090		return devm_regulator_bulk_get(dev, pcie->num_supplies,
2091					       pcie->supplies);
2092
2093	/*
2094	 * If not all regulators are available for this new scheme, assume
2095	 * that the device tree complies with an older version of the device
2096	 * tree binding.
2097	 */
2098	dev_info(dev, "using legacy DT binding for power supplies\n");
2099
2100	devm_kfree(dev, pcie->supplies);
2101	pcie->num_supplies = 0;
2102
2103	return tegra_pcie_get_legacy_regulators(pcie);
2104}
2105
2106static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2107{
2108	struct device *dev = pcie->dev;
2109	struct device_node *np = dev->of_node, *port;
2110	const struct tegra_pcie_soc *soc = pcie->soc;
2111	u32 lanes = 0, mask = 0;
2112	unsigned int lane = 0;
2113	int err;
2114
2115	/* parse root ports */
2116	for_each_child_of_node(np, port) {
2117		struct tegra_pcie_port *rp;
2118		unsigned int index;
2119		u32 value;
2120		char *label;
2121
2122		err = of_pci_get_devfn(port);
2123		if (err < 0) {
2124			dev_err(dev, "failed to parse address: %d\n", err);
2125			goto err_node_put;
2126		}
2127
2128		index = PCI_SLOT(err);
2129
2130		if (index < 1 || index > soc->num_ports) {
2131			dev_err(dev, "invalid port number: %d\n", index);
2132			err = -EINVAL;
2133			goto err_node_put;
2134		}
2135
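		/* root port N is addressed as device N + 1 on the root bus */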
2136		index--;
2137
2138		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
2139		if (err < 0) {
2140			dev_err(dev, "failed to parse # of lanes: %d\n",
2141				err);
2142			goto err_node_put;
2143		}
2144
2145		if (value > 16) {
2146			dev_err(dev, "invalid # of lanes: %u\n", value);
2147			err = -EINVAL;
2148			goto err_node_put;
2149		}
2150
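		/* pack this port's lane count into byte "index" of lanes */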
2151		lanes |= value << (index << 3);
2152
2153		if (!of_device_is_available(port)) {
2154			lane += value;
2155			continue;
2156		}
2157
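		/* record which physical lanes this enabled port occupies */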
2158		mask |= ((1 << value) - 1) << lane;
2159		lane += value;
2160
2161		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2162		if (!rp) {
2163			err = -ENOMEM;
2164			goto err_node_put;
2165		}
2166
2167		err = of_address_to_resource(port, 0, &rp->regs);
2168		if (err < 0) {
2169			dev_err(dev, "failed to parse address: %d\n", err);
2170			goto err_node_put;
2171		}
2172
2173		INIT_LIST_HEAD(&rp->list);
2174		rp->index = index;
2175		rp->lanes = value;
2176		rp->pcie = pcie;
2177		rp->np = port;
2178
2179		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
2180		if (IS_ERR(rp->base)) {
2181			err = PTR_ERR(rp->base);
2182			goto err_node_put;
2183		}
2184
2185		label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
2186		if (!label) {
2187			err = -ENOMEM;
2188			goto err_node_put;
2189		}
2190
2191		/*
2192		 * devm_fwnode_gpiod_get() returns -ENOENT if the reset-gpios
2193		 * property is not populated; in that case, fall back to using
2194		 * the per-port AFI register to toggle the PERST# SFIO line.
2195		 */
2196		rp->reset_gpio = devm_fwnode_gpiod_get(dev,
2197						       of_fwnode_handle(port),
2198						       "reset",
2199						       GPIOD_OUT_LOW,
2200						       label);
2201		if (IS_ERR(rp->reset_gpio)) {
2202			if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
2203				rp->reset_gpio = NULL;
2204			} else {
2205				dev_err(dev, "failed to get reset GPIO: %ld\n",
2206					PTR_ERR(rp->reset_gpio));
2207				err = PTR_ERR(rp->reset_gpio);
2208				goto err_node_put;
2209			}
2210		}
2211
2212		list_add_tail(&rp->list, &pcie->ports);
2213	}
2214
2215	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
2216	if (err < 0) {
2217		dev_err(dev, "invalid lane configuration\n");
2218		return err;
2219	}
2220
2221	err = tegra_pcie_get_regulators(pcie, mask);
2222	if (err < 0)
2223		return err;
2224
2225	return 0;
2226
2227err_node_put:
2228	of_node_put(port);
2229	return err;
2230}
2231
2232/*
2233 * FIXME: If there are no PCIe cards attached, then calling this function
2234 * can noticeably increase boot time, as it contains long timeout
2235 * loops.
2236 */
2237#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
2238static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
2239{
2240	struct device *dev = port->pcie->dev;
2241	unsigned int retries = 3;
2242	unsigned long value;
2243
2244	/* override presence detection */
2245	value = readl(port->base + RP_PRIV_MISC);
2246	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
2247	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
2248	writel(value, port->base + RP_PRIV_MISC);
2249
2250	do {
2251		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2252
2253		do {
2254			value = readl(port->base + RP_VEND_XP);
2255
2256			if (value & RP_VEND_XP_DL_UP)
2257				break;
2258
2259			usleep_range(1000, 2000);
2260		} while (--timeout);
2261
2262		if (!timeout) {
2263			dev_dbg(dev, "link %u down, retrying\n", port->index);
2264			goto retry;
2265		}
2266
2267		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2268
2269		do {
2270			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2271
2272			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2273				return true;
2274
2275			usleep_range(1000, 2000);
2276		} while (--timeout);
2277
2278retry:
2279		tegra_pcie_port_reset(port);
2280	} while (--retries);
2281
2282	return false;
2283}
2284
2285static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
2286{
2287	struct device *dev = pcie->dev;
2288	struct tegra_pcie_port *port;
2289	ktime_t deadline;
2290	u32 value;
2291
2292	list_for_each_entry(port, &pcie->ports, list) {
2293		/*
2294		 * The "Supported Link Speeds Vector" in "Link Capabilities 2"
2295		 * is not supported by Tegra. tegra_pcie_change_link_speed() is
2296		 * called only for Tegra chips which support Gen2, so there is
2297		 * no harm in not verifying the supported link speed here.
2298		 */
2299		value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
2300		value &= ~PCI_EXP_LNKSTA_CLS;
2301		value |= PCI_EXP_LNKSTA_CLS_5_0GB;
2302		writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
2303
2304		/*
2305		 * Poll until link comes back from recovery to avoid race
2306		 * condition.
2307		 */
2308		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2309
2310		while (ktime_before(ktime_get(), deadline)) {
2311			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2312			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2313				break;
2314
2315			usleep_range(2000, 3000);
2316		}
2317
2318		if (value & PCI_EXP_LNKSTA_LT)
2319			dev_warn(dev, "PCIe port %u link is in recovery\n",
2320				 port->index);
2321
2322		/* Retrain the link */
2323		value = readl(port->base + RP_LINK_CONTROL_STATUS);
2324		value |= PCI_EXP_LNKCTL_RL;
2325		writel(value, port->base + RP_LINK_CONTROL_STATUS);
2326
2327		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2328
2329		while (ktime_before(ktime_get(), deadline)) {
2330			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2331			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2332				break;
2333
2334			usleep_range(2000, 3000);
2335		}
2336
2337		if (value & PCI_EXP_LNKSTA_LT)
2338			dev_err(dev, "failed to retrain link of port %u\n",
2339				port->index);
2340	}
2341}
2342
2343static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
2344{
2345	struct device *dev = pcie->dev;
2346	struct tegra_pcie_port *port, *tmp;
2347
2348	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2349		dev_info(dev, "probing port %u, using %u lanes\n",
2350			 port->index, port->lanes);
2351
2352		tegra_pcie_port_enable(port);
2353	}
2354
2355	/* Start LTSSM from Tegra side */
2356	reset_control_deassert(pcie->pcie_xrst);
2357
2358	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2359		if (tegra_pcie_port_check_link(port))
2360			continue;
2361
2362		dev_info(dev, "link %u down, ignoring\n", port->index);
2363
2364		tegra_pcie_port_disable(port);
2365		tegra_pcie_port_free(port);
2366	}
2367
2368	if (pcie->soc->has_gen2)
2369		tegra_pcie_change_link_speed(pcie);
2370}
2371
2372static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
2373{
2374	struct tegra_pcie_port *port, *tmp;
2375
2376	reset_control_assert(pcie->pcie_xrst);
2377
2378	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2379		tegra_pcie_port_disable(port);
2380}
2381
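/*
 * The pme.turnoff_bit and pme.ack_bit values are bit positions within the
 * AFI_PCIE_PME register, used by tegra_pcie_pme_turnoff() to request and
 * then poll acknowledgement of PME turn-off per root port; the positions
 * differ between SoC generations.
 */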
2382static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
2383	{ .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
2384	{ .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
2385};
2386
2387static const struct tegra_pcie_soc tegra20_pcie = {
2388	.num_ports = 2,
2389	.ports = tegra20_pcie_ports,
2390	.msi_base_shift = 0,
2391	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2392	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2393	.pads_refclk_cfg0 = 0xfa5cfa5c,
2394	.has_pex_clkreq_en = false,
2395	.has_pex_bias_ctrl = false,
2396	.has_intr_prsnt_sense = false,
2397	.has_cml_clk = false,
2398	.has_gen2 = false,
2399	.force_pca_enable = false,
2400	.program_uphy = true,
2401	.update_clamp_threshold = false,
2402	.program_deskew_time = false,
2403	.update_fc_timer = false,
2404	.has_cache_bars = true,
2405	.ectl.enable = false,
2406};
2407
2408static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
2409	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2410	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2411	{ .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
2412};
2413
2414static const struct tegra_pcie_soc tegra30_pcie = {
2415	.num_ports = 3,
2416	.ports = tegra30_pcie_ports,
2417	.msi_base_shift = 8,
2418	.afi_pex2_ctrl = 0x128,
2419	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2420	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2421	.pads_refclk_cfg0 = 0xfa5cfa5c,
2422	.pads_refclk_cfg1 = 0xfa5cfa5c,
2423	.has_pex_clkreq_en = true,
2424	.has_pex_bias_ctrl = true,
2425	.has_intr_prsnt_sense = true,
2426	.has_cml_clk = true,
2427	.has_gen2 = false,
2428	.force_pca_enable = false,
2429	.program_uphy = true,
2430	.update_clamp_threshold = false,
2431	.program_deskew_time = false,
2432	.update_fc_timer = false,
2433	.has_cache_bars = false,
2434	.ectl.enable = false,
2435};
2436
2437static const struct tegra_pcie_soc tegra124_pcie = {
2438	.num_ports = 2,
2439	.ports = tegra20_pcie_ports,
2440	.msi_base_shift = 8,
2441	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2442	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2443	.pads_refclk_cfg0 = 0x44ac44ac,
2444	.has_pex_clkreq_en = true,
2445	.has_pex_bias_ctrl = true,
2446	.has_intr_prsnt_sense = true,
2447	.has_cml_clk = true,
2448	.has_gen2 = true,
2449	.force_pca_enable = false,
2450	.program_uphy = true,
2451	.update_clamp_threshold = true,
2452	.program_deskew_time = false,
2453	.update_fc_timer = false,
2454	.has_cache_bars = false,
2455	.ectl.enable = false,
2456};
2457
2458static const struct tegra_pcie_soc tegra210_pcie = {
2459	.num_ports = 2,
2460	.ports = tegra20_pcie_ports,
2461	.msi_base_shift = 8,
2462	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2463	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2464	.pads_refclk_cfg0 = 0x90b890b8,
2465	/* FC threshold is bit[25:18] */
2466	.update_fc_threshold = 0x01800000,
2467	.has_pex_clkreq_en = true,
2468	.has_pex_bias_ctrl = true,
2469	.has_intr_prsnt_sense = true,
2470	.has_cml_clk = true,
2471	.has_gen2 = true,
2472	.force_pca_enable = true,
2473	.program_uphy = true,
2474	.update_clamp_threshold = true,
2475	.program_deskew_time = true,
2476	.update_fc_timer = true,
2477	.has_cache_bars = false,
2478	.ectl = {
2479		.regs = {
2480			.rp_ectl_2_r1 = 0x0000000f,
2481			.rp_ectl_4_r1 = 0x00000067,
2482			.rp_ectl_5_r1 = 0x55010000,
2483			.rp_ectl_6_r1 = 0x00000001,
2484			.rp_ectl_2_r2 = 0x0000008f,
2485			.rp_ectl_4_r2 = 0x000000c7,
2486			.rp_ectl_5_r2 = 0x55010000,
2487			.rp_ectl_6_r2 = 0x00000001,
2488		},
2489		.enable = true,
2490	},
2491};
2492
2493static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
2494	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2495	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2496	{ .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
2497};
2498
2499static const struct tegra_pcie_soc tegra186_pcie = {
2500	.num_ports = 3,
2501	.ports = tegra186_pcie_ports,
2502	.msi_base_shift = 8,
2503	.afi_pex2_ctrl = 0x19c,
2504	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2505	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2506	.pads_refclk_cfg0 = 0x80b880b8,
2507	.pads_refclk_cfg1 = 0x000480b8,
2508	.has_pex_clkreq_en = true,
2509	.has_pex_bias_ctrl = true,
2510	.has_intr_prsnt_sense = true,
2511	.has_cml_clk = false,
2512	.has_gen2 = true,
2513	.force_pca_enable = false,
2514	.program_uphy = false,
2515	.update_clamp_threshold = false,
2516	.program_deskew_time = false,
2517	.update_fc_timer = false,
2518	.has_cache_bars = false,
2519	.ectl.enable = false,
2520};
2521
2522static const struct of_device_id tegra_pcie_of_match[] = {
2523	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
2524	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
2525	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
2526	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
2527	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
2528	{ },
2529};
2530MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
2531
2532static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2533{
2534	struct tegra_pcie *pcie = s->private;
2535
2536	if (list_empty(&pcie->ports))
2537		return NULL;
2538
2539	seq_puts(s, "Index  Status\n");
2540
2541	return seq_list_start(&pcie->ports, *pos);
2542}
2543
2544static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
2545{
2546	struct tegra_pcie *pcie = s->private;
2547
2548	return seq_list_next(v, &pcie->ports, pos);
2549}
2550
2551static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
2552{
2553}
2554
2555static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2556{
2557	bool up = false, active = false;
2558	struct tegra_pcie_port *port;
2559	unsigned int value;
2560
2561	port = list_entry(v, struct tegra_pcie_port, list);
2562
2563	value = readl(port->base + RP_VEND_XP);
2564
2565	if (value & RP_VEND_XP_DL_UP)
2566		up = true;
2567
2568	value = readl(port->base + RP_LINK_CONTROL_STATUS);
2569
2570	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2571		active = true;
2572
2573	seq_printf(s, "%2u     ", port->index);
2574
2575	if (up)
2576		seq_puts(s, "up");
2577
2578	if (active) {
2579		if (up)
2580			seq_puts(s, ", ");
2581
2582		seq_puts(s, "active");
2583	}
2584
2585	seq_puts(s, "\n");
2586	return 0;
2587}
2588
2589static const struct seq_operations tegra_pcie_ports_sops = {
2590	.start = tegra_pcie_ports_seq_start,
2591	.next = tegra_pcie_ports_seq_next,
2592	.stop = tegra_pcie_ports_seq_stop,
2593	.show = tegra_pcie_ports_seq_show,
2594};
2595
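/*
 * Generates tegra_pcie_ports_fops on top of tegra_pcie_ports_sops; the
 * data pointer handed to debugfs_create_file() below reaches the seq_file
 * callbacks above as s->private.
 */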
2596DEFINE_SEQ_ATTRIBUTE(tegra_pcie_ports);
2597
2598static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
2599{
2600	debugfs_remove_recursive(pcie->debugfs);
2601	pcie->debugfs = NULL;
2602}
2603
2604static void tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2605{
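	/* exposes per-port link state as /sys/kernel/debug/pcie/ports */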
2606	pcie->debugfs = debugfs_create_dir("pcie", NULL);
2607
2608	debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, pcie,
2609			    &tegra_pcie_ports_fops);
2610}
2611
2612static int tegra_pcie_probe(struct platform_device *pdev)
2613{
2614	struct device *dev = &pdev->dev;
2615	struct pci_host_bridge *host;
2616	struct tegra_pcie *pcie;
2617	int err;
2618
2619	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
2620	if (!host)
2621		return -ENOMEM;
2622
2623	pcie = pci_host_bridge_priv(host);
2624	host->sysdata = pcie;
2625	platform_set_drvdata(pdev, pcie);
2626
2627	pcie->soc = of_device_get_match_data(dev);
2628	INIT_LIST_HEAD(&pcie->ports);
2629	pcie->dev = dev;
2630
2631	err = tegra_pcie_parse_dt(pcie);
2632	if (err < 0)
2633		return err;
2634
2635	err = tegra_pcie_get_resources(pcie);
2636	if (err < 0) {
2637		dev_err(dev, "failed to request resources: %d\n", err);
2638		return err;
2639	}
2640
2641	err = tegra_pcie_msi_setup(pcie);
2642	if (err < 0) {
2643		dev_err(dev, "failed to enable MSI support: %d\n", err);
2644		goto put_resources;
2645	}
2646
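	/*
	 * pm_runtime_get_sync() increments the usage count even when it
	 * fails, which is why the error path below drops the reference
	 * with pm_runtime_put_sync().
	 */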
2647	pm_runtime_enable(pcie->dev);
2648	err = pm_runtime_get_sync(pcie->dev);
2649	if (err < 0) {
2650		dev_err(dev, "failed to enable PCIe controller: %d\n", err);
2651		goto pm_runtime_put;
2652	}
2653
2654	host->ops = &tegra_pcie_ops;
2655	host->map_irq = tegra_pcie_map_irq;
2656
2657	err = pci_host_probe(host);
2658	if (err < 0) {
2659		dev_err(dev, "failed to register host: %d\n", err);
2660		goto pm_runtime_put;
2661	}
2662
2663	if (IS_ENABLED(CONFIG_DEBUG_FS))
2664		tegra_pcie_debugfs_init(pcie);
2665
2666	return 0;
2667
2668pm_runtime_put:
2669	pm_runtime_put_sync(pcie->dev);
2670	pm_runtime_disable(pcie->dev);
2671	tegra_pcie_msi_teardown(pcie);
2672put_resources:
2673	tegra_pcie_put_resources(pcie);
2674	return err;
2675}
2676
2677static void tegra_pcie_remove(struct platform_device *pdev)
2678{
2679	struct tegra_pcie *pcie = platform_get_drvdata(pdev);
2680	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
2681	struct tegra_pcie_port *port, *tmp;
2682
2683	if (IS_ENABLED(CONFIG_DEBUG_FS))
2684		tegra_pcie_debugfs_exit(pcie);
2685
2686	pci_stop_root_bus(host->bus);
2687	pci_remove_root_bus(host->bus);
2688	pm_runtime_put_sync(pcie->dev);
2689	pm_runtime_disable(pcie->dev);
2690
2691	if (IS_ENABLED(CONFIG_PCI_MSI))
2692		tegra_pcie_msi_teardown(pcie);
2693
2694	tegra_pcie_put_resources(pcie);
2695
2696	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2697		tegra_pcie_port_free(port);
2698}
2699
2700static int tegra_pcie_pm_suspend(struct device *dev)
2701{
2702	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2703	struct tegra_pcie_port *port;
2704	int err;
2705
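	/* send PME_Turn_Off so downstream devices can enter L2/L3 ready */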
2706	list_for_each_entry(port, &pcie->ports, list)
2707		tegra_pcie_pme_turnoff(port);
2708
2709	tegra_pcie_disable_ports(pcie);
2710
2711	/*
2712	 * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
2713	 * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
2714	 */
2715	tegra_pcie_disable_interrupts(pcie);
2716
2717	if (pcie->soc->program_uphy) {
2718		err = tegra_pcie_phy_power_off(pcie);
2719		if (err < 0)
2720			dev_err(dev, "failed to power off PHY(s): %d\n", err);
2721	}
2722
2723	reset_control_assert(pcie->pex_rst);
2724	clk_disable_unprepare(pcie->pex_clk);
2725
2726	if (IS_ENABLED(CONFIG_PCI_MSI))
2727		tegra_pcie_disable_msi(pcie);
2728
2729	pinctrl_pm_select_idle_state(dev);
2730	tegra_pcie_power_off(pcie);
2731
2732	return 0;
2733}
2734
2735static int tegra_pcie_pm_resume(struct device *dev)
2736{
2737	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2738	int err;
2739
2740	err = tegra_pcie_power_on(pcie);
2741	if (err) {
2742		dev_err(dev, "failed to power on Tegra PCIe: %d\n", err);
2743		return err;
2744	}
2745
2746	err = pinctrl_pm_select_default_state(dev);
2747	if (err < 0) {
2748		dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
2749		goto poweroff;
2750	}
2751
2752	tegra_pcie_enable_controller(pcie);
2753	tegra_pcie_setup_translations(pcie);
2754
2755	if (IS_ENABLED(CONFIG_PCI_MSI))
2756		tegra_pcie_enable_msi(pcie);
2757
2758	err = clk_prepare_enable(pcie->pex_clk);
2759	if (err) {
2760		dev_err(dev, "failed to enable PEX clock: %d\n", err);
2761		goto pex_dpd_enable;
2762	}
2763
2764	reset_control_deassert(pcie->pex_rst);
2765
2766	if (pcie->soc->program_uphy) {
2767		err = tegra_pcie_phy_power_on(pcie);
2768		if (err < 0) {
2769			dev_err(dev, "failed to power on PHY(s): %d\n", err);
2770			goto disable_pex_clk;
2771		}
2772	}
2773
2774	tegra_pcie_apply_pad_settings(pcie);
2775	tegra_pcie_enable_ports(pcie);
2776
2777	return 0;
2778
2779disable_pex_clk:
2780	reset_control_assert(pcie->pex_rst);
2781	clk_disable_unprepare(pcie->pex_clk);
2782pex_dpd_enable:
2783	pinctrl_pm_select_idle_state(dev);
2784poweroff:
2785	tegra_pcie_power_off(pcie);
2786
2787	return err;
2788}
2789
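/*
 * The same suspend/resume pair serves both runtime PM and the noirq phase
 * of system sleep: the controller is powered off completely in either
 * case and reprogrammed from scratch on resume.
 */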
2790static const struct dev_pm_ops tegra_pcie_pm_ops = {
2791	RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
2792	NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume)
2793};
2794
2795static struct platform_driver tegra_pcie_driver = {
2796	.driver = {
2797		.name = "tegra-pcie",
2798		.of_match_table = tegra_pcie_of_match,
2799		.suppress_bind_attrs = true,
2800		.pm = &tegra_pcie_pm_ops,
2801	},
2802	.probe = tegra_pcie_probe,
2803	.remove = tegra_pcie_remove,
2804};
2805module_platform_driver(tegra_pcie_driver);
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * PCIe host controller driver for Tegra SoCs
   4 *
   5 * Copyright (c) 2010, CompuLab, Ltd.
   6 * Author: Mike Rapoport <mike@compulab.co.il>
   7 *
   8 * Based on NVIDIA PCIe driver
   9 * Copyright (c) 2008-2009, NVIDIA Corporation.
  10 *
  11 * Bits taken from arch/arm/mach-dove/pcie.c
  12 *
  13 * Author: Thierry Reding <treding@nvidia.com>
  14 */
  15
  16#include <linux/clk.h>
  17#include <linux/debugfs.h>
  18#include <linux/delay.h>
  19#include <linux/export.h>
  20#include <linux/gpio/consumer.h>
  21#include <linux/interrupt.h>
  22#include <linux/iopoll.h>
  23#include <linux/irq.h>
  24#include <linux/irqchip/chained_irq.h>
  25#include <linux/irqdomain.h>
  26#include <linux/kernel.h>
  27#include <linux/init.h>
  28#include <linux/module.h>
  29#include <linux/msi.h>
  30#include <linux/of_address.h>
  31#include <linux/of_pci.h>
  32#include <linux/of_platform.h>
  33#include <linux/pci.h>
  34#include <linux/phy/phy.h>
  35#include <linux/pinctrl/consumer.h>
  36#include <linux/platform_device.h>
  37#include <linux/reset.h>
  38#include <linux/sizes.h>
  39#include <linux/slab.h>
  40#include <linux/vmalloc.h>
  41#include <linux/regulator/consumer.h>
  42
  43#include <soc/tegra/cpuidle.h>
  44#include <soc/tegra/pmc.h>
  45
  46#include "../pci.h"
  47
  48#define INT_PCI_MSI_NR (8 * 32)
  49
  50/* register definitions */
  51
  52#define AFI_AXI_BAR0_SZ	0x00
  53#define AFI_AXI_BAR1_SZ	0x04
  54#define AFI_AXI_BAR2_SZ	0x08
  55#define AFI_AXI_BAR3_SZ	0x0c
  56#define AFI_AXI_BAR4_SZ	0x10
  57#define AFI_AXI_BAR5_SZ	0x14
  58
  59#define AFI_AXI_BAR0_START	0x18
  60#define AFI_AXI_BAR1_START	0x1c
  61#define AFI_AXI_BAR2_START	0x20
  62#define AFI_AXI_BAR3_START	0x24
  63#define AFI_AXI_BAR4_START	0x28
  64#define AFI_AXI_BAR5_START	0x2c
  65
  66#define AFI_FPCI_BAR0	0x30
  67#define AFI_FPCI_BAR1	0x34
  68#define AFI_FPCI_BAR2	0x38
  69#define AFI_FPCI_BAR3	0x3c
  70#define AFI_FPCI_BAR4	0x40
  71#define AFI_FPCI_BAR5	0x44
  72
  73#define AFI_CACHE_BAR0_SZ	0x48
  74#define AFI_CACHE_BAR0_ST	0x4c
  75#define AFI_CACHE_BAR1_SZ	0x50
  76#define AFI_CACHE_BAR1_ST	0x54
  77
  78#define AFI_MSI_BAR_SZ		0x60
  79#define AFI_MSI_FPCI_BAR_ST	0x64
  80#define AFI_MSI_AXI_BAR_ST	0x68
  81
  82#define AFI_MSI_VEC(x)		(0x6c + ((x) * 4))
  83#define AFI_MSI_EN_VEC(x)	(0x8c + ((x) * 4))
  84
  85#define AFI_CONFIGURATION		0xac
  86#define  AFI_CONFIGURATION_EN_FPCI		(1 << 0)
  87#define  AFI_CONFIGURATION_CLKEN_OVERRIDE	(1 << 31)
  88
  89#define AFI_FPCI_ERROR_MASKS	0xb0
  90
  91#define AFI_INTR_MASK		0xb4
  92#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
  93#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)
  94
  95#define AFI_INTR_CODE			0xb8
  96#define  AFI_INTR_CODE_MASK		0xf
  97#define  AFI_INTR_INI_SLAVE_ERROR	1
  98#define  AFI_INTR_INI_DECODE_ERROR	2
  99#define  AFI_INTR_TARGET_ABORT		3
 100#define  AFI_INTR_MASTER_ABORT		4
 101#define  AFI_INTR_INVALID_WRITE		5
 102#define  AFI_INTR_LEGACY		6
 103#define  AFI_INTR_FPCI_DECODE_ERROR	7
 104#define  AFI_INTR_AXI_DECODE_ERROR	8
 105#define  AFI_INTR_FPCI_TIMEOUT		9
 106#define  AFI_INTR_PE_PRSNT_SENSE	10
 107#define  AFI_INTR_PE_CLKREQ_SENSE	11
 108#define  AFI_INTR_CLKCLAMP_SENSE	12
 109#define  AFI_INTR_RDY4PD_SENSE		13
 110#define  AFI_INTR_P2P_ERROR		14
 111
 112#define AFI_INTR_SIGNATURE	0xbc
 113#define AFI_UPPER_FPCI_ADDRESS	0xc0
 114#define AFI_SM_INTR_ENABLE	0xc4
 115#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
 116#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
 117#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
 118#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
 119#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
 120#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
 121#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
 122#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)
 123
 124#define AFI_AFI_INTR_ENABLE		0xc8
 125#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
 126#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
 127#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
 128#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
 129#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
 130#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
 131#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
 132#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
 133#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)
 134
 135#define AFI_PCIE_PME		0xf0
 136
 137#define AFI_PCIE_CONFIG					0x0f8
 138#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
 139#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
 140#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
 141#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
 142#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
 143#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
 144#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401	(0x0 << 20)
 145#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
 146#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
 147#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
 148#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211	(0x1 << 20)
 149#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
 150#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111	(0x2 << 20)
 151#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x)		(1 << ((x) + 29))
 152#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL		(0x7 << 29)
 153
 154#define AFI_FUSE			0x104
 155#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)
 156
 157#define AFI_PEX0_CTRL			0x110
 158#define AFI_PEX1_CTRL			0x118
 159#define  AFI_PEX_CTRL_RST		(1 << 0)
 160#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
 161#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
 162#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)
 163
 164#define AFI_PLLE_CONTROL		0x160
 165#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
 166#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
 167
 168#define AFI_PEXBIAS_CTRL_0		0x168
 169
 170#define RP_ECTL_2_R1	0x00000e84
 171#define  RP_ECTL_2_R1_RX_CTLE_1C_MASK		0xffff
 172
 173#define RP_ECTL_4_R1	0x00000e8c
 174#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
 175#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT	16
 176
 177#define RP_ECTL_5_R1	0x00000e90
 178#define  RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK	0xffffffff
 179
 180#define RP_ECTL_6_R1	0x00000e94
 181#define  RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK	0xffffffff
 182
 183#define RP_ECTL_2_R2	0x00000ea4
 184#define  RP_ECTL_2_R2_RX_CTLE_1C_MASK	0xffff
 185
 186#define RP_ECTL_4_R2	0x00000eac
 187#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
 188#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT	16
 189
 190#define RP_ECTL_5_R2	0x00000eb0
 191#define  RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK	0xffffffff
 192
 193#define RP_ECTL_6_R2	0x00000eb4
 194#define  RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK	0xffffffff
 195
 196#define RP_VEND_XP	0x00000f00
 197#define  RP_VEND_XP_DL_UP			(1 << 30)
 198#define  RP_VEND_XP_OPPORTUNISTIC_ACK		(1 << 27)
 199#define  RP_VEND_XP_OPPORTUNISTIC_UPDATEFC	(1 << 28)
 200#define  RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK	(0xff << 18)
 201
 202#define RP_VEND_CTL0	0x00000f44
 203#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK	(0xf << 12)
 204#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH	(0x9 << 12)
 205
 206#define RP_VEND_CTL1	0x00000f48
 207#define  RP_VEND_CTL1_ERPT	(1 << 13)
 208
 209#define RP_VEND_XP_BIST	0x00000f4c
 210#define  RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE	(1 << 28)
 211
 212#define RP_VEND_CTL2 0x00000fa8
 213#define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)
 214
 215#define RP_PRIV_MISC	0x00000fe0
 216#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT		(0xe << 0)
 217#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT		(0xf << 0)
 218#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 16)
 219#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD		(0xf << 16)
 220#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE		(1 << 23)
 221#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 24)
 222#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD		(0xf << 24)
 223#define  RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE		(1 << 31)
 224
 225#define RP_LINK_CONTROL_STATUS			0x00000090
 226#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
 227#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000
 228
 229#define RP_LINK_CONTROL_STATUS_2		0x000000b0
 230
 231#define PADS_CTL_SEL		0x0000009c
 232
 233#define PADS_CTL		0x000000a0
 234#define  PADS_CTL_IDDQ_1L	(1 << 0)
 235#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
 236#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)
 237
 238#define PADS_PLL_CTL_TEGRA20			0x000000b8
 239#define PADS_PLL_CTL_TEGRA30			0x000000b4
 240#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
 241#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
 242#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
 243#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
 244#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
 245#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
 246#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
 247#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
 248#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
 249#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)
 250
 251#define PADS_REFCLK_CFG0			0x000000c8
 252#define PADS_REFCLK_CFG1			0x000000cc
 253#define PADS_REFCLK_BIAS			0x000000d0
 254
 255/*
 256 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 257 * entries, one entry per PCIe port. These field definitions and desired
 258 * values aren't in the TRM, but do come from NVIDIA.
 259 */
 260#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
 261#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
 262#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
 263#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
 264
 265#define PME_ACK_TIMEOUT 10000
 266#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */
 267
 268struct tegra_msi {
 269	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
 270	struct irq_domain *domain;
 271	struct mutex map_lock;
 272	spinlock_t mask_lock;
 273	void *virt;
 274	dma_addr_t phys;
 275	int irq;
 276};
 277
 278/* used to differentiate between Tegra SoC generations */
 279struct tegra_pcie_port_soc {
 280	struct {
 281		u8 turnoff_bit;
 282		u8 ack_bit;
 283	} pme;
 284};
 285
 286struct tegra_pcie_soc {
 287	unsigned int num_ports;
 288	const struct tegra_pcie_port_soc *ports;
 289	unsigned int msi_base_shift;
 290	unsigned long afi_pex2_ctrl;
 291	u32 pads_pll_ctl;
 292	u32 tx_ref_sel;
 293	u32 pads_refclk_cfg0;
 294	u32 pads_refclk_cfg1;
 295	u32 update_fc_threshold;
 296	bool has_pex_clkreq_en;
 297	bool has_pex_bias_ctrl;
 298	bool has_intr_prsnt_sense;
 299	bool has_cml_clk;
 300	bool has_gen2;
 301	bool force_pca_enable;
 302	bool program_uphy;
 303	bool update_clamp_threshold;
 304	bool program_deskew_time;
 305	bool update_fc_timer;
 306	bool has_cache_bars;
 307	struct {
 308		struct {
 309			u32 rp_ectl_2_r1;
 310			u32 rp_ectl_4_r1;
 311			u32 rp_ectl_5_r1;
 312			u32 rp_ectl_6_r1;
 313			u32 rp_ectl_2_r2;
 314			u32 rp_ectl_4_r2;
 315			u32 rp_ectl_5_r2;
 316			u32 rp_ectl_6_r2;
 317		} regs;
 318		bool enable;
 319	} ectl;
 320};
 321
 322struct tegra_pcie {
 323	struct device *dev;
 324
 325	void __iomem *pads;
 326	void __iomem *afi;
 327	void __iomem *cfg;
 328	int irq;
 329
 330	struct resource cs;
 331
 332	struct clk *pex_clk;
 333	struct clk *afi_clk;
 334	struct clk *pll_e;
 335	struct clk *cml_clk;
 336
 337	struct reset_control *pex_rst;
 338	struct reset_control *afi_rst;
 339	struct reset_control *pcie_xrst;
 340
 341	bool legacy_phy;
 342	struct phy *phy;
 343
 344	struct tegra_msi msi;
 345
 346	struct list_head ports;
 347	u32 xbar_config;
 348
 349	struct regulator_bulk_data *supplies;
 350	unsigned int num_supplies;
 351
 352	const struct tegra_pcie_soc *soc;
 353	struct dentry *debugfs;
 354};
 355
 356static inline struct tegra_pcie *msi_to_pcie(struct tegra_msi *msi)
 357{
 358	return container_of(msi, struct tegra_pcie, msi);
 359}
 360
 361struct tegra_pcie_port {
 362	struct tegra_pcie *pcie;
 363	struct device_node *np;
 364	struct list_head list;
 365	struct resource regs;
 366	void __iomem *base;
 367	unsigned int index;
 368	unsigned int lanes;
 369
 370	struct phy **phys;
 371
 372	struct gpio_desc *reset_gpio;
 373};
 374
 375static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
 376			      unsigned long offset)
 377{
 378	writel(value, pcie->afi + offset);
 379}
 380
 381static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
 382{
 383	return readl(pcie->afi + offset);
 384}
 385
 386static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
 387			       unsigned long offset)
 388{
 389	writel(value, pcie->pads + offset);
 390}
 391
 392static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
 393{
 394	return readl(pcie->pads + offset);
 395}
 396
 397/*
 398 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 399 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
 400 * register accesses are mapped:
 401 *
 402 *    [27:24] extended register number
 403 *    [23:16] bus number
 404 *    [15:11] device number
 405 *    [10: 8] function number
 406 *    [ 7: 0] register number
 407 *
 408 * Mapping the whole extended configuration space would require 256 MiB of
 409 * virtual address space, only a small part of which will actually be used.
 410 *
 411 * To work around this, a 4 KiB region is used to generate the required
 412 * configuration transaction with relevant B:D:F and register offset values.
 413 * This is achieved by dynamically programming base address and size of
 414 * AFI_AXI_BAR used for end point config space mapping to make sure that the
 415 * address (access to which generates correct config transaction) falls in
 416 * this 4 KiB region.
 417 */
 418static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
 419					   unsigned int where)
 420{
 421	return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
 422	       (PCI_FUNC(devfn) << 8) | (where & 0xff);
 423}
 424
 425static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
 426					unsigned int devfn,
 427					int where)
 428{
 429	struct tegra_pcie *pcie = bus->sysdata;
 430	void __iomem *addr = NULL;
 431
 432	if (bus->number == 0) {
 433		unsigned int slot = PCI_SLOT(devfn);
 434		struct tegra_pcie_port *port;
 435
 436		list_for_each_entry(port, &pcie->ports, list) {
 437			if (port->index + 1 == slot) {
 438				addr = port->base + (where & ~3);
 439				break;
 440			}
 441		}
 442	} else {
 443		unsigned int offset;
 444		u32 base;
 445
 446		offset = tegra_pcie_conf_offset(bus->number, devfn, where);
 447
 448		/* move 4 KiB window to offset within the FPCI region */
 449		base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
 450		afi_writel(pcie, base, AFI_FPCI_BAR0);
 451
 452		/* move to correct offset within the 4 KiB page */
 453		addr = pcie->cfg + (offset & (SZ_4K - 1));
 454	}
 455
 456	return addr;
 457}
 458
 459static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
 460				  int where, int size, u32 *value)
 461{
 462	if (bus->number == 0)
 463		return pci_generic_config_read32(bus, devfn, where, size,
 464						 value);
 465
 466	return pci_generic_config_read(bus, devfn, where, size, value);
 467}
 468
 469static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
 470				   int where, int size, u32 value)
 471{
 472	if (bus->number == 0)
 473		return pci_generic_config_write32(bus, devfn, where, size,
 474						  value);
 475
 476	return pci_generic_config_write(bus, devfn, where, size, value);
 477}
 478
 479static struct pci_ops tegra_pcie_ops = {
 480	.map_bus = tegra_pcie_map_bus,
 481	.read = tegra_pcie_config_read,
 482	.write = tegra_pcie_config_write,
 483};
 484
 485static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
 486{
 487	const struct tegra_pcie_soc *soc = port->pcie->soc;
 488	unsigned long ret = 0;
 489
 490	switch (port->index) {
 491	case 0:
 492		ret = AFI_PEX0_CTRL;
 493		break;
 494
 495	case 1:
 496		ret = AFI_PEX1_CTRL;
 497		break;
 498
 499	case 2:
 500		ret = soc->afi_pex2_ctrl;
 501		break;
 502	}
 503
 504	return ret;
 505}
 506
 507static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
 508{
 509	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
 510	unsigned long value;
 511
 512	/* pulse reset signal */
 513	if (port->reset_gpio) {
 514		gpiod_set_value(port->reset_gpio, 1);
 515	} else {
 516		value = afi_readl(port->pcie, ctrl);
 517		value &= ~AFI_PEX_CTRL_RST;
 518		afi_writel(port->pcie, value, ctrl);
 519	}
 520
 521	usleep_range(1000, 2000);
 522
 523	if (port->reset_gpio) {
 524		gpiod_set_value(port->reset_gpio, 0);
 525	} else {
 526		value = afi_readl(port->pcie, ctrl);
 527		value |= AFI_PEX_CTRL_RST;
 528		afi_writel(port->pcie, value, ctrl);
 529	}
 530}
 531
 532static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
 533{
 534	const struct tegra_pcie_soc *soc = port->pcie->soc;
 535	u32 value;
 536
 537	/* Enable AER capability */
 538	value = readl(port->base + RP_VEND_CTL1);
 539	value |= RP_VEND_CTL1_ERPT;
 540	writel(value, port->base + RP_VEND_CTL1);
 541
 542	/* Optimal settings to enhance bandwidth */
 543	value = readl(port->base + RP_VEND_XP);
 544	value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
 545	value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
 546	writel(value, port->base + RP_VEND_XP);
 547
 548	/*
 549	 * LTSSM will wait for DLLP to finish before entering L1 or L2,
 550	 * to avoid truncation of PM messages which results in receiver errors
 551	 */
 552	value = readl(port->base + RP_VEND_XP_BIST);
 553	value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
 554	writel(value, port->base + RP_VEND_XP_BIST);
 555
 556	value = readl(port->base + RP_PRIV_MISC);
 557	value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
 558	value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;
 559
 560	if (soc->update_clamp_threshold) {
 561		value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
 562				RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
 563		value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
 564			RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
 565	}
 566
 567	writel(value, port->base + RP_PRIV_MISC);
 568}
 569
 570static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
 571{
 572	const struct tegra_pcie_soc *soc = port->pcie->soc;
 573	u32 value;
 574
 575	value = readl(port->base + RP_ECTL_2_R1);
 576	value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
 577	value |= soc->ectl.regs.rp_ectl_2_r1;
 578	writel(value, port->base + RP_ECTL_2_R1);
 579
 580	value = readl(port->base + RP_ECTL_4_R1);
 581	value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
 582	value |= soc->ectl.regs.rp_ectl_4_r1 <<
 583				RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
 584	writel(value, port->base + RP_ECTL_4_R1);
 585
 586	value = readl(port->base + RP_ECTL_5_R1);
 587	value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
 588	value |= soc->ectl.regs.rp_ectl_5_r1;
 589	writel(value, port->base + RP_ECTL_5_R1);
 590
 591	value = readl(port->base + RP_ECTL_6_R1);
 592	value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
 593	value |= soc->ectl.regs.rp_ectl_6_r1;
 594	writel(value, port->base + RP_ECTL_6_R1);
 595
 596	value = readl(port->base + RP_ECTL_2_R2);
 597	value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
 598	value |= soc->ectl.regs.rp_ectl_2_r2;
 599	writel(value, port->base + RP_ECTL_2_R2);
 600
 601	value = readl(port->base + RP_ECTL_4_R2);
 602	value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
 603	value |= soc->ectl.regs.rp_ectl_4_r2 <<
 604				RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
 605	writel(value, port->base + RP_ECTL_4_R2);
 606
 607	value = readl(port->base + RP_ECTL_5_R2);
 608	value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
 609	value |= soc->ectl.regs.rp_ectl_5_r2;
 610	writel(value, port->base + RP_ECTL_5_R2);
 611
 612	value = readl(port->base + RP_ECTL_6_R2);
 613	value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
 614	value |= soc->ectl.regs.rp_ectl_6_r2;
 615	writel(value, port->base + RP_ECTL_6_R2);
 616}
 617
 618static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
 619{
 620	const struct tegra_pcie_soc *soc = port->pcie->soc;
 621	u32 value;
 622
 623	/*
 624	 * Sometimes link speed change from Gen2 to Gen1 fails due to
 625	 * instability in deskew logic on lane-0. Increase the deskew
 626	 * retry time to resolve this issue.
 627	 */
 628	if (soc->program_deskew_time) {
 629		value = readl(port->base + RP_VEND_CTL0);
 630		value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
 631		value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
 632		writel(value, port->base + RP_VEND_CTL0);
 633	}
 634
 635	if (soc->update_fc_timer) {
 636		value = readl(port->base + RP_VEND_XP);
 637		value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
 638		value |= soc->update_fc_threshold;
 639		writel(value, port->base + RP_VEND_XP);
 640	}
 641
 642	/*
 643	 * PCIe link doesn't come up with few legacy PCIe endpoints if
 644	 * root port advertises both Gen-1 and Gen-2 speeds in Tegra.
 645	 * Hence, the strategy followed here is to initially advertise
 646	 * only Gen-1 and after link is up, retrain link to Gen-2 speed
 647	 */
 648	value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
 649	value &= ~PCI_EXP_LNKSTA_CLS;
 650	value |= PCI_EXP_LNKSTA_CLS_2_5GB;
 651	writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
 652}
 653
 654static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
 655{
 656	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
 657	const struct tegra_pcie_soc *soc = port->pcie->soc;
 658	unsigned long value;
 659
 660	/* enable reference clock */
 661	value = afi_readl(port->pcie, ctrl);
 662	value |= AFI_PEX_CTRL_REFCLK_EN;
 663
 664	if (soc->has_pex_clkreq_en)
 665		value |= AFI_PEX_CTRL_CLKREQ_EN;
 666
 667	value |= AFI_PEX_CTRL_OVERRIDE_EN;
 668
 669	afi_writel(port->pcie, value, ctrl);
 670
 671	tegra_pcie_port_reset(port);
 672
 673	if (soc->force_pca_enable) {
 674		value = readl(port->base + RP_VEND_CTL2);
 675		value |= RP_VEND_CTL2_PCA_ENABLE;
 676		writel(value, port->base + RP_VEND_CTL2);
 677	}
 678
 679	tegra_pcie_enable_rp_features(port);
 680
 681	if (soc->ectl.enable)
 682		tegra_pcie_program_ectl_settings(port);
 683
 684	tegra_pcie_apply_sw_fixup(port);
 685}
 686
 687static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
 688{
 689	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
 690	const struct tegra_pcie_soc *soc = port->pcie->soc;
 691	unsigned long value;
 692
 693	/* assert port reset */
 694	value = afi_readl(port->pcie, ctrl);
 695	value &= ~AFI_PEX_CTRL_RST;
 696	afi_writel(port->pcie, value, ctrl);
 697
 698	/* disable reference clock */
 699	value = afi_readl(port->pcie, ctrl);
 700
 701	if (soc->has_pex_clkreq_en)
 702		value &= ~AFI_PEX_CTRL_CLKREQ_EN;
 703
 704	value &= ~AFI_PEX_CTRL_REFCLK_EN;
 705	afi_writel(port->pcie, value, ctrl);
 706
 707	/* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
 708	value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
 709	value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
 710	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
 711	afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
 712}
 713
 714static void tegra_pcie_port_free(struct tegra_pcie_port *port)
 715{
 716	struct tegra_pcie *pcie = port->pcie;
 717	struct device *dev = pcie->dev;
 718
 719	devm_iounmap(dev, port->base);
 720	devm_release_mem_region(dev, port->regs.start,
 721				resource_size(&port->regs));
 722	list_del(&port->list);
 723	devm_kfree(dev, port);
 724}
 725
 726/* Tegra PCIE root complex wrongly reports device class */
 727static void tegra_pcie_fixup_class(struct pci_dev *dev)
 728{
 729	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
 730}
 731DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
 732DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
 733DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
 734DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
 735
 736/* Tegra20 and Tegra30 PCIE requires relaxed ordering */
 737static void tegra_pcie_relax_enable(struct pci_dev *dev)
 738{
 739	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
 740}
 741DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
 742DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
 743DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
 744DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
 745
 746static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
 747{
 748	struct tegra_pcie *pcie = pdev->bus->sysdata;
 749	int irq;
 750
 751	tegra_cpuidle_pcie_irqs_in_use();
 752
 753	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
 754	if (!irq)
 755		irq = pcie->irq;
 756
 757	return irq;
 758}
 759
 760static irqreturn_t tegra_pcie_isr(int irq, void *arg)
 761{
 762	static const char * const err_msg[] = {
 763		"Unknown",
 764		"AXI slave error",
 765		"AXI decode error",
 766		"Target abort",
 767		"Master abort",
 768		"Invalid write",
 769		"Legacy interrupt",
 770		"Response decoding error",
 771		"AXI response decoding error",
 772		"Transaction timeout",
 773		"Slot present pin change",
 774		"Slot clock request change",
 775		"TMS clock ramp change",
 776		"TMS ready for power down",
 777		"Peer2Peer error",
 778	};
 779	struct tegra_pcie *pcie = arg;
 780	struct device *dev = pcie->dev;
 781	u32 code, signature;
 782
 783	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
 784	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
 785	afi_writel(pcie, 0, AFI_INTR_CODE);
 786
 787	if (code == AFI_INTR_LEGACY)
 788		return IRQ_NONE;
 789
 790	if (code >= ARRAY_SIZE(err_msg))
 791		code = 0;
 792
 793	/*
 794	 * do not pollute kernel log with master abort reports since they
 795	 * happen a lot during enumeration
 796	 */
 797	if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
 798		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
 799	else
 800		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
 801
 802	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
 803	    code == AFI_INTR_FPCI_DECODE_ERROR) {
 804		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
 805		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
 806
 807		if (code == AFI_INTR_MASTER_ABORT)
 808			dev_dbg(dev, "  FPCI address: %10llx\n", address);
 809		else
 810			dev_err(dev, "  FPCI address: %10llx\n", address);
 811	}
 812
 813	return IRQ_HANDLED;
 814}
 815
 816/*
 817 * FPCI map is as follows:
 818 * - 0xfdfc000000: I/O space
 819 * - 0xfdfe000000: type 0 configuration space
 820 * - 0xfdff000000: type 1 configuration space
 821 * - 0xfe00000000: type 0 extended configuration space
 822 * - 0xfe10000000: type 1 extended configuration space
 823 */
 824static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
 825{
 826	u32 size;
 827	struct resource_entry *entry;
 828	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
 829
 830	/* Bar 0: type 1 extended configuration space */
 831	size = resource_size(&pcie->cs);
 832	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
 833	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
 834
 835	resource_list_for_each_entry(entry, &bridge->windows) {
 836		u32 fpci_bar, axi_address;
 837		struct resource *res = entry->res;
 838
 839		size = resource_size(res);
 840
 841		switch (resource_type(res)) {
 842		case IORESOURCE_IO:
 843			/* Bar 1: downstream IO bar */
 844			fpci_bar = 0xfdfc0000;
 845			axi_address = pci_pio_to_address(res->start);
 846			afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
 847			afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
 848			afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
 849			break;
 850		case IORESOURCE_MEM:
 851			fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1;
 852			axi_address = res->start;
 853
 854			if (res->flags & IORESOURCE_PREFETCH) {
 855				/* Bar 2: prefetchable memory BAR */
 856				afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
 857				afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
 858				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
 859
 860			} else {
 861				/* Bar 3: non prefetchable memory BAR */
 862				afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
 863				afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
 864				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
 865			}
 866			break;
 867		}
 868	}
 869
 870	/* NULL out the remaining BARs as they are not used */
 871	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
 872	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
 873	afi_writel(pcie, 0, AFI_FPCI_BAR4);
 874
 875	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
 876	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
 877	afi_writel(pcie, 0, AFI_FPCI_BAR5);
 878
 879	if (pcie->soc->has_cache_bars) {
 880		/* map all upstream transactions as uncached */
 881		afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
 882		afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
 883		afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
 884		afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
 885	}
 886
 887	/* MSI translations are set up only when needed */
 888	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
 889	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
 890	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
 891	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
 892}
 893
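/*
 * Busy-poll the PADS PLL lock-detect bit until it sets or @timeout
 * milliseconds elapse; returns 0 once the PLL reports lock, -ETIMEDOUT
 * otherwise.
 */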
 894static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
 895{
 896	const struct tegra_pcie_soc *soc = pcie->soc;
 897	u32 value;
 898
 899	timeout = jiffies + msecs_to_jiffies(timeout);
 900
 901	while (time_before(jiffies, timeout)) {
 902		value = pads_readl(pcie, soc->pads_pll_ctl);
 903		if (value & PADS_PLL_CTL_LOCKDET)
 904			return 0;
 905	}
 906
 907	return -ETIMEDOUT;
 908}
 909
 910static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
 911{
 912	struct device *dev = pcie->dev;
 913	const struct tegra_pcie_soc *soc = pcie->soc;
 914	u32 value;
 915	int err;
 916
 917	/* initialize internal PHY, enable up to 16 PCIe lanes */
 918	pads_writel(pcie, 0x0, PADS_CTL_SEL);
 919
 920	/* override IDDQ to 1 on all 4 lanes */
 921	value = pads_readl(pcie, PADS_CTL);
 922	value |= PADS_CTL_IDDQ_1L;
 923	pads_writel(pcie, value, PADS_CTL);
 924
 925	/*
 926	 * Set up PHY PLL inputs: select PLLE output as refclock and set the
 927	 * TX ref sel to div10 (not div5).
 928	 */
 929	value = pads_readl(pcie, soc->pads_pll_ctl);
 930	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
 931	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
 932	pads_writel(pcie, value, soc->pads_pll_ctl);
 933
 934	/* reset PLL */
 935	value = pads_readl(pcie, soc->pads_pll_ctl);
 936	value &= ~PADS_PLL_CTL_RST_B4SM;
 937	pads_writel(pcie, value, soc->pads_pll_ctl);
 938
 939	usleep_range(20, 100);
 940
 941	/* take PLL out of reset */
 942	value = pads_readl(pcie, soc->pads_pll_ctl);
 943	value |= PADS_PLL_CTL_RST_B4SM;
 944	pads_writel(pcie, value, soc->pads_pll_ctl);
 945
 946	/* wait for the PLL to lock */
 947	err = tegra_pcie_pll_wait(pcie, 500);
 948	if (err < 0) {
 949		dev_err(dev, "PLL failed to lock: %d\n", err);
 950		return err;
 951	}
 952
 953	/* turn off IDDQ override */
 954	value = pads_readl(pcie, PADS_CTL);
 955	value &= ~PADS_CTL_IDDQ_1L;
 956	pads_writel(pcie, value, PADS_CTL);
 957
 958	/* enable TX/RX data */
 959	value = pads_readl(pcie, PADS_CTL);
 960	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
 961	pads_writel(pcie, value, PADS_CTL);
 962
 963	return 0;
 964}
 965
 966static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
 967{
 968	const struct tegra_pcie_soc *soc = pcie->soc;
 969	u32 value;
 970
 971	/* disable TX/RX data */
 972	value = pads_readl(pcie, PADS_CTL);
 973	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
 974	pads_writel(pcie, value, PADS_CTL);
 975
 976	/* override IDDQ */
 977	value = pads_readl(pcie, PADS_CTL);
 978	value |= PADS_CTL_IDDQ_1L;
 979	pads_writel(pcie, value, PADS_CTL);
 980
 981	/* reset PLL */
 982	value = pads_readl(pcie, soc->pads_pll_ctl);
 983	value &= ~PADS_PLL_CTL_RST_B4SM;
 984	pads_writel(pcie, value, soc->pads_pll_ctl);
 985
 986	usleep_range(20, 100);
 987
 988	return 0;
 989}
 990
 991static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
 992{
 993	struct device *dev = port->pcie->dev;
 994	unsigned int i;
 995	int err;
 996
 997	for (i = 0; i < port->lanes; i++) {
 998		err = phy_power_on(port->phys[i]);
 999		if (err < 0) {
1000			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
1001			return err;
1002		}
1003	}
1004
1005	return 0;
1006}
1007
1008static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
1009{
1010	struct device *dev = port->pcie->dev;
1011	unsigned int i;
1012	int err;
1013
1014	for (i = 0; i < port->lanes; i++) {
1015		err = phy_power_off(port->phys[i]);
1016		if (err < 0) {
1017			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
1018				err);
1019			return err;
1020		}
1021	}
1022
1023	return 0;
1024}
1025
1026static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
1027{
1028	struct device *dev = pcie->dev;
1029	struct tegra_pcie_port *port;
1030	int err;
1031
1032	if (pcie->legacy_phy) {
1033		if (pcie->phy)
1034			err = phy_power_on(pcie->phy);
1035		else
1036			err = tegra_pcie_phy_enable(pcie);
1037
1038		if (err < 0)
1039			dev_err(dev, "failed to power on PHY: %d\n", err);
1040
1041		return err;
1042	}
1043
1044	list_for_each_entry(port, &pcie->ports, list) {
1045		err = tegra_pcie_port_phy_power_on(port);
1046		if (err < 0) {
1047			dev_err(dev,
1048				"failed to power on PCIe port %u PHY: %d\n",
1049				port->index, err);
1050			return err;
1051		}
1052	}
1053
1054	return 0;
1055}
1056
1057static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
1058{
1059	struct device *dev = pcie->dev;
1060	struct tegra_pcie_port *port;
1061	int err;
1062
1063	if (pcie->legacy_phy) {
1064		if (pcie->phy)
1065			err = phy_power_off(pcie->phy);
1066		else
1067			err = tegra_pcie_phy_disable(pcie);
1068
1069		if (err < 0)
1070			dev_err(dev, "failed to power off PHY: %d\n", err);
1071
1072		return err;
1073	}
1074
1075	list_for_each_entry(port, &pcie->ports, list) {
1076		err = tegra_pcie_port_phy_power_off(port);
1077		if (err < 0) {
1078			dev_err(dev,
1079				"failed to power off PCIe port %u PHY: %d\n",
1080				port->index, err);
1081			return err;
1082		}
1083	}
1084
1085	return 0;
1086}
1087
1088static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
1089{
1090	const struct tegra_pcie_soc *soc = pcie->soc;
1091	struct tegra_pcie_port *port;
1092	unsigned long value;
1093
1094	/* enable PLL power down */
1095	if (pcie->phy) {
1096		value = afi_readl(pcie, AFI_PLLE_CONTROL);
1097		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
1098		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
1099		afi_writel(pcie, value, AFI_PLLE_CONTROL);
1100	}
1101
1102	/* power down PCIe slot clock bias pad */
1103	if (soc->has_pex_bias_ctrl)
1104		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
1105
1106	/* configure mode and disable all ports */
1107	value = afi_readl(pcie, AFI_PCIE_CONFIG);
1108	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
1109	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
1110	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;
1111
1112	list_for_each_entry(port, &pcie->ports, list) {
1113		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
1114		value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
1115	}
1116
1117	afi_writel(pcie, value, AFI_PCIE_CONFIG);
1118
1119	if (soc->has_gen2) {
1120		value = afi_readl(pcie, AFI_FUSE);
1121		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
1122		afi_writel(pcie, value, AFI_FUSE);
1123	} else {
1124		value = afi_readl(pcie, AFI_FUSE);
1125		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
1126		afi_writel(pcie, value, AFI_FUSE);
1127	}
1128
1129	/* Disable AFI dynamic clock gating and enable PCIe */
1130	value = afi_readl(pcie, AFI_CONFIGURATION);
1131	value |= AFI_CONFIGURATION_EN_FPCI;
1132	value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
1133	afi_writel(pcie, value, AFI_CONFIGURATION);
1134
1135	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
1136		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
1137		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;
1138
1139	if (soc->has_intr_prsnt_sense)
1140		value |= AFI_INTR_EN_PRSNT_SENSE;
1141
1142	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
1143	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);
1144
1145	/* don't enable MSI for now, only when needed */
1146	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);
1147
1148	/* disable all exceptions */
1149	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
1150}
1151
1152static void tegra_pcie_power_off(struct tegra_pcie *pcie)
1153{
1154	struct device *dev = pcie->dev;
1155	const struct tegra_pcie_soc *soc = pcie->soc;
1156	int err;
1157
1158	reset_control_assert(pcie->afi_rst);
1159
1160	clk_disable_unprepare(pcie->pll_e);
1161	if (soc->has_cml_clk)
1162		clk_disable_unprepare(pcie->cml_clk);
1163	clk_disable_unprepare(pcie->afi_clk);
1164
1165	if (!dev->pm_domain)
1166		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
1167
1168	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
1169	if (err < 0)
1170		dev_warn(dev, "failed to disable regulators: %d\n", err);
1171}
1172
1173static int tegra_pcie_power_on(struct tegra_pcie *pcie)
1174{
1175	struct device *dev = pcie->dev;
1176	const struct tegra_pcie_soc *soc = pcie->soc;
1177	int err;
1178
1179	reset_control_assert(pcie->pcie_xrst);
1180	reset_control_assert(pcie->afi_rst);
1181	reset_control_assert(pcie->pex_rst);
1182
1183	if (!dev->pm_domain)
1184		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
1185
1186	/* enable regulators */
1187	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
1188	if (err < 0)
1189		dev_err(dev, "failed to enable regulators: %d\n", err);
1190
1191	if (!dev->pm_domain) {
1192		err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
1193		if (err) {
1194			dev_err(dev, "failed to ungate power partition: %d\n", err);
1195			goto regulator_disable;
1196		}
1197		err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
1198		if (err) {
1199			dev_err(dev, "failed to remove clamp: %d\n", err);
1200			goto powergate;
1201		}
1202	}
1203
1204	err = clk_prepare_enable(pcie->afi_clk);
1205	if (err < 0) {
1206		dev_err(dev, "failed to enable AFI clock: %d\n", err);
1207		goto powergate;
1208	}
1209
1210	if (soc->has_cml_clk) {
1211		err = clk_prepare_enable(pcie->cml_clk);
1212		if (err < 0) {
1213			dev_err(dev, "failed to enable CML clock: %d\n", err);
1214			goto disable_afi_clk;
1215		}
1216	}
1217
1218	err = clk_prepare_enable(pcie->pll_e);
1219	if (err < 0) {
1220		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
1221		goto disable_cml_clk;
1222	}
1223
1224	reset_control_deassert(pcie->afi_rst);
1225
1226	return 0;
1227
1228disable_cml_clk:
1229	if (soc->has_cml_clk)
1230		clk_disable_unprepare(pcie->cml_clk);
1231disable_afi_clk:
1232	clk_disable_unprepare(pcie->afi_clk);
1233powergate:
1234	if (!dev->pm_domain)
1235		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
1236regulator_disable:
1237	regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
1238
1239	return err;
1240}
1241
1242static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
1243{
1244	const struct tegra_pcie_soc *soc = pcie->soc;
1245
1246	/* Configure the reference clock driver */
1247	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);
1248
1249	if (soc->num_ports > 2)
1250		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
1251}
1252
1253static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
1254{
1255	struct device *dev = pcie->dev;
1256	const struct tegra_pcie_soc *soc = pcie->soc;
1257
1258	pcie->pex_clk = devm_clk_get(dev, "pex");
1259	if (IS_ERR(pcie->pex_clk))
1260		return PTR_ERR(pcie->pex_clk);
1261
1262	pcie->afi_clk = devm_clk_get(dev, "afi");
1263	if (IS_ERR(pcie->afi_clk))
1264		return PTR_ERR(pcie->afi_clk);
1265
1266	pcie->pll_e = devm_clk_get(dev, "pll_e");
1267	if (IS_ERR(pcie->pll_e))
1268		return PTR_ERR(pcie->pll_e);
1269
1270	if (soc->has_cml_clk) {
1271		pcie->cml_clk = devm_clk_get(dev, "cml");
1272		if (IS_ERR(pcie->cml_clk))
1273			return PTR_ERR(pcie->cml_clk);
1274	}
1275
1276	return 0;
1277}
1278
1279static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1280{
1281	struct device *dev = pcie->dev;
1282
1283	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
1284	if (IS_ERR(pcie->pex_rst))
1285		return PTR_ERR(pcie->pex_rst);
1286
1287	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
1288	if (IS_ERR(pcie->afi_rst))
1289		return PTR_ERR(pcie->afi_rst);
1290
1291	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
1292	if (IS_ERR(pcie->pcie_xrst))
1293		return PTR_ERR(pcie->pcie_xrst);
1294
1295	return 0;
1296}
1297
1298static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
1299{
1300	struct device *dev = pcie->dev;
1301	int err;
1302
1303	pcie->phy = devm_phy_optional_get(dev, "pcie");
1304	if (IS_ERR(pcie->phy)) {
1305		err = PTR_ERR(pcie->phy);
1306		dev_err(dev, "failed to get PHY: %d\n", err);
1307		return err;
1308	}
1309
1310	err = phy_init(pcie->phy);
1311	if (err < 0) {
1312		dev_err(dev, "failed to initialize PHY: %d\n", err);
1313		return err;
1314	}
1315
1316	pcie->legacy_phy = true;
1317
1318	return 0;
1319}
1320
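/*
 * Look up the optional lane PHY named "<consumer>-<index>" (e.g. "pcie-0"
 * for lane 0) in the given device tree node; a missing PHY yields NULL
 * rather than an error.
 */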
1321static struct phy *devm_of_phy_optional_get_index(struct device *dev,
1322						  struct device_node *np,
1323						  const char *consumer,
1324						  unsigned int index)
1325{
1326	struct phy *phy;
1327	char *name;
1328
1329	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
1330	if (!name)
1331		return ERR_PTR(-ENOMEM);
1332
1333	phy = devm_of_phy_optional_get(dev, np, name);
1334	kfree(name);
1335
1336	return phy;
1337}
1338
1339static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
1340{
1341	struct device *dev = port->pcie->dev;
1342	struct phy *phy;
1343	unsigned int i;
1344	int err;
1345
1346	port->phys = devm_kcalloc(dev, port->lanes, sizeof(*port->phys), GFP_KERNEL);
1347	if (!port->phys)
1348		return -ENOMEM;
1349
1350	for (i = 0; i < port->lanes; i++) {
1351		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
1352		if (IS_ERR(phy)) {
1353			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
1354				PTR_ERR(phy));
1355			return PTR_ERR(phy);
1356		}
1357
1358		err = phy_init(phy);
1359		if (err < 0) {
1360			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
1361				err);
1362			return err;
1363		}
1364
1365		port->phys[i] = phy;
1366	}
1367
1368	return 0;
1369}
1370
1371static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
1372{
1373	const struct tegra_pcie_soc *soc = pcie->soc;
1374	struct device_node *np = pcie->dev->of_node;
1375	struct tegra_pcie_port *port;
1376	int err;
1377
1378	if (!soc->has_gen2 || of_property_present(np, "phys"))
1379		return tegra_pcie_phys_get_legacy(pcie);
1380
1381	list_for_each_entry(port, &pcie->ports, list) {
1382		err = tegra_pcie_port_get_phys(port);
1383		if (err < 0)
1384			return err;
1385	}
1386
1387	return 0;
1388}
1389
1390static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
1391{
1392	struct tegra_pcie_port *port;
1393	struct device *dev = pcie->dev;
1394	int err, i;
1395
1396	if (pcie->legacy_phy) {
1397		err = phy_exit(pcie->phy);
1398		if (err < 0)
1399			dev_err(dev, "failed to teardown PHY: %d\n", err);
1400		return;
1401	}
1402
1403	list_for_each_entry(port, &pcie->ports, list) {
1404		for (i = 0; i < port->lanes; i++) {
1405			err = phy_exit(port->phys[i]);
1406			if (err < 0)
1407				dev_err(dev, "failed to teardown PHY#%u: %d\n",
1408					i, err);
1409		}
1410	}
1411}
1412
1413static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
1414{
1415	struct device *dev = pcie->dev;
1416	struct platform_device *pdev = to_platform_device(dev);
1417	struct resource *res;
1418	const struct tegra_pcie_soc *soc = pcie->soc;
1419	int err;
1420
1421	err = tegra_pcie_clocks_get(pcie);
1422	if (err) {
1423		dev_err(dev, "failed to get clocks: %d\n", err);
1424		return err;
1425	}
1426
1427	err = tegra_pcie_resets_get(pcie);
1428	if (err) {
1429		dev_err(dev, "failed to get resets: %d\n", err);
1430		return err;
1431	}
1432
1433	if (soc->program_uphy) {
1434		err = tegra_pcie_phys_get(pcie);
1435		if (err < 0) {
1436			dev_err(dev, "failed to get PHYs: %d\n", err);
1437			return err;
1438		}
1439	}
1440
1441	pcie->pads = devm_platform_ioremap_resource_byname(pdev, "pads");
1442	if (IS_ERR(pcie->pads)) {
1443		err = PTR_ERR(pcie->pads);
1444		goto phys_put;
1445	}
1446
1447	pcie->afi = devm_platform_ioremap_resource_byname(pdev, "afi");
1448	if (IS_ERR(pcie->afi)) {
1449		err = PTR_ERR(pcie->afi);
1450		goto phys_put;
1451	}
1452
1453	/* request configuration space, but remap later, on demand */
1454	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
1455	if (!res) {
1456		err = -EADDRNOTAVAIL;
1457		goto phys_put;
1458	}
1459
1460	pcie->cs = *res;
1461
1462	/* constrain configuration space to 4 KiB */
1463	pcie->cs.end = pcie->cs.start + SZ_4K - 1;
1464
1465	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
1466	if (IS_ERR(pcie->cfg)) {
1467		err = PTR_ERR(pcie->cfg);
1468		goto phys_put;
1469	}
1470
1471	/* request interrupt */
1472	err = platform_get_irq_byname(pdev, "intr");
1473	if (err < 0)
1474		goto phys_put;
1475
1476	pcie->irq = err;
1477
1478	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
1479	if (err) {
1480		dev_err(dev, "failed to register IRQ: %d\n", err);
1481		goto phys_put;
1482	}
1483
1484	return 0;
1485
1486phys_put:
1487	if (soc->program_uphy)
1488		tegra_pcie_phys_put(pcie);
1489
1490	return err;
1491}
1492
1493static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1494{
1495	const struct tegra_pcie_soc *soc = pcie->soc;
1496
1497	if (pcie->irq > 0)
1498		free_irq(pcie->irq, pcie);
1499
1500	if (soc->program_uphy)
1501		tegra_pcie_phys_put(pcie);
1502
1503	return 0;
1504}
1505
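/*
 * Broadcast PME_Turn_Off on the given port and wait for the downstream
 * device's PME_TO_Ack so that the link can be taken down cleanly before
 * the controller is suspended.
 */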
1506static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
1507{
1508	struct tegra_pcie *pcie = port->pcie;
1509	const struct tegra_pcie_soc *soc = pcie->soc;
1510	int err;
1511	u32 val;
1512	u8 ack_bit;
1513
1514	val = afi_readl(pcie, AFI_PCIE_PME);
1515	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
1516	afi_writel(pcie, val, AFI_PCIE_PME);
1517
1518	ack_bit = soc->ports[port->index].pme.ack_bit;
1519	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
1520				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
1521	if (err)
1522		dev_err(pcie->dev, "PME ack not received on port %u\n",
1523			port->index);
1524
1525	usleep_range(10000, 11000);
1526
1527	val = afi_readl(pcie, AFI_PCIE_PME);
1528	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
1529	afi_writel(pcie, val, AFI_PCIE_PME);
1530}
1531
1532static void tegra_pcie_msi_irq(struct irq_desc *desc)
1533{
1534	struct tegra_pcie *pcie = irq_desc_get_handler_data(desc);
1535	struct irq_chip *chip = irq_desc_get_chip(desc);
1536	struct tegra_msi *msi = &pcie->msi;
1537	struct device *dev = pcie->dev;
1538	unsigned int i;
1539
1540	chained_irq_enter(chip, desc);
1541
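	/* 8 x 32-bit AFI_MSI_VEC registers cover all INT_PCI_MSI_NR vectors */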
1542	for (i = 0; i < 8; i++) {
1543		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC(i));
1544
1545		while (reg) {
1546			unsigned int offset = find_first_bit(&reg, 32);
1547			unsigned int index = i * 32 + offset;
1548			int ret;
1549
1550			ret = generic_handle_domain_irq(msi->domain->parent, index);
1551			if (ret) {
1552				/*
1553				 * That's weird: nobody claimed this MSI,
1554				 * so just clear it.
1555				 */
1556				dev_info(dev, "unexpected MSI\n");
1557				afi_writel(pcie, BIT(index % 32), AFI_MSI_VEC(index));
1558			}
1559
1560			/* see if there's any more pending in this vector */
1561			reg = afi_readl(pcie, AFI_MSI_VEC(i));
1562		}
1563	}
1564
1565	chained_irq_exit(chip, desc);
1566}
1567
1568static void tegra_msi_top_irq_ack(struct irq_data *d)
1569{
1570	irq_chip_ack_parent(d);
1571}
1572
1573static void tegra_msi_top_irq_mask(struct irq_data *d)
1574{
1575	pci_msi_mask_irq(d);
1576	irq_chip_mask_parent(d);
1577}
1578
1579static void tegra_msi_top_irq_unmask(struct irq_data *d)
1580{
1581	pci_msi_unmask_irq(d);
1582	irq_chip_unmask_parent(d);
1583}
1584
1585static struct irq_chip tegra_msi_top_chip = {
1586	.name		= "Tegra PCIe MSI",
1587	.irq_ack	= tegra_msi_top_irq_ack,
1588	.irq_mask	= tegra_msi_top_irq_mask,
1589	.irq_unmask	= tegra_msi_top_irq_unmask,
1590};
1591
1592static void tegra_msi_irq_ack(struct irq_data *d)
1593{
1594	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
1595	struct tegra_pcie *pcie = msi_to_pcie(msi);
1596	unsigned int index = d->hwirq / 32;
1597
1598	/* clear the interrupt */
1599	afi_writel(pcie, BIT(d->hwirq % 32), AFI_MSI_VEC(index));
1600}
1601
1602static void tegra_msi_irq_mask(struct irq_data *d)
1603{
1604	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
1605	struct tegra_pcie *pcie = msi_to_pcie(msi);
1606	unsigned int index = d->hwirq / 32;
1607	unsigned long flags;
1608	u32 value;
1609
1610	spin_lock_irqsave(&msi->mask_lock, flags);
1611	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
1612	value &= ~BIT(d->hwirq % 32);
1613	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
1614	spin_unlock_irqrestore(&msi->mask_lock, flags);
1615}
1616
1617static void tegra_msi_irq_unmask(struct irq_data *d)
1618{
1619	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
1620	struct tegra_pcie *pcie = msi_to_pcie(msi);
1621	unsigned int index = d->hwirq / 32;
1622	unsigned long flags;
1623	u32 value;
1624
1625	spin_lock_irqsave(&msi->mask_lock, flags);
1626	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
1627	value |= BIT(d->hwirq % 32);
1628	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
1629	spin_unlock_irqrestore(&msi->mask_lock, flags);
1630}
1631
1632static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
1633{
1634	return -EINVAL;
1635}
1636
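/*
 * Compose the MSI message programmed into an endpoint: the target address
 * is the dummy DMA page allocated in tegra_pcie_msi_setup() and the payload
 * is the hardware vector number that the AFI latches into AFI_MSI_VEC().
 */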
1637static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1638{
1639	struct tegra_msi *msi = irq_data_get_irq_chip_data(data);
1640
1641	msg->address_lo = lower_32_bits(msi->phys);
1642	msg->address_hi = upper_32_bits(msi->phys);
1643	msg->data = data->hwirq;
1644}
1645
1646static struct irq_chip tegra_msi_bottom_chip = {
1647	.name			= "Tegra MSI",
1648	.irq_ack		= tegra_msi_irq_ack,
1649	.irq_mask		= tegra_msi_irq_mask,
1650	.irq_unmask		= tegra_msi_irq_unmask,
1651	.irq_set_affinity	= tegra_msi_set_affinity,
1652	.irq_compose_msi_msg	= tegra_compose_msi_msg,
1653};
1654
1655static int tegra_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
1656				  unsigned int nr_irqs, void *args)
1657{
1658	struct tegra_msi *msi = domain->host_data;
1659	unsigned int i;
1660	int hwirq;
1661
1662	mutex_lock(&msi->map_lock);
1663
1664	hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));
1665
1666	mutex_unlock(&msi->map_lock);
1667
1668	if (hwirq < 0)
1669		return -ENOSPC;
1670
1671	for (i = 0; i < nr_irqs; i++)
1672		irq_domain_set_info(domain, virq + i, hwirq + i,
1673				    &tegra_msi_bottom_chip, domain->host_data,
1674				    handle_edge_irq, NULL, NULL);
1675
1676	tegra_cpuidle_pcie_irqs_in_use();
1677
1678	return 0;
1679}
1680
1681static void tegra_msi_domain_free(struct irq_domain *domain, unsigned int virq,
1682				  unsigned int nr_irqs)
1683{
1684	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
1685	struct tegra_msi *msi = domain->host_data;
1686
1687	mutex_lock(&msi->map_lock);
1688
1689	bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));
1690
1691	mutex_unlock(&msi->map_lock);
1692}
1693
1694static const struct irq_domain_ops tegra_msi_domain_ops = {
1695	.alloc = tegra_msi_domain_alloc,
1696	.free = tegra_msi_domain_free,
1697};
1698
1699static struct msi_domain_info tegra_msi_info = {
1700	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
1701		   MSI_FLAG_PCI_MSIX),
1702	.chip	= &tegra_msi_top_chip,
1703};
1704
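/*
 * The MSI IRQ domains are stacked: a linear parent domain hands out the
 * hardware vectors (tegra_msi_bottom_chip), and the PCI MSI domain created
 * on top of it (tegra_msi_top_chip) is what endpoint drivers allocate from.
 */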
1705static int tegra_allocate_domains(struct tegra_msi *msi)
1706{
1707	struct tegra_pcie *pcie = msi_to_pcie(msi);
1708	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
1709	struct irq_domain *parent;
1710
1711	parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
1712					  &tegra_msi_domain_ops, msi);
1713	if (!parent) {
1714		dev_err(pcie->dev, "failed to create IRQ domain\n");
1715		return -ENOMEM;
1716	}
1717	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
1718
1719	msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent);
1720	if (!msi->domain) {
1721		dev_err(pcie->dev, "failed to create MSI domain\n");
1722		irq_domain_remove(parent);
1723		return -ENOMEM;
1724	}
1725
1726	return 0;
1727}
1728
1729static void tegra_free_domains(struct tegra_msi *msi)
1730{
1731	struct irq_domain *parent = msi->domain->parent;
1732
1733	irq_domain_remove(msi->domain);
1734	irq_domain_remove(parent);
1735}
1736
1737static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
1738{
1739	struct platform_device *pdev = to_platform_device(pcie->dev);
1740	struct tegra_msi *msi = &pcie->msi;
1741	struct device *dev = pcie->dev;
1742	int err;
1743
1744	mutex_init(&msi->map_lock);
1745	spin_lock_init(&msi->mask_lock);
1746
1747	if (IS_ENABLED(CONFIG_PCI_MSI)) {
1748		err = tegra_allocate_domains(msi);
1749		if (err)
1750			return err;
1751	}
1752
1753	err = platform_get_irq_byname(pdev, "msi");
1754	if (err < 0)
1755		goto free_irq_domain;
1756
1757	msi->irq = err;
1758
1759	irq_set_chained_handler_and_data(msi->irq, tegra_pcie_msi_irq, pcie);
1760
1761	/* Though the PCIe controller can address a >32-bit address space, to
1762	 * facilitate endpoints that support only a 32-bit MSI target address,
1763	 * the coherent DMA mask is set to 32 bits to ensure that the MSI
1764	 * target address is always a 32-bit address.
1765	 */
1766	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
1767	if (err < 0) {
1768		dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
1769		goto free_irq;
1770	}
1771
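	/*
	 * Allocate a dummy page to serve as the MSI target: endpoints write
	 * their MSI data to this address, and the AFI is programmed (in
	 * tegra_pcie_enable_msi()) to intercept those writes and raise the
	 * corresponding vector, so the CPU never needs a kernel mapping.
	 */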
1772	msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
1773				    DMA_ATTR_NO_KERNEL_MAPPING);
1774	if (!msi->virt) {
1775		dev_err(dev, "failed to allocate DMA memory for MSI\n");
1776		err = -ENOMEM;
1777		goto free_irq;
1778	}
1779
1780	return 0;
1781
1782free_irq:
1783	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
1784free_irq_domain:
1785	if (IS_ENABLED(CONFIG_PCI_MSI))
1786		tegra_free_domains(msi);
1787
1788	return err;
1789}
1790
1791static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1792{
1793	const struct tegra_pcie_soc *soc = pcie->soc;
1794	struct tegra_msi *msi = &pcie->msi;
1795	u32 reg, msi_state[INT_PCI_MSI_NR / 32];
1796	int i;
1797
1798	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1799	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
1800	/* this register is in 4K increments */
1801	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1802
1803	/* Restore the MSI allocation state */
1804	bitmap_to_arr32(msi_state, msi->used, INT_PCI_MSI_NR);
1805	for (i = 0; i < ARRAY_SIZE(msi_state); i++)
1806		afi_writel(pcie, msi_state[i], AFI_MSI_EN_VEC(i));
1807
1808	/* and unmask the MSI interrupt */
1809	reg = afi_readl(pcie, AFI_INTR_MASK);
1810	reg |= AFI_INTR_MASK_MSI_MASK;
1811	afi_writel(pcie, reg, AFI_INTR_MASK);
1812}
1813
1814static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
1815{
1816	struct tegra_msi *msi = &pcie->msi;
1817	unsigned int i, irq;
1818
1819	dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
1820		       DMA_ATTR_NO_KERNEL_MAPPING);
1821
1822	for (i = 0; i < INT_PCI_MSI_NR; i++) {
1823		irq = irq_find_mapping(msi->domain, i);
1824		if (irq > 0)
1825			irq_domain_free_irqs(irq, 1);
1826	}
1827
1828	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
1829
1830	if (IS_ENABLED(CONFIG_PCI_MSI))
1831		tegra_free_domains(msi);
1832}
1833
1834static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1835{
1836	u32 value;
1837
1838	/* mask the MSI interrupt */
1839	value = afi_readl(pcie, AFI_INTR_MASK);
1840	value &= ~AFI_INTR_MASK_MSI_MASK;
1841	afi_writel(pcie, value, AFI_INTR_MASK);
1842
1843	return 0;
1844}
1845
1846static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
1847{
1848	u32 value;
1849
1850	value = afi_readl(pcie, AFI_INTR_MASK);
1851	value &= ~AFI_INTR_MASK_INT_MASK;
1852	afi_writel(pcie, value, AFI_INTR_MASK);
1853}
1854
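/*
 * The lanes argument packs one root port's lane count per byte, indexed by
 * port number: e.g. 0x00000204 means port 0 uses 4 lanes and port 1 uses 2,
 * matching the "4x1, 2x1" crossbar configuration on Tegra30.
 */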
1855static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1856				      u32 *xbar)
1857{
1858	struct device *dev = pcie->dev;
1859	struct device_node *np = dev->of_node;
1860
1861	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1862		switch (lanes) {
1863		case 0x010004:
1864			dev_info(dev, "4x1, 1x1 configuration\n");
1865			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
1866			return 0;
1867
1868		case 0x010102:
1869			dev_info(dev, "2x1, 1x1, 1x1 configuration\n");
1870			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1871			return 0;
1872
1873		case 0x010101:
1874			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
1875			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
1876			return 0;
1877
1878		default:
1879			dev_info(dev, "invalid lane configuration in DT, "
1880				 "falling back to default 2x1, 1x1, 1x1 "
1881				 "configuration\n");
1882			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1883			return 0;
1884		}
1885	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
1886		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1887		switch (lanes) {
1888		case 0x0000104:
1889			dev_info(dev, "4x1, 1x1 configuration\n");
1890			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1891			return 0;
1892
1893		case 0x0000102:
1894			dev_info(dev, "2x1, 1x1 configuration\n");
1895			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1896			return 0;
1897		}
1898	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1899		switch (lanes) {
1900		case 0x00000204:
1901			dev_info(dev, "4x1, 2x1 configuration\n");
1902			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1903			return 0;
1904
1905		case 0x00020202:
1906			dev_info(dev, "2x3 configuration\n");
1907			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1908			return 0;
1909
1910		case 0x00010104:
1911			dev_info(dev, "4x1, 1x2 configuration\n");
1912			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1913			return 0;
1914		}
1915	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1916		switch (lanes) {
1917		case 0x00000004:
1918			dev_info(dev, "single-mode configuration\n");
1919			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1920			return 0;
1921
1922		case 0x00000202:
1923			dev_info(dev, "dual-mode configuration\n");
1924			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1925			return 0;
1926		}
1927	}
1928
1929	return -EINVAL;
1930}
1931
1932/*
1933 * Check whether a given set of supplies is available in a device tree node.
1934 * This is used to check whether the new or the legacy device tree bindings
1935 * should be used.
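 *
 * For each entry the corresponding "<name>-supply" property is looked up;
 * e.g. a "vdd" supply requires a "vdd-supply" property in the node.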
1936 */
1937static bool of_regulator_bulk_available(struct device_node *np,
1938					struct regulator_bulk_data *supplies,
1939					unsigned int num_supplies)
1940{
1941	char property[32];
1942	unsigned int i;
1943
1944	for (i = 0; i < num_supplies; i++) {
1945		snprintf(property, 32, "%s-supply", supplies[i].supply);
1946
1947		if (!of_property_present(np, property))
1948			return false;
1949	}
1950
1951	return true;
1952}
1953
1954/*
1955 * Old versions of the device tree binding for this device used a set of power
1956 * supplies that didn't match the hardware inputs. This happened to work for a
1957 * number of cases but is not future-proof. However, to preserve backwards-
1958 * compatibility with old device trees, this function will try to use the old
1959 * set of supplies.
1960 */
1961static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1962{
1963	struct device *dev = pcie->dev;
1964	struct device_node *np = dev->of_node;
1965
1966	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1967		pcie->num_supplies = 3;
1968	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1969		pcie->num_supplies = 2;
1970
1971	if (pcie->num_supplies == 0) {
1972		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
1973		return -ENODEV;
1974	}
1975
1976	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1977				      sizeof(*pcie->supplies),
1978				      GFP_KERNEL);
1979	if (!pcie->supplies)
1980		return -ENOMEM;
1981
1982	pcie->supplies[0].supply = "pex-clk";
1983	pcie->supplies[1].supply = "vdd";
1984
1985	if (pcie->num_supplies > 2)
1986		pcie->supplies[2].supply = "avdd";
1987
1988	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
1989}
1990
1991/*
1992 * Obtains the list of regulators required for a particular generation of the
1993 * IP block.
1994 *
1995 * This would've been nice to do simply by providing static tables for use
1996 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1997 * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB),
1998 * and either pair may be optional depending on which ports are being used.
1999 */
2000static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
2001{
2002	struct device *dev = pcie->dev;
2003	struct device_node *np = dev->of_node;
2004	unsigned int i = 0;
2005
2006	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
2007		pcie->num_supplies = 4;
2008
2009		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2010					      sizeof(*pcie->supplies),
2011					      GFP_KERNEL);
2012		if (!pcie->supplies)
2013			return -ENOMEM;
2014
2015		pcie->supplies[i++].supply = "dvdd-pex";
2016		pcie->supplies[i++].supply = "hvdd-pex-pll";
2017		pcie->supplies[i++].supply = "hvdd-pex";
2018		pcie->supplies[i++].supply = "vddio-pexctl-aud";
2019	} else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
2020		pcie->num_supplies = 3;
2021
2022		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2023					      sizeof(*pcie->supplies),
2024					      GFP_KERNEL);
2025		if (!pcie->supplies)
2026			return -ENOMEM;
2027
2028		pcie->supplies[i++].supply = "hvddio-pex";
2029		pcie->supplies[i++].supply = "dvddio-pex";
2030		pcie->supplies[i++].supply = "vddio-pex-ctl";
2031	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
2032		pcie->num_supplies = 4;
2033
2034		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2035					      sizeof(*pcie->supplies),
2036					      GFP_KERNEL);
2037		if (!pcie->supplies)
2038			return -ENOMEM;
2039
2040		pcie->supplies[i++].supply = "avddio-pex";
2041		pcie->supplies[i++].supply = "dvddio-pex";
2042		pcie->supplies[i++].supply = "hvdd-pex";
2043		pcie->supplies[i++].supply = "vddio-pex-ctl";
2044	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
2045		bool need_pexa = false, need_pexb = false;
2046
2047		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
2048		if (lane_mask & 0x0f)
2049			need_pexa = true;
2050
2051		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
2052		if (lane_mask & 0x30)
2053			need_pexb = true;
2054
2055		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
2056					 (need_pexb ? 2 : 0);
2057
2058		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2059					      sizeof(*pcie->supplies),
2060					      GFP_KERNEL);
2061		if (!pcie->supplies)
2062			return -ENOMEM;
2063
2064		pcie->supplies[i++].supply = "avdd-pex-pll";
2065		pcie->supplies[i++].supply = "hvdd-pex";
2066		pcie->supplies[i++].supply = "vddio-pex-ctl";
2067		pcie->supplies[i++].supply = "avdd-plle";
2068
2069		if (need_pexa) {
2070			pcie->supplies[i++].supply = "avdd-pexa";
2071			pcie->supplies[i++].supply = "vdd-pexa";
2072		}
2073
2074		if (need_pexb) {
2075			pcie->supplies[i++].supply = "avdd-pexb";
2076			pcie->supplies[i++].supply = "vdd-pexb";
2077		}
2078	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
2079		pcie->num_supplies = 5;
2080
2081		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2082					      sizeof(*pcie->supplies),
2083					      GFP_KERNEL);
2084		if (!pcie->supplies)
2085			return -ENOMEM;
2086
2087		pcie->supplies[0].supply = "avdd-pex";
2088		pcie->supplies[1].supply = "vdd-pex";
2089		pcie->supplies[2].supply = "avdd-pex-pll";
2090		pcie->supplies[3].supply = "avdd-plle";
2091		pcie->supplies[4].supply = "vddio-pex-clk";
2092	}
2093
2094	if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
2095					pcie->num_supplies))
2096		return devm_regulator_bulk_get(dev, pcie->num_supplies,
2097					       pcie->supplies);
2098
2099	/*
2100	 * If not all regulators are available for this new scheme, assume
2101	 * that the device tree complies with an older version of the device
2102	 * tree binding.
2103	 */
2104	dev_info(dev, "using legacy DT binding for power supplies\n");
2105
2106	devm_kfree(dev, pcie->supplies);
2107	pcie->num_supplies = 0;
2108
2109	return tegra_pcie_get_legacy_regulators(pcie);
2110}
2111
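/*
 * Example root-port subnode as described by the nvidia,tegra*-pcie device
 * tree bindings (the values here are illustrative only):
 *
 *	pci@1,0 {
 *		reg = <0x000800 0 0 0 0>;
 *		nvidia,num-lanes = <2>;
 *	};
 *
 * The 1-based port number is recovered via PCI_SLOT() from the devfn that
 * of_pci_get_devfn() extracts from "reg".
 */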
2112static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2113{
2114	struct device *dev = pcie->dev;
2115	struct device_node *np = dev->of_node, *port;
2116	const struct tegra_pcie_soc *soc = pcie->soc;
2117	u32 lanes = 0, mask = 0;
2118	unsigned int lane = 0;
2119	int err;
2120
2121	/* parse root ports */
2122	for_each_child_of_node(np, port) {
2123		struct tegra_pcie_port *rp;
2124		unsigned int index;
2125		u32 value;
2126		char *label;
2127
2128		err = of_pci_get_devfn(port);
2129		if (err < 0) {
2130			dev_err(dev, "failed to parse address: %d\n", err);
2131			goto err_node_put;
2132		}
2133
2134		index = PCI_SLOT(err);
2135
2136		if (index < 1 || index > soc->num_ports) {
2137			dev_err(dev, "invalid port number: %d\n", index);
2138			err = -EINVAL;
2139			goto err_node_put;
2140		}
2141
2142		index--;
2143
2144		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
2145		if (err < 0) {
2146			dev_err(dev, "failed to parse # of lanes: %d\n",
2147				err);
2148			goto err_node_put;
2149		}
2150
2151		if (value > 16) {
2152			dev_err(dev, "invalid # of lanes: %u\n", value);
2153			err = -EINVAL;
2154			goto err_node_put;
2155		}
2156
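		/* pack this port's lane count into byte 'index' of lanes */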
2157		lanes |= value << (index << 3);
2158
2159		if (!of_device_is_available(port)) {
2160			lane += value;
2161			continue;
2162		}
2163
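		/*
		 * Record which physical lanes this port uses; the mask is
		 * later used to pick the PEXA/PEXB supplies on Tegra30.
		 */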
2164		mask |= ((1 << value) - 1) << lane;
2165		lane += value;
2166
2167		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2168		if (!rp) {
2169			err = -ENOMEM;
2170			goto err_node_put;
2171		}
2172
2173		err = of_address_to_resource(port, 0, &rp->regs);
2174		if (err < 0) {
2175			dev_err(dev, "failed to parse address: %d\n", err);
2176			goto err_node_put;
2177		}
2178
2179		INIT_LIST_HEAD(&rp->list);
2180		rp->index = index;
2181		rp->lanes = value;
2182		rp->pcie = pcie;
2183		rp->np = port;
2184
2185		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
2186		if (IS_ERR(rp->base)) {
2187			err = PTR_ERR(rp->base);
2188			goto err_node_put;
2189		}
2190
2191		label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
2192		if (!label) {
2193			err = -ENOMEM;
2194			goto err_node_put;
2195		}
2196
2197		/*
2198		 * devm_fwnode_gpiod_get() returns -ENOENT if the reset-gpios
2199		 * property is not populated; in that case fall back to using
2200		 * the per-port AFI register to toggle the PERST# SFIO line.
2201		 */
2202		rp->reset_gpio = devm_fwnode_gpiod_get(dev,
2203						       of_fwnode_handle(port),
2204						       "reset",
2205						       GPIOD_OUT_LOW,
2206						       label);
2207		if (IS_ERR(rp->reset_gpio)) {
2208			if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
2209				rp->reset_gpio = NULL;
2210			} else {
2211				dev_err(dev, "failed to get reset GPIO: %ld\n",
2212					PTR_ERR(rp->reset_gpio));
2213				err = PTR_ERR(rp->reset_gpio);
2214				goto err_node_put;
2215			}
2216		}
2217
2218		list_add_tail(&rp->list, &pcie->ports);
2219	}
2220
2221	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
2222	if (err < 0) {
2223		dev_err(dev, "invalid lane configuration\n");
2224		return err;
2225	}
2226
2227	err = tegra_pcie_get_regulators(pcie, mask);
2228	if (err < 0)
2229		return err;
2230
2231	return 0;
2232
2233err_node_put:
2234	of_node_put(port);
2235	return err;
2236}
2237
2238/*
2239 * FIXME: If there are no PCIe cards attached, then calling this function
2240 * can noticeably increase the boot time because of the long timeout loops
2241 * below.
2242 */
2243#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
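/* 3 link-up retries x 200 polls x ~2 ms per poll gives the 1.2 s worst case */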
2244static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
2245{
2246	struct device *dev = port->pcie->dev;
2247	unsigned int retries = 3;
2248	unsigned long value;
2249
2250	/* override presence detection */
2251	value = readl(port->base + RP_PRIV_MISC);
2252	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
2253	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
2254	writel(value, port->base + RP_PRIV_MISC);
2255
2256	do {
2257		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2258
2259		do {
2260			value = readl(port->base + RP_VEND_XP);
2261
2262			if (value & RP_VEND_XP_DL_UP)
2263				break;
2264
2265			usleep_range(1000, 2000);
2266		} while (--timeout);
2267
2268		if (!timeout) {
2269			dev_dbg(dev, "link %u down, retrying\n", port->index);
2270			goto retry;
2271		}
2272
2273		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2274
2275		do {
2276			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2277
2278			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2279				return true;
2280
2281			usleep_range(1000, 2000);
2282		} while (--timeout);
2283
2284retry:
2285		tegra_pcie_port_reset(port);
2286	} while (--retries);
2287
2288	return false;
2289}
2290
2291static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
2292{
2293	struct device *dev = pcie->dev;
2294	struct tegra_pcie_port *port;
2295	ktime_t deadline;
2296	u32 value;
2297
2298	list_for_each_entry(port, &pcie->ports, list) {
2299		/*
2300		 * "Supported Link Speeds Vector" in "Link Capabilities 2"
2301		 * is not supported by Tegra. tegra_pcie_change_link_speed()
2302		 * is called only for Tegra chips which support Gen2.
2303 * So there is no harm in not verifying the supported link speed.
2304		 */
2305		value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
2306		value &= ~PCI_EXP_LNKSTA_CLS;
2307		value |= PCI_EXP_LNKSTA_CLS_5_0GB;
2308		writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
2309
2310		/*
2311		 * Poll until link comes back from recovery to avoid race
2312		 * condition.
2313		 */
2314		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2315
2316		while (ktime_before(ktime_get(), deadline)) {
2317			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2318			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2319				break;
2320
2321			usleep_range(2000, 3000);
2322		}
2323
2324		if (value & PCI_EXP_LNKSTA_LT)
2325			dev_warn(dev, "PCIe port %u link is in recovery\n",
2326				 port->index);
2327
2328		/* Retrain the link */
2329		value = readl(port->base + RP_LINK_CONTROL_STATUS);
2330		value |= PCI_EXP_LNKCTL_RL;
2331		writel(value, port->base + RP_LINK_CONTROL_STATUS);
2332
2333		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2334
2335		while (ktime_before(ktime_get(), deadline)) {
2336			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2337			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2338				break;
2339
2340			usleep_range(2000, 3000);
2341		}
2342
2343		if (value & PCI_EXP_LNKSTA_LT)
2344			dev_err(dev, "failed to retrain link of port %u\n",
2345				port->index);
2346	}
2347}
2348
2349static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
2350{
2351	struct device *dev = pcie->dev;
2352	struct tegra_pcie_port *port, *tmp;
2353
2354	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2355		dev_info(dev, "probing port %u, using %u lanes\n",
2356			 port->index, port->lanes);
2357
2358		tegra_pcie_port_enable(port);
2359	}
2360
2361	/* Start LTSSM from Tegra side */
2362	reset_control_deassert(pcie->pcie_xrst);
2363
2364	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2365		if (tegra_pcie_port_check_link(port))
2366			continue;
2367
2368		dev_info(dev, "link %u down, ignoring\n", port->index);
2369
2370		tegra_pcie_port_disable(port);
2371		tegra_pcie_port_free(port);
2372	}
2373
2374	if (pcie->soc->has_gen2)
2375		tegra_pcie_change_link_speed(pcie);
2376}
2377
2378static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
2379{
2380	struct tegra_pcie_port *port, *tmp;
2381
2382	reset_control_assert(pcie->pcie_xrst);
2383
2384	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2385		tegra_pcie_port_disable(port);
2386}
2387
2388static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
2389	{ .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
2390	{ .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
2391};
2392
2393static const struct tegra_pcie_soc tegra20_pcie = {
2394	.num_ports = 2,
2395	.ports = tegra20_pcie_ports,
2396	.msi_base_shift = 0,
2397	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2398	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2399	.pads_refclk_cfg0 = 0xfa5cfa5c,
2400	.has_pex_clkreq_en = false,
2401	.has_pex_bias_ctrl = false,
2402	.has_intr_prsnt_sense = false,
2403	.has_cml_clk = false,
2404	.has_gen2 = false,
2405	.force_pca_enable = false,
2406	.program_uphy = true,
2407	.update_clamp_threshold = false,
2408	.program_deskew_time = false,
2409	.update_fc_timer = false,
2410	.has_cache_bars = true,
2411	.ectl.enable = false,
2412};
2413
2414static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
2415	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2416	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2417	{ .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
2418};
2419
2420static const struct tegra_pcie_soc tegra30_pcie = {
2421	.num_ports = 3,
2422	.ports = tegra30_pcie_ports,
2423	.msi_base_shift = 8,
2424	.afi_pex2_ctrl = 0x128,
2425	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2426	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2427	.pads_refclk_cfg0 = 0xfa5cfa5c,
2428	.pads_refclk_cfg1 = 0xfa5cfa5c,
2429	.has_pex_clkreq_en = true,
2430	.has_pex_bias_ctrl = true,
2431	.has_intr_prsnt_sense = true,
2432	.has_cml_clk = true,
2433	.has_gen2 = false,
2434	.force_pca_enable = false,
2435	.program_uphy = true,
2436	.update_clamp_threshold = false,
2437	.program_deskew_time = false,
2438	.update_fc_timer = false,
2439	.has_cache_bars = false,
2440	.ectl.enable = false,
2441};
2442
2443static const struct tegra_pcie_soc tegra124_pcie = {
2444	.num_ports = 2,
2445	.ports = tegra20_pcie_ports,
2446	.msi_base_shift = 8,
2447	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2448	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2449	.pads_refclk_cfg0 = 0x44ac44ac,
2450	.has_pex_clkreq_en = true,
2451	.has_pex_bias_ctrl = true,
2452	.has_intr_prsnt_sense = true,
2453	.has_cml_clk = true,
2454	.has_gen2 = true,
2455	.force_pca_enable = false,
2456	.program_uphy = true,
2457	.update_clamp_threshold = true,
2458	.program_deskew_time = false,
2459	.update_fc_timer = false,
2460	.has_cache_bars = false,
2461	.ectl.enable = false,
2462};
2463
2464static const struct tegra_pcie_soc tegra210_pcie = {
2465	.num_ports = 2,
2466	.ports = tegra20_pcie_ports,
2467	.msi_base_shift = 8,
2468	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2469	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2470	.pads_refclk_cfg0 = 0x90b890b8,
2471	/* FC threshold is bit[25:18] */
2472	.update_fc_threshold = 0x01800000,
2473	.has_pex_clkreq_en = true,
2474	.has_pex_bias_ctrl = true,
2475	.has_intr_prsnt_sense = true,
2476	.has_cml_clk = true,
2477	.has_gen2 = true,
2478	.force_pca_enable = true,
2479	.program_uphy = true,
2480	.update_clamp_threshold = true,
2481	.program_deskew_time = true,
2482	.update_fc_timer = true,
2483	.has_cache_bars = false,
2484	.ectl = {
2485		.regs = {
2486			.rp_ectl_2_r1 = 0x0000000f,
2487			.rp_ectl_4_r1 = 0x00000067,
2488			.rp_ectl_5_r1 = 0x55010000,
2489			.rp_ectl_6_r1 = 0x00000001,
2490			.rp_ectl_2_r2 = 0x0000008f,
2491			.rp_ectl_4_r2 = 0x000000c7,
2492			.rp_ectl_5_r2 = 0x55010000,
2493			.rp_ectl_6_r2 = 0x00000001,
2494		},
2495		.enable = true,
2496	},
2497};
2498
2499static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
2500	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2501	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2502	{ .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
2503};
2504
2505static const struct tegra_pcie_soc tegra186_pcie = {
2506	.num_ports = 3,
2507	.ports = tegra186_pcie_ports,
2508	.msi_base_shift = 8,
2509	.afi_pex2_ctrl = 0x19c,
2510	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2511	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2512	.pads_refclk_cfg0 = 0x80b880b8,
2513	.pads_refclk_cfg1 = 0x000480b8,
2514	.has_pex_clkreq_en = true,
2515	.has_pex_bias_ctrl = true,
2516	.has_intr_prsnt_sense = true,
2517	.has_cml_clk = false,
2518	.has_gen2 = true,
2519	.force_pca_enable = false,
2520	.program_uphy = false,
2521	.update_clamp_threshold = false,
2522	.program_deskew_time = false,
2523	.update_fc_timer = false,
2524	.has_cache_bars = false,
2525	.ectl.enable = false,
2526};
2527
2528static const struct of_device_id tegra_pcie_of_match[] = {
2529	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
2530	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
2531	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
2532	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
2533	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
2534	{ },
2535};
2536MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
2537
2538static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2539{
2540	struct tegra_pcie *pcie = s->private;
2541
2542	if (list_empty(&pcie->ports))
2543		return NULL;
2544
2545	seq_puts(s, "Index  Status\n");
2546
2547	return seq_list_start(&pcie->ports, *pos);
2548}
2549
2550static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
2551{
2552	struct tegra_pcie *pcie = s->private;
2553
2554	return seq_list_next(v, &pcie->ports, pos);
2555}
2556
2557static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
2558{
2559}
2560
2561static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2562{
2563	bool up = false, active = false;
2564	struct tegra_pcie_port *port;
2565	unsigned int value;
2566
2567	port = list_entry(v, struct tegra_pcie_port, list);
2568
2569	value = readl(port->base + RP_VEND_XP);
2570
2571	if (value & RP_VEND_XP_DL_UP)
2572		up = true;
2573
2574	value = readl(port->base + RP_LINK_CONTROL_STATUS);
2575
2576	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2577		active = true;
2578
2579	seq_printf(s, "%2u     ", port->index);
2580
2581	if (up)
2582		seq_puts(s, "up");
2583
2584	if (active) {
2585		if (up)
2586			seq_puts(s, ", ");
2587
2588		seq_puts(s, "active");
2589	}
2590
2591	seq_puts(s, "\n");
2592	return 0;
2593}
2594
2595static const struct seq_operations tegra_pcie_ports_sops = {
2596	.start = tegra_pcie_ports_seq_start,
2597	.next = tegra_pcie_ports_seq_next,
2598	.stop = tegra_pcie_ports_seq_stop,
2599	.show = tegra_pcie_ports_seq_show,
2600};
2601
2602DEFINE_SEQ_ATTRIBUTE(tegra_pcie_ports);
2603
2604static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
2605{
2606	debugfs_remove_recursive(pcie->debugfs);
2607	pcie->debugfs = NULL;
2608}
2609
2610static void tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2611{
2612	pcie->debugfs = debugfs_create_dir("pcie", NULL);
2613
2614	debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, pcie,
2615			    &tegra_pcie_ports_fops);
2616}
2617
2618static int tegra_pcie_probe(struct platform_device *pdev)
2619{
2620	struct device *dev = &pdev->dev;
2621	struct pci_host_bridge *host;
2622	struct tegra_pcie *pcie;
2623	int err;
2624
2625	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
2626	if (!host)
2627		return -ENOMEM;
2628
2629	pcie = pci_host_bridge_priv(host);
2630	host->sysdata = pcie;
2631	platform_set_drvdata(pdev, pcie);
2632
2633	pcie->soc = of_device_get_match_data(dev);
2634	INIT_LIST_HEAD(&pcie->ports);
2635	pcie->dev = dev;
2636
2637	err = tegra_pcie_parse_dt(pcie);
2638	if (err < 0)
2639		return err;
2640
2641	err = tegra_pcie_get_resources(pcie);
2642	if (err < 0) {
2643		dev_err(dev, "failed to request resources: %d\n", err);
2644		return err;
2645	}
2646
2647	err = tegra_pcie_msi_setup(pcie);
2648	if (err < 0) {
2649		dev_err(dev, "failed to enable MSI support: %d\n", err);
2650		goto put_resources;
2651	}
2652
2653	pm_runtime_enable(pcie->dev);
2654	err = pm_runtime_get_sync(pcie->dev);
2655	if (err < 0) {
2656		dev_err(dev, "failed to enable PCIe controller: %d\n", err);
2657		goto pm_runtime_put;
2658	}
2659
2660	host->ops = &tegra_pcie_ops;
2661	host->map_irq = tegra_pcie_map_irq;
2662
2663	err = pci_host_probe(host);
2664	if (err < 0) {
2665		dev_err(dev, "failed to register host: %d\n", err);
2666		goto pm_runtime_put;
2667	}
2668
2669	if (IS_ENABLED(CONFIG_DEBUG_FS))
2670		tegra_pcie_debugfs_init(pcie);
2671
2672	return 0;
2673
2674pm_runtime_put:
2675	pm_runtime_put_sync(pcie->dev);
2676	pm_runtime_disable(pcie->dev);
2677	tegra_pcie_msi_teardown(pcie);
2678put_resources:
2679	tegra_pcie_put_resources(pcie);
2680	return err;
2681}
2682
2683static void tegra_pcie_remove(struct platform_device *pdev)
2684{
2685	struct tegra_pcie *pcie = platform_get_drvdata(pdev);
2686	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
2687	struct tegra_pcie_port *port, *tmp;
2688
2689	if (IS_ENABLED(CONFIG_DEBUG_FS))
2690		tegra_pcie_debugfs_exit(pcie);
2691
2692	pci_stop_root_bus(host->bus);
2693	pci_remove_root_bus(host->bus);
2694	pm_runtime_put_sync(pcie->dev);
2695	pm_runtime_disable(pcie->dev);
2696
2697	if (IS_ENABLED(CONFIG_PCI_MSI))
2698		tegra_pcie_msi_teardown(pcie);
2699
2700	tegra_pcie_put_resources(pcie);
2701
2702	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2703		tegra_pcie_port_free(port);
2704}
2705
2706static int tegra_pcie_pm_suspend(struct device *dev)
2707{
2708	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2709	struct tegra_pcie_port *port;
2710	int err;
2711
2712	list_for_each_entry(port, &pcie->ports, list)
2713		tegra_pcie_pme_turnoff(port);
2714
2715	tegra_pcie_disable_ports(pcie);
2716
2717	/*
2718	 * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
2719	 * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
2720	 */
2721	tegra_pcie_disable_interrupts(pcie);
2722
2723	if (pcie->soc->program_uphy) {
2724		err = tegra_pcie_phy_power_off(pcie);
2725		if (err < 0)
2726			dev_err(dev, "failed to power off PHY(s): %d\n", err);
2727	}
2728
2729	reset_control_assert(pcie->pex_rst);
2730	clk_disable_unprepare(pcie->pex_clk);
2731
2732	if (IS_ENABLED(CONFIG_PCI_MSI))
2733		tegra_pcie_disable_msi(pcie);
2734
2735	pinctrl_pm_select_idle_state(dev);
2736	tegra_pcie_power_off(pcie);
2737
2738	return 0;
2739}
2740
2741static int tegra_pcie_pm_resume(struct device *dev)
2742{
2743	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2744	int err;
2745
2746	err = tegra_pcie_power_on(pcie);
2747	if (err) {
2748		dev_err(dev, "failed to power on Tegra PCIe: %d\n", err);
2749		return err;
2750	}
2751
2752	err = pinctrl_pm_select_default_state(dev);
2753	if (err < 0) {
2754		dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
2755		goto poweroff;
2756	}
2757
2758	tegra_pcie_enable_controller(pcie);
2759	tegra_pcie_setup_translations(pcie);
2760
2761	if (IS_ENABLED(CONFIG_PCI_MSI))
2762		tegra_pcie_enable_msi(pcie);
2763
2764	err = clk_prepare_enable(pcie->pex_clk);
2765	if (err) {
2766		dev_err(dev, "failed to enable PEX clock: %d\n", err);
2767		goto pex_dpd_enable;
2768	}
2769
2770	reset_control_deassert(pcie->pex_rst);
2771
2772	if (pcie->soc->program_uphy) {
2773		err = tegra_pcie_phy_power_on(pcie);
2774		if (err < 0) {
2775			dev_err(dev, "failed to power on PHY(s): %d\n", err);
2776			goto disable_pex_clk;
2777		}
2778	}
2779
2780	tegra_pcie_apply_pad_settings(pcie);
2781	tegra_pcie_enable_ports(pcie);
2782
2783	return 0;
2784
2785disable_pex_clk:
2786	reset_control_assert(pcie->pex_rst);
2787	clk_disable_unprepare(pcie->pex_clk);
2788pex_dpd_enable:
2789	pinctrl_pm_select_idle_state(dev);
2790poweroff:
2791	tegra_pcie_power_off(pcie);
2792
2793	return err;
2794}
2795
2796static const struct dev_pm_ops tegra_pcie_pm_ops = {
2797	RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
2798	NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume)
2799};
2800
2801static struct platform_driver tegra_pcie_driver = {
2802	.driver = {
2803		.name = "tegra-pcie",
2804		.of_match_table = tegra_pcie_of_match,
2805		.suppress_bind_attrs = true,
2806		.pm = &tegra_pcie_pm_ops,
2807	},
2808	.probe = tegra_pcie_probe,
2809	.remove_new = tegra_pcie_remove,
2810};
2811module_platform_driver(tegra_pcie_driver);