// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * Author: Thierry Reding <treding@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>

#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>

#include "../pci.h"

#define INT_PCI_MSI_NR (8 * 32)
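
/*
 * A short worked example, derived from the AFI_MSI_VEC and AFI_MSI_EN_VEC
 * handling further down in this file: the 256 MSIs are spread across eight
 * 32-bit vector registers, so a hardware IRQ number maps to a register and
 * bit as
 *
 *   reg = AFI_MSI_VEC0 + (hwirq / 32) * 4;
 *   bit = hwirq % 32;
 *
 * e.g. hwirq 70 lives in AFI_MSI_VEC2, bit 6.
 */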

/* register definitions */

#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_MSI_VEC0		0x6c
#define AFI_MSI_VEC1		0x70
#define AFI_MSI_VEC2		0x74
#define AFI_MSI_VEC3		0x78
#define AFI_MSI_VEC4		0x7c
#define AFI_MSI_VEC5		0x80
#define AFI_MSI_VEC6		0x84
#define AFI_MSI_VEC7		0x88

#define AFI_MSI_EN_VEC0		0x8c
#define AFI_MSI_EN_VEC1		0x90
#define AFI_MSI_EN_VEC2		0x94
#define AFI_MSI_EN_VEC3		0x98
#define AFI_MSI_EN_VEC4		0x9c
#define AFI_MSI_EN_VEC5		0xa0
#define AFI_MSI_EN_VEC6		0xa4
#define AFI_MSI_EN_VEC7		0xa8

#define AFI_CONFIGURATION		0xac
#define  AFI_CONFIGURATION_EN_FPCI		(1 << 0)
#define  AFI_CONFIGURATION_CLKEN_OVERRIDE	(1 << 31)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE			0xb8
#define  AFI_INTR_CODE_MASK		0xf
#define  AFI_INTR_INI_SLAVE_ERROR	1
#define  AFI_INTR_INI_DECODE_ERROR	2
#define  AFI_INTR_TARGET_ABORT		3
#define  AFI_INTR_MASTER_ABORT		4
#define  AFI_INTR_INVALID_WRITE		5
#define  AFI_INTR_LEGACY		6
#define  AFI_INTR_FPCI_DECODE_ERROR	7
#define  AFI_INTR_AXI_DECODE_ERROR	8
#define  AFI_INTR_FPCI_TIMEOUT		9
#define  AFI_INTR_PE_PRSNT_SENSE	10
#define  AFI_INTR_PE_CLKREQ_SENSE	11
#define  AFI_INTR_CLKCLAMP_SENSE	12
#define  AFI_INTR_RDY4PD_SENSE		13
#define  AFI_INTR_P2P_ERROR		14

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)

#define AFI_PCIE_PME		0xf0

#define AFI_PCIE_CONFIG					0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111	(0x2 << 20)
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x)		(1 << ((x) + 29))
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL		(0x7 << 29)
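
/*
 * For reference: AFI_PCIE_CONFIG_PCIE_DISABLE(x) places port x's disable
 * bit at position x + 1, so port 0 -> bit 1, port 1 -> bit 2 and
 * port 2 -> bit 3, which is why the "disable all" mask is 0xe (bits 3:1).
 * The per-port CLKREQ-as-GPIO bits follow the same pattern at bits 31:29.
 */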

#define AFI_FUSE			0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define  AFI_PEX_CTRL_RST		(1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)

#define AFI_PLLE_CONTROL		0x160
#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0		0x168

#define RP_PRIV_XP_DL		0x00000494
#define  RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD	(0x1ff << 1)

#define RP_RX_HDR_LIMIT		0x00000e00
#define  RP_RX_HDR_LIMIT_PW_MASK	(0xff << 8)
#define  RP_RX_HDR_LIMIT_PW		(0x0e << 8)

#define RP_ECTL_2_R1	0x00000e84
#define  RP_ECTL_2_R1_RX_CTLE_1C_MASK		0xffff

#define RP_ECTL_4_R1	0x00000e8c
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R1	0x00000e90
#define  RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R1	0x00000e94
#define  RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_ECTL_2_R2	0x00000ea4
#define  RP_ECTL_2_R2_RX_CTLE_1C_MASK	0xffff

#define RP_ECTL_4_R2	0x00000eac
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R2	0x00000eb0
#define  RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R2	0x00000eb4
#define  RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_VEND_XP	0x00000f00
#define  RP_VEND_XP_DL_UP			(1 << 30)
#define  RP_VEND_XP_OPPORTUNISTIC_ACK		(1 << 27)
#define  RP_VEND_XP_OPPORTUNISTIC_UPDATEFC	(1 << 28)
#define  RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK	(0xff << 18)

#define RP_VEND_CTL0	0x00000f44
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK	(0xf << 12)
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH	(0x9 << 12)

#define RP_VEND_CTL1	0x00000f48
#define  RP_VEND_CTL1_ERPT	(1 << 13)

#define RP_VEND_XP_BIST	0x00000f4c
#define  RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE	(1 << 28)

#define RP_VEND_CTL2 0x00000fa8
#define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)

#define RP_PRIV_MISC	0x00000fe0
#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT		(0xe << 0)
#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT		(0xf << 0)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD		(0xf << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE		(1 << 23)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD		(0xf << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE		(1 << 31)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define RP_LINK_CONTROL_STATUS_2		0x000000b0

#define PADS_CTL_SEL		0x0000009c

#define PADS_CTL		0x000000a0
#define  PADS_CTL_IDDQ_1L	(1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL_TEGRA20			0x000000b8
#define PADS_PLL_CTL_TEGRA30			0x000000b4
#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0			0x000000c8
#define PADS_REFCLK_CFG1			0x000000cc
#define PADS_REFCLK_BIAS			0x000000d0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
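
/*
 * Worked example of the field layout (the 0x44ac entry is hypothetical,
 * chosen only for illustration): a 16-bit PADS_REFCLK_CFG entry of 0x44ac
 * decodes as DRVI = 0x4 (bits 15:12), PREDI = 0x4 (bits 11:8), E_TERM = 1
 * (bit 7) and TERM = 0xb (bits 6:2). Two 16-bit entries fit per 32-bit
 * register, so CFG0 presumably covers ports 0 and 1 while CFG1 covers the
 * remaining port(s), matching tegra_pcie_apply_pad_settings() below, which
 * only writes CFG1 when the SoC has more than two ports.
 */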

#define PME_ACK_TIMEOUT 10000
#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */

struct tegra_msi {
	struct msi_controller chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct mutex lock;
	void *virt;
	dma_addr_t phys;
	int irq;
};

/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_port_soc {
	struct {
		u8 turnoff_bit;
		u8 ack_bit;
	} pme;
};

struct tegra_pcie_soc {
	unsigned int num_ports;
	const struct tegra_pcie_port_soc *ports;
	unsigned int msi_base_shift;
	unsigned long afi_pex2_ctrl;
	u32 pads_pll_ctl;
	u32 tx_ref_sel;
	u32 pads_refclk_cfg0;
	u32 pads_refclk_cfg1;
	u32 update_fc_threshold;
	bool has_pex_clkreq_en;
	bool has_pex_bias_ctrl;
	bool has_intr_prsnt_sense;
	bool has_cml_clk;
	bool has_gen2;
	bool force_pca_enable;
	bool program_uphy;
	bool update_clamp_threshold;
	bool program_deskew_time;
	bool raw_violation_fixup;
	bool update_fc_timer;
	bool has_cache_bars;
	struct {
		struct {
			u32 rp_ectl_2_r1;
			u32 rp_ectl_4_r1;
			u32 rp_ectl_5_r1;
			u32 rp_ectl_6_r1;
			u32 rp_ectl_2_r2;
			u32 rp_ectl_4_r2;
			u32 rp_ectl_5_r2;
			u32 rp_ectl_6_r2;
		} regs;
		bool enable;
	} ectl;
};

static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}

struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;
	void __iomem *afi;
	void __iomem *cfg;
	int irq;

	struct resource cs;
	struct resource io;
	struct resource pio;
	struct resource mem;
	struct resource prefetch;
	struct resource busn;

	struct {
		resource_size_t mem;
		resource_size_t io;
	} offset;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	bool legacy_phy;
	struct phy *phy;

	struct tegra_msi msi;

	struct list_head ports;
	u32 xbar_config;

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc *soc;
	struct dentry *debugfs;
};

struct tegra_pcie_port {
	struct tegra_pcie *pcie;
	struct device_node *np;
	struct list_head list;
	struct resource regs;
	void __iomem *base;
	unsigned int index;
	unsigned int lanes;

	struct phy **phys;

	struct gpio_desc *reset_gpio;
};

struct tegra_pcie_bus {
	struct list_head list;
	unsigned int nr;
};

static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}

/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However, it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 *
 * To work around this, a 4 KiB region is used to generate the required
 * configuration transaction with the relevant B:D:F and register offset
 * values. This is achieved by dynamically programming the base address and
 * size of the AFI_AXI_BAR used for endpoint config space mapping, so that
 * the address whose access generates the correct config transaction always
 * falls within this 4 KiB region.
 */
static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
					   unsigned int where)
{
	return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
	       (PCI_FUNC(devfn) << 8) | (where & 0xff);
}
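
/*
 * Worked example for the encoding above (hypothetical B:D:F and register,
 * chosen only for illustration): bus 0x05, device 0, function 0,
 * register 0x104 gives
 *
 *   ((0x104 & 0xf00) << 16) | (0x05 << 16) | (0 << 11) | (0 << 8) | 0x04
 *   = 0x01000000 | 0x00050000 | 0x04 = 0x01050004
 *
 * i.e. the extended register nibble 0x1 in bits [27:24], the bus in
 * bits [23:16] and the low byte of the register offset in bits [7:0].
 */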

static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
					unsigned int devfn,
					int where)
{
	struct tegra_pcie *pcie = bus->sysdata;
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		unsigned int offset;
		u32 base;

		offset = tegra_pcie_conf_offset(bus->number, devfn, where);

		/* move 4 KiB window to offset within the FPCI region */
		base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
		afi_writel(pcie, base, AFI_FPCI_BAR0);

		/* move to correct offset within the 4 KiB page */
		addr = pcie->cfg + (offset & (SZ_4K - 1));
	}

	return addr;
}
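
/*
 * Continuing the hypothetical example from tegra_pcie_conf_offset(): for
 * offset 0x01050004 the 4 KiB window is moved to
 *
 *   base = 0xfe100000 + ((0x01050004 & ~0xfff) >> 8) = 0xfe110500
 *
 * and the access itself lands at pcie->cfg + 0x004, so a single 4 KiB
 * mapping can reach any B:D:F/register combination.
 */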

static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *value)
{
	if (bus->number == 0)
		return pci_generic_config_read32(bus, devfn, where, size,
						 value);

	return pci_generic_config_read(bus, devfn, where, size, value);
}

static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 value)
{
	if (bus->number == 0)
		return pci_generic_config_write32(bus, devfn, where, size,
						  value);

	return pci_generic_config_write(bus, devfn, where, size, value);
}

static struct pci_ops tegra_pcie_ops = {
	.map_bus = tegra_pcie_map_bus,
	.read = tegra_pcie_config_read,
	.write = tegra_pcie_config_write,
};

static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long ret = 0;

	switch (port->index) {
	case 0:
		ret = AFI_PEX0_CTRL;
		break;

	case 1:
		ret = AFI_PEX1_CTRL;
		break;

	case 2:
		ret = soc->afi_pex2_ctrl;
		break;
	}

	return ret;
}

static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 1);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value &= ~AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}

	usleep_range(1000, 2000);

	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 0);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value |= AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}
}

static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/* Enable AER capability */
	value = readl(port->base + RP_VEND_CTL1);
	value |= RP_VEND_CTL1_ERPT;
	writel(value, port->base + RP_VEND_CTL1);

	/* Optimal settings to enhance bandwidth */
	value = readl(port->base + RP_VEND_XP);
	value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
	value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
	writel(value, port->base + RP_VEND_XP);

	/*
	 * LTSSM will wait for DLLP to finish before entering L1 or L2,
	 * to avoid truncation of PM messages, which results in receiver
	 * errors.
	 */
	value = readl(port->base + RP_VEND_XP_BIST);
	value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
	writel(value, port->base + RP_VEND_XP_BIST);

	value = readl(port->base + RP_PRIV_MISC);
	value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
	value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;

	if (soc->update_clamp_threshold) {
		value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
				RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
		value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
			RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
	}

	writel(value, port->base + RP_PRIV_MISC);
}

static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	value = readl(port->base + RP_ECTL_2_R1);
	value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r1;
	writel(value, port->base + RP_ECTL_2_R1);

	value = readl(port->base + RP_ECTL_4_R1);
	value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r1 <<
				RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R1);

	value = readl(port->base + RP_ECTL_5_R1);
	value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r1;
	writel(value, port->base + RP_ECTL_5_R1);

	value = readl(port->base + RP_ECTL_6_R1);
	value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r1;
	writel(value, port->base + RP_ECTL_6_R1);

	value = readl(port->base + RP_ECTL_2_R2);
	value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r2;
	writel(value, port->base + RP_ECTL_2_R2);

	value = readl(port->base + RP_ECTL_4_R2);
	value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r2 <<
				RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R2);

	value = readl(port->base + RP_ECTL_5_R2);
	value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r2;
	writel(value, port->base + RP_ECTL_5_R2);

	value = readl(port->base + RP_ECTL_6_R2);
	value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r2;
	writel(value, port->base + RP_ECTL_6_R2);
}

static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/*
	 * Sometimes link speed change from Gen2 to Gen1 fails due to
	 * instability in deskew logic on lane-0. Increase the deskew
	 * retry time to resolve this issue.
	 */
	if (soc->program_deskew_time) {
		value = readl(port->base + RP_VEND_CTL0);
		value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
		value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
		writel(value, port->base + RP_VEND_CTL0);
	}

	/* Fixup for read after write violation. */
	if (soc->raw_violation_fixup) {
		value = readl(port->base + RP_RX_HDR_LIMIT);
		value &= ~RP_RX_HDR_LIMIT_PW_MASK;
		value |= RP_RX_HDR_LIMIT_PW;
		writel(value, port->base + RP_RX_HDR_LIMIT);

		value = readl(port->base + RP_PRIV_XP_DL);
		value |= RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD;
		writel(value, port->base + RP_PRIV_XP_DL);

		value = readl(port->base + RP_VEND_XP);
		value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
		value |= soc->update_fc_threshold;
		writel(value, port->base + RP_VEND_XP);
	}

	if (soc->update_fc_timer) {
		value = readl(port->base + RP_VEND_XP);
		value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
		value |= soc->update_fc_threshold;
		writel(value, port->base + RP_VEND_XP);
	}

	/*
	 * The PCIe link doesn't come up with a few legacy PCIe endpoints if
	 * the root port advertises both Gen-1 and Gen-2 speeds on Tegra.
	 * Hence, the strategy followed here is to initially advertise only
	 * Gen-1 and, after the link is up, retrain the link to Gen-2 speed.
	 */
	value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
	value &= ~PCI_EXP_LNKSTA_CLS;
	value |= PCI_EXP_LNKSTA_CLS_2_5GB;
	writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
}

static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);

	if (soc->force_pca_enable) {
		value = readl(port->base + RP_VEND_CTL2);
		value |= RP_VEND_CTL2_PCA_ENABLE;
		writel(value, port->base + RP_VEND_CTL2);
	}

	tegra_pcie_enable_rp_features(port);

	if (soc->ectl.enable)
		tegra_pcie_program_ectl_settings(port);

	tegra_pcie_apply_sw_fixup(port);
}

static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);

	/* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
	value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
}

static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}

/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);

/* Tegra20 and Tegra30 PCIE require relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);

static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct list_head *windows = &host->windows;
	struct device *dev = pcie->dev;
	int err;

	pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
	pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
	pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
	pci_add_resource(windows, &pcie->busn);

	err = devm_request_pci_bus_resources(dev, windows);
	if (err < 0) {
		pci_free_resource_list(windows);
		return err;
	}

	pci_remap_iospace(&pcie->pio, pcie->io.start);

	return 0;
}

static void tegra_pcie_free_resources(struct tegra_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct list_head *windows = &host->windows;

	pci_unmap_iospace(&pcie->pio);
	pci_free_resource_list(windows);
}

static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = pdev->bus->sysdata;
	int irq;

	tegra_cpuidle_pcie_irqs_in_use();

	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
	if (!irq)
		irq = pcie->irq;

	return irq;
}

static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
	const char *err_msg[] = {
		"Unknown",
		"AXI slave error",
		"AXI decode error",
		"Target abort",
		"Master abort",
		"Invalid write",
		"Legacy interrupt",
		"Response decoding error",
		"AXI response decoding error",
		"Transaction timeout",
		"Slot present pin change",
		"Slot clock request change",
		"TMS clock ramp change",
		"TMS ready for power down",
		"Peer2Peer error",
	};
	struct tegra_pcie *pcie = arg;
	struct device *dev = pcie->dev;
	u32 code, signature;

	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
	afi_writel(pcie, 0, AFI_INTR_CODE);

	if (code == AFI_INTR_LEGACY)
		return IRQ_NONE;

	if (code >= ARRAY_SIZE(err_msg))
		code = 0;

	/*
	 * do not pollute kernel log with master abort reports since they
	 * happen a lot during enumeration
	 */
	if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
	else
		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);

	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
	    code == AFI_INTR_FPCI_DECODE_ERROR) {
		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);

		if (code == AFI_INTR_MASTER_ABORT)
			dev_dbg(dev, "  FPCI address: %10llx\n", address);
		else
			dev_err(dev, "  FPCI address: %10llx\n", address);
	}

	return IRQ_HANDLED;
}
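
/*
 * For reference: the faulting FPCI address above is reassembled from two
 * registers, AFI_UPPER_FPCI_ADDRESS supplying bits [39:32] and the
 * interrupt signature supplying bits [31:2]. With hypothetical values
 * fpci = 0xfd and signature = 0xfe123457 this yields
 *
 *   (u64)0xfd << 32 | (0xfe123457 & 0xfffffffc) = 0xfdfe123454
 *
 * which lands in the FPCI map described below.
 */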

/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	size = resource_size(&pcie->cs);
	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);

	/* Bar 1: downstream I/O BAR */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non-prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	if (pcie->soc->has_cache_bars) {
		/* map all upstream transactions as uncached */
		afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
	}

	/* MSI translations are set up only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
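
/*
 * For reference, the FPCI BAR encoding used above packs an address into
 * bits [31:4] (address bits [39:12]) with bit 0 acting as an enable. With
 * a hypothetical prefetchable window at 0x20000000 this gives
 *
 *   fpci_bar = (((0x20000000 >> 12) & 0x0fffffff) << 4) | 0x1 = 0x00200001
 *
 * while the AXI BAR sizes are programmed in 4 KiB (size >> 12) units.
 */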

static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		if (value & PADS_PLL_CTL_LOCKDET)
			return 0;
	}

	return -ETIMEDOUT;
}

static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIe lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs: select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}

static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* override IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}

static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_on(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_off(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
				err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;

	list_for_each_entry(port, &pcie->ports, list) {
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
		value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	}

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	/* Disable AFI dynamic clock gating and enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
}

static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->afi_rst);

	clk_disable_unprepare(pcie->pll_e);
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
	clk_disable_unprepare(pcie->afi_clk);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}

static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(dev, "failed to enable regulators: %d\n", err);

	if (!dev->pm_domain) {
		err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to power ungate: %d\n", err);
			goto regulator_disable;
		}
		err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to remove clamp: %d\n", err);
			goto powergate;
		}
	}

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(dev, "failed to enable AFI clock: %d\n", err);
		goto powergate;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(dev, "failed to enable CML clock: %d\n", err);
			goto disable_afi_clk;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
		goto disable_cml_clk;
	}

	reset_control_deassert(pcie->afi_rst);

	return 0;

disable_cml_clk:
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
disable_afi_clk:
	clk_disable_unprepare(pcie->afi_clk);
powergate:
	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
regulator_disable:
	regulator_bulk_disable(pcie->num_supplies, pcie->supplies);

	return err;
}

static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;

	/* Configure the reference clock driver */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
}

static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;

	pcie->pex_clk = devm_clk_get(dev, "pex");
	if (IS_ERR(pcie->pex_clk))
		return PTR_ERR(pcie->pex_clk);

	pcie->afi_clk = devm_clk_get(dev, "afi");
	if (IS_ERR(pcie->afi_clk))
		return PTR_ERR(pcie->afi_clk);

	pcie->pll_e = devm_clk_get(dev, "pll_e");
	if (IS_ERR(pcie->pll_e))
		return PTR_ERR(pcie->pll_e);

	if (soc->has_cml_clk) {
		pcie->cml_clk = devm_clk_get(dev, "cml");
		if (IS_ERR(pcie->cml_clk))
			return PTR_ERR(pcie->cml_clk);
	}

	return 0;
}

static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;

	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
	if (IS_ERR(pcie->pex_rst))
		return PTR_ERR(pcie->pex_rst);

	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
	if (IS_ERR(pcie->afi_rst))
		return PTR_ERR(pcie->afi_rst);

	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
	if (IS_ERR(pcie->pcie_xrst))
		return PTR_ERR(pcie->pcie_xrst);

	return 0;
}

static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	pcie->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	pcie->legacy_phy = true;

	return 0;
}

static struct phy *devm_of_phy_optional_get_index(struct device *dev,
						  struct device_node *np,
						  const char *consumer,
						  unsigned int index)
{
	struct phy *phy;
	char *name;

	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
	if (!name)
		return ERR_PTR(-ENOMEM);

	phy = devm_of_phy_get(dev, np, name);
	kfree(name);

	if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	return phy;
}

static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	struct phy *phy;
	unsigned int i;
	int err;

	port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
	if (!port->phys)
		return -ENOMEM;

	for (i = 0; i < port->lanes; i++) {
		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
		if (IS_ERR(phy)) {
			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
				PTR_ERR(phy));
			return PTR_ERR(phy);
		}

		err = phy_init(phy);
		if (err < 0) {
			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
				err);
			return err;
		}

		port->phys[i] = phy;
	}

	return 0;
}

static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct device_node *np = pcie->dev->of_node;
	struct tegra_pcie_port *port;
	int err;

	if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
		return tegra_pcie_phys_get_legacy(pcie);

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_get_phys(port);
		if (err < 0)
			return err;
	}

	return 0;
}

static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port;
	struct device *dev = pcie->dev;
	int err, i;

	if (pcie->legacy_phy) {
		err = phy_exit(pcie->phy);
		if (err < 0)
			dev_err(dev, "failed to teardown PHY: %d\n", err);
		return;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		for (i = 0; i < port->lanes; i++) {
			err = phy_exit(port->phys[i]);
			if (err < 0)
				dev_err(dev, "failed to teardown PHY#%u: %d\n",
					i, err);
		}
	}
}

static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *pads, *afi, *res;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(dev, "failed to get resets: %d\n", err);
		return err;
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phys_get(pcie);
		if (err < 0) {
			dev_err(dev, "failed to get PHYs: %d\n", err);
			return err;
		}
	}

	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	pcie->pads = devm_ioremap_resource(dev, pads);
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto phys_put;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	pcie->afi = devm_ioremap_resource(dev, afi);
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto phys_put;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto phys_put;
	}

	pcie->cs = *res;

	/* constrain configuration space to 4 KiB */
	pcie->cs.end = pcie->cs.start + SZ_4K - 1;

	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
	if (IS_ERR(pcie->cfg)) {
		err = PTR_ERR(pcie->cfg);
		goto phys_put;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", err);
		goto phys_put;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(dev, "failed to register IRQ: %d\n", err);
		goto phys_put;
	}

	return 0;

phys_put:
	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);
	return err;
}
1572
1573static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1574{
1575	const struct tegra_pcie_soc *soc = pcie->soc;
1576
1577	if (pcie->irq > 0)
1578		free_irq(pcie->irq, pcie);
1579
1580	if (soc->program_uphy)
1581		tegra_pcie_phys_put(pcie);
1582
1583	return 0;
1584}
1585
1586static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
1587{
1588	struct tegra_pcie *pcie = port->pcie;
1589	const struct tegra_pcie_soc *soc = pcie->soc;
1590	int err;
1591	u32 val;
1592	u8 ack_bit;
1593
1594	val = afi_readl(pcie, AFI_PCIE_PME);
1595	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
1596	afi_writel(pcie, val, AFI_PCIE_PME);
1597
1598	ack_bit = soc->ports[port->index].pme.ack_bit;
1599	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
1600				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
1601	if (err)
1602		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
1603			port->index);
1604
1605	usleep_range(10000, 11000);
1606
1607	val = afi_readl(pcie, AFI_PCIE_PME);
1608	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
1609	afi_writel(pcie, val, AFI_PCIE_PME);
1610}
1611
1612static int tegra_msi_alloc(struct tegra_msi *chip)
1613{
1614	int msi;
1615
1616	mutex_lock(&chip->lock);
1617
1618	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1619	if (msi < INT_PCI_MSI_NR)
1620		set_bit(msi, chip->used);
1621	else
1622		msi = -ENOSPC;
1623
1624	mutex_unlock(&chip->lock);
1625
1626	return msi;
1627}
1628
1629static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1630{
1631	struct device *dev = chip->chip.dev;
1632
1633	mutex_lock(&chip->lock);
1634
1635	if (!test_bit(irq, chip->used))
1636		dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1637	else
1638		clear_bit(irq, chip->used);
1639
1640	mutex_unlock(&chip->lock);
1641}
1642
1643static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
1644{
1645	struct tegra_pcie *pcie = data;
1646	struct device *dev = pcie->dev;
1647	struct tegra_msi *msi = &pcie->msi;
1648	unsigned int i, processed = 0;
 
 
 
1649
1650	for (i = 0; i < 8; i++) {
1651		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1652
1653		while (reg) {
1654			unsigned int offset = find_first_bit(&reg, 32);
1655			unsigned int index = i * 32 + offset;
1656			unsigned int irq;
1657
1658			/* clear the interrupt */
1659			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
1660
1661			irq = irq_find_mapping(msi->domain, index);
1662			if (irq) {
1663				if (test_bit(index, msi->used))
1664					generic_handle_irq(irq);
1665				else
1666					dev_info(dev, "unhandled MSI\n");
1667			} else {
1668				/*
1669				 * that's weird who triggered this?
1670				 * just clear it
1671				 */
1672				dev_info(dev, "unexpected MSI\n");
 
1673			}
1674
1675			/* see if there's any more pending in this vector */
1676			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1677
1678			processed++;
1679		}
1680	}
1681
1682	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
 
 
 
 
 
1683}
1684
1685static int tegra_msi_setup_irq(struct msi_controller *chip,
1686			       struct pci_dev *pdev, struct msi_desc *desc)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1687{
1688	struct tegra_msi *msi = to_tegra_msi(chip);
1689	struct msi_msg msg;
1690	unsigned int irq;
1691	int hwirq;
1692
1693	hwirq = tegra_msi_alloc(msi);
1694	if (hwirq < 0)
1695		return hwirq;
1696
1697	irq = irq_create_mapping(msi->domain, hwirq);
1698	if (!irq) {
1699		tegra_msi_free(msi, hwirq);
1700		return -EINVAL;
1701	}
1702
1703	irq_set_msi_desc(irq, desc);
1704
1705	msg.address_lo = lower_32_bits(msi->phys);
1706	msg.address_hi = upper_32_bits(msi->phys);
1707	msg.data = hwirq;
1708
1709	pci_write_msi_msg(irq, &msg);
 
 
 
 
 
1710
1711	return 0;
1712}
1713
1714static void tegra_msi_teardown_irq(struct msi_controller *chip,
1715				   unsigned int irq)
1716{
1717	struct tegra_msi *msi = to_tegra_msi(chip);
1718	struct irq_data *d = irq_get_irq_data(irq);
1719	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
1720
1721	irq_dispose_mapping(irq);
1722	tegra_msi_free(msi, hwirq);
 
1723}
1724
1725static struct irq_chip tegra_msi_irq_chip = {
1726	.name = "Tegra PCIe MSI",
1727	.irq_enable = pci_msi_unmask_irq,
1728	.irq_disable = pci_msi_mask_irq,
1729	.irq_mask = pci_msi_mask_irq,
1730	.irq_unmask = pci_msi_unmask_irq,
 
 
 
1731};
1732
1733static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
1734			 irq_hw_number_t hwirq)
1735{
1736	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
1737	irq_set_chip_data(irq, domain->host_data);
 
 
 
 
 
 
 
 
 
1738
1739	tegra_cpuidle_pcie_irqs_in_use();
 
 
 
 
 
1740
1741	return 0;
1742}
1743
1744static const struct irq_domain_ops msi_domain_ops = {
1745	.map = tegra_msi_map,
1746};
 
 
 
 
1747
1748static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
1749{
1750	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
1751	struct platform_device *pdev = to_platform_device(pcie->dev);
1752	struct tegra_msi *msi = &pcie->msi;
1753	struct device *dev = pcie->dev;
1754	int err;
1755
1756	mutex_init(&msi->lock);
1757
1758	msi->chip.dev = dev;
1759	msi->chip.setup_irq = tegra_msi_setup_irq;
1760	msi->chip.teardown_irq = tegra_msi_teardown_irq;
1761
1762	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
1763					    &msi_domain_ops, &msi->chip);
1764	if (!msi->domain) {
1765		dev_err(dev, "failed to create IRQ domain\n");
1766		return -ENOMEM;
1767	}
1768
1769	err = platform_get_irq_byname(pdev, "msi");
1770	if (err < 0) {
1771		dev_err(dev, "failed to get IRQ: %d\n", err);
1772		goto free_irq_domain;
1773	}
1774
1775	msi->irq = err;
1776
1777	err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
1778			  tegra_msi_irq_chip.name, pcie);
1779	if (err < 0) {
1780		dev_err(dev, "failed to request IRQ: %d\n", err);
1781		goto free_irq_domain;
1782	}
1783
1784	/* Though the PCIe controller can address >32-bit address space, to
1785	 * facilitate endpoints that support only 32-bit MSI target address,
1786	 * the mask is set to 32-bit to make sure that MSI target address is
1787	 * always a 32-bit address
1788	 */
1789	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
1790	if (err < 0) {
1791		dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
1792		goto free_irq;
1793	}
1794
1795	msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
1796				    DMA_ATTR_NO_KERNEL_MAPPING);
1797	if (!msi->virt) {
1798		dev_err(dev, "failed to allocate DMA memory for MSI\n");
1799		err = -ENOMEM;
1800		goto free_irq;
1801	}
1802
1803	host->msi = &msi->chip;
1804
1805	return 0;
1806
1807free_irq:
1808	free_irq(msi->irq, pcie);
1809free_irq_domain:
1810	irq_domain_remove(msi->domain);
 
 
1811	return err;
1812}
1813
1814static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1815{
1816	const struct tegra_pcie_soc *soc = pcie->soc;
1817	struct tegra_msi *msi = &pcie->msi;
1818	u32 reg;
 
1819
1820	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1821	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
1822	/* this register is in 4K increments */
1823	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1824
1825	/* enable all MSI vectors */
1826	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1827	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1828	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1829	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1830	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1831	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1832	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1833	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1834
1835	/* and unmask the MSI interrupt */
1836	reg = afi_readl(pcie, AFI_INTR_MASK);
1837	reg |= AFI_INTR_MASK_MSI_MASK;
1838	afi_writel(pcie, reg, AFI_INTR_MASK);
1839}
1840
1841static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
1842{
1843	struct tegra_msi *msi = &pcie->msi;
1844	unsigned int i, irq;
1845
1846	dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
1847		       DMA_ATTR_NO_KERNEL_MAPPING);
1848
1849	if (msi->irq > 0)
1850		free_irq(msi->irq, pcie);
1851
1852	for (i = 0; i < INT_PCI_MSI_NR; i++) {
1853		irq = irq_find_mapping(msi->domain, i);
1854		if (irq > 0)
1855			irq_dispose_mapping(irq);
1856	}
1857
1858	irq_domain_remove(msi->domain);
 
 
 
1859}
1860
1861static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1862{
1863	u32 value;
1864
1865	/* mask the MSI interrupt */
1866	value = afi_readl(pcie, AFI_INTR_MASK);
1867	value &= ~AFI_INTR_MASK_MSI_MASK;
1868	afi_writel(pcie, value, AFI_INTR_MASK);
1869
1870	/* disable all MSI vectors */
1871	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
1872	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
1873	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
1874	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
1875	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
1876	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
1877	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
1878	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
1879
1880	return 0;
1881}
1882
1883static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
1884{
1885	u32 value;
1886
1887	value = afi_readl(pcie, AFI_INTR_MASK);
1888	value &= ~AFI_INTR_MASK_INT_MASK;
1889	afi_writel(pcie, value, AFI_INTR_MASK);
1890}
1891
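/*
 * The "lanes" value packs the "nvidia,num-lanes" property of each root port
 * into one byte per port (port 0 in bits 7:0, port 1 in bits 15:8, and so
 * on), as assembled in tegra_pcie_parse_dt(). For example, 0x010004 means
 * port 0 uses four lanes, port 1 is unused and port 2 uses one lane.
 */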
1892static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1893				      u32 *xbar)
1894{
1895	struct device *dev = pcie->dev;
1896	struct device_node *np = dev->of_node;
1897
1898	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1899		switch (lanes) {
1900		case 0x010004:
1901			dev_info(dev, "4x1, 1x1 configuration\n");
1902			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
1903			return 0;
1904
1905		case 0x010102:
1906			dev_info(dev, "2x1, 1x1, 1x1 configuration\n");
1907			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1908			return 0;
1909
1910		case 0x010101:
1911			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
1912			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
1913			return 0;
1914
1915		default:
1916			dev_info(dev, "unsupported lane configuration in DT, "
1917				 "falling back to default 2x1, 1x1, 1x1 "
1918				 "configuration\n");
1919			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1920			return 0;
1921		}
1922	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
1923		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1924		switch (lanes) {
1925		case 0x0000104:
1926			dev_info(dev, "4x1, 1x1 configuration\n");
1927			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1928			return 0;
1929
1930		case 0x0000102:
1931			dev_info(dev, "2x1, 1x1 configuration\n");
1932			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1933			return 0;
1934		}
1935	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1936		switch (lanes) {
1937		case 0x00000204:
1938			dev_info(dev, "4x1, 2x1 configuration\n");
1939			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1940			return 0;
1941
1942		case 0x00020202:
1943			dev_info(dev, "2x3 configuration\n");
1944			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1945			return 0;
1946
1947		case 0x00010104:
1948			dev_info(dev, "4x1, 1x2 configuration\n");
1949			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1950			return 0;
1951		}
1952	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1953		switch (lanes) {
1954		case 0x00000004:
1955			dev_info(dev, "single-mode configuration\n");
1956			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1957			return 0;
1958
1959		case 0x00000202:
1960			dev_info(dev, "dual-mode configuration\n");
1961			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1962			return 0;
1963		}
1964	}
1965
1966	return -EINVAL;
1967}
1968
1969/*
1970 * Check whether a given set of supplies is available in a device tree node.
1971 * This is used to check whether the new or the legacy device tree bindings
1972 * should be used.
1973 */
1974static bool of_regulator_bulk_available(struct device_node *np,
1975					struct regulator_bulk_data *supplies,
1976					unsigned int num_supplies)
1977{
1978	char property[32];
1979	unsigned int i;
1980
1981	for (i = 0; i < num_supplies; i++) {
1982		snprintf(property, sizeof(property), "%s-supply", supplies[i].supply);
1983
1984		if (of_find_property(np, property, NULL) == NULL)
1985			return false;
1986	}
1987
1988	return true;
1989}
1990
1991/*
1992 * Old versions of the device tree binding for this device used a set of power
1993 * supplies that didn't match the hardware inputs. This happened to work for a
1994 * number of cases but is not future proof. However to preserve backwards-
1995 * compatibility with old device trees, this function will try to use the old
1996 * set of supplies.
1997 */
1998static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1999{
2000	struct device *dev = pcie->dev;
2001	struct device_node *np = dev->of_node;
2002
2003	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
2004		pcie->num_supplies = 3;
2005	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
2006		pcie->num_supplies = 2;
2007
2008	if (pcie->num_supplies == 0) {
2009		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
2010		return -ENODEV;
2011	}
2012
2013	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2014				      sizeof(*pcie->supplies),
2015				      GFP_KERNEL);
2016	if (!pcie->supplies)
2017		return -ENOMEM;
2018
2019	pcie->supplies[0].supply = "pex-clk";
2020	pcie->supplies[1].supply = "vdd";
2021
2022	if (pcie->num_supplies > 2)
2023		pcie->supplies[2].supply = "avdd";
2024
2025	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
2026}
2027
2028/*
2029 * Obtains the list of regulators required for a particular generation of the
2030 * IP block.
2031 *
2032 * This would've been nice to do simply by providing static tables for use
2033 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
2034 * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
2035 * and either seems to be optional depending on which ports are being used.
2036 */
2037static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
2038{
2039	struct device *dev = pcie->dev;
2040	struct device_node *np = dev->of_node;
2041	unsigned int i = 0;
2042
2043	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
2044		pcie->num_supplies = 4;
2045
2046		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2047					      sizeof(*pcie->supplies),
2048					      GFP_KERNEL);
2049		if (!pcie->supplies)
2050			return -ENOMEM;
2051
2052		pcie->supplies[i++].supply = "dvdd-pex";
2053		pcie->supplies[i++].supply = "hvdd-pex-pll";
2054		pcie->supplies[i++].supply = "hvdd-pex";
2055		pcie->supplies[i++].supply = "vddio-pexctl-aud";
2056	} else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
2057		pcie->num_supplies = 6;
2058
2059		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2060					      sizeof(*pcie->supplies),
2061					      GFP_KERNEL);
2062		if (!pcie->supplies)
2063			return -ENOMEM;
2064
2065		pcie->supplies[i++].supply = "avdd-pll-uerefe";
2066		pcie->supplies[i++].supply = "hvddio-pex";
2067		pcie->supplies[i++].supply = "dvddio-pex";
2068		pcie->supplies[i++].supply = "dvdd-pex-pll";
2069		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
2070		pcie->supplies[i++].supply = "vddio-pex-ctl";
2071	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
2072		pcie->num_supplies = 7;
2073
2074		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2075					      sizeof(*pcie->supplies),
2076					      GFP_KERNEL);
2077		if (!pcie->supplies)
2078			return -ENOMEM;
2079
2080		pcie->supplies[i++].supply = "avddio-pex";
2081		pcie->supplies[i++].supply = "dvddio-pex";
2082		pcie->supplies[i++].supply = "avdd-pex-pll";
2083		pcie->supplies[i++].supply = "hvdd-pex";
2084		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
2085		pcie->supplies[i++].supply = "vddio-pex-ctl";
2086		pcie->supplies[i++].supply = "avdd-pll-erefe";
2087	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
2088		bool need_pexa = false, need_pexb = false;
2089
2090		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
2091		if (lane_mask & 0x0f)
2092			need_pexa = true;
2093
2094		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
2095		if (lane_mask & 0x30)
2096			need_pexb = true;
2097
2098		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
2099					 (need_pexb ? 2 : 0);
2100
2101		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2102					      sizeof(*pcie->supplies),
2103					      GFP_KERNEL);
2104		if (!pcie->supplies)
2105			return -ENOMEM;
2106
2107		pcie->supplies[i++].supply = "avdd-pex-pll";
2108		pcie->supplies[i++].supply = "hvdd-pex";
2109		pcie->supplies[i++].supply = "vddio-pex-ctl";
2110		pcie->supplies[i++].supply = "avdd-plle";
2111
2112		if (need_pexa) {
2113			pcie->supplies[i++].supply = "avdd-pexa";
2114			pcie->supplies[i++].supply = "vdd-pexa";
2115		}
2116
2117		if (need_pexb) {
2118			pcie->supplies[i++].supply = "avdd-pexb";
2119			pcie->supplies[i++].supply = "vdd-pexb";
2120		}
2121	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
2122		pcie->num_supplies = 5;
2123
2124		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2125					      sizeof(*pcie->supplies),
2126					      GFP_KERNEL);
2127		if (!pcie->supplies)
2128			return -ENOMEM;
2129
2130		pcie->supplies[0].supply = "avdd-pex";
2131		pcie->supplies[1].supply = "vdd-pex";
2132		pcie->supplies[2].supply = "avdd-pex-pll";
2133		pcie->supplies[3].supply = "avdd-plle";
2134		pcie->supplies[4].supply = "vddio-pex-clk";
2135	}
2136
2137	if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
2138					pcie->num_supplies))
2139		return devm_regulator_bulk_get(dev, pcie->num_supplies,
2140					       pcie->supplies);
2141
2142	/*
2143	 * If not all regulators are available for this new scheme, assume
2144	 * that the device tree complies with an older version of the device
2145	 * tree binding.
2146	 */
2147	dev_info(dev, "using legacy DT binding for power supplies\n");
2148
2149	devm_kfree(dev, pcie->supplies);
2150	pcie->num_supplies = 0;
2151
2152	return tegra_pcie_get_legacy_regulators(pcie);
2153}
2154
2155static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2156{
2157	struct device *dev = pcie->dev;
2158	struct device_node *np = dev->of_node, *port;
2159	const struct tegra_pcie_soc *soc = pcie->soc;
2160	struct of_pci_range_parser parser;
2161	struct of_pci_range range;
2162	u32 lanes = 0, mask = 0;
2163	unsigned int lane = 0;
2164	struct resource res;
2165	int err;
2166
2167	if (of_pci_range_parser_init(&parser, np)) {
2168		dev_err(dev, "missing \"ranges\" property\n");
2169		return -EINVAL;
2170	}
2171
2172	for_each_of_pci_range(&parser, &range) {
2173		err = of_pci_range_to_resource(&range, np, &res);
2174		if (err < 0)
2175			return err;
2176
2177		switch (res.flags & IORESOURCE_TYPE_BITS) {
2178		case IORESOURCE_IO:
2179			/* Track the bus -> CPU I/O mapping offset. */
2180			pcie->offset.io = res.start - range.pci_addr;
2181
2182			memcpy(&pcie->pio, &res, sizeof(res));
2183			pcie->pio.name = np->full_name;
2184
2185			/*
2186			 * The Tegra PCIe host bridge uses this to program the
2187			 * mapping of the I/O space to the physical address,
2188			 * so we override the .start and .end fields here that
2189			 * of_pci_range_to_resource() converted to I/O space.
2190			 * We also set the IORESOURCE_MEM type to clarify that
2191			 * the resource is in the physical memory space.
2192			 */
2193			pcie->io.start = range.cpu_addr;
2194			pcie->io.end = range.cpu_addr + range.size - 1;
2195			pcie->io.flags = IORESOURCE_MEM;
2196			pcie->io.name = "I/O";
2197
2198			memcpy(&res, &pcie->io, sizeof(res));
2199			break;
2200
2201		case IORESOURCE_MEM:
2202			/*
2203			 * Track the bus -> CPU memory mapping offset. This
2204			 * assumes that the prefetchable and non-prefetchable
2205			 * regions will be the last of type IORESOURCE_MEM in
2206			 * the ranges property.
2207			 */
2208			pcie->offset.mem = res.start - range.pci_addr;
2209
2210			if (res.flags & IORESOURCE_PREFETCH) {
2211				memcpy(&pcie->prefetch, &res, sizeof(res));
2212				pcie->prefetch.name = "prefetchable";
2213			} else {
2214				memcpy(&pcie->mem, &res, sizeof(res));
2215				pcie->mem.name = "non-prefetchable";
2216			}
2217			break;
2218		}
2219	}
2220
2221	err = of_pci_parse_bus_range(np, &pcie->busn);
2222	if (err < 0) {
2223		dev_err(dev, "failed to parse bus-range property: %d\n", err);
2224		pcie->busn.name = np->name;
2225		pcie->busn.start = 0;
2226		pcie->busn.end = 0xff;
2227		pcie->busn.flags = IORESOURCE_BUS;
2228	}
2229
2230	/* parse root ports */
2231	for_each_child_of_node(np, port) {
2232		struct tegra_pcie_port *rp;
2233		unsigned int index;
2234		u32 value;
2235		char *label;
2236
2237		err = of_pci_get_devfn(port);
2238		if (err < 0) {
2239			dev_err(dev, "failed to parse address: %d\n", err);
2240			goto err_node_put;
2241		}
2242
2243		index = PCI_SLOT(err);
2244
2245		if (index < 1 || index > soc->num_ports) {
2246			dev_err(dev, "invalid port number: %d\n", index);
2247			err = -EINVAL;
2248			goto err_node_put;
2249		}
2250
2251		index--;
2252
2253		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
2254		if (err < 0) {
2255			dev_err(dev, "failed to parse # of lanes: %d\n",
2256				err);
2257			goto err_node_put;
2258		}
2259
2260		if (value > 16) {
2261			dev_err(dev, "invalid # of lanes: %u\n", value);
2262			err = -EINVAL;
2263			goto err_node_put;
2264		}
2265
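		/* record this port's lane count in its per-port byte of "lanes" */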
2266		lanes |= value << (index << 3);
2267
2268		if (!of_device_is_available(port)) {
2269			lane += value;
2270			continue;
2271		}
2272
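		/*
		 * Mark the physical lanes consumed by this enabled port;
		 * tegra_pcie_get_regulators() uses this mask on Tegra30 to
		 * decide whether the PEXA and/or PEXB supplies are needed.
		 */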
2273		mask |= ((1 << value) - 1) << lane;
2274		lane += value;
2275
2276		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2277		if (!rp) {
2278			err = -ENOMEM;
2279			goto err_node_put;
2280		}
2281
2282		err = of_address_to_resource(port, 0, &rp->regs);
2283		if (err < 0) {
2284			dev_err(dev, "failed to parse address: %d\n", err);
2285			goto err_node_put;
2286		}
2287
2288		INIT_LIST_HEAD(&rp->list);
2289		rp->index = index;
2290		rp->lanes = value;
2291		rp->pcie = pcie;
2292		rp->np = port;
2293
2294		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
		if (IS_ERR(rp->base)) {
			err = PTR_ERR(rp->base);
			goto err_node_put;
		}
2297
2298		label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
		if (!label) {
			dev_err(dev, "failed to create reset GPIO label\n");
			err = -ENOMEM;
			goto err_node_put;
		}
2303
2304		/*
2305		 * Returns -ENOENT if reset-gpios property is not populated
2306		 * and in this case fall back to using AFI per port register
2307		 * to toggle PERST# SFIO line.
2308		 */
2309		rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port,
2310							     "reset-gpios", 0,
2311							     GPIOD_OUT_LOW,
2312							     label);
		if (IS_ERR(rp->reset_gpio)) {
			if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
				rp->reset_gpio = NULL;
			} else {
				dev_err(dev, "failed to get reset GPIO: %ld\n",
					PTR_ERR(rp->reset_gpio));
				err = PTR_ERR(rp->reset_gpio);
				goto err_node_put;
			}
		}
2322
2323		list_add_tail(&rp->list, &pcie->ports);
2324	}
2325
2326	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
2327	if (err < 0) {
2328		dev_err(dev, "invalid lane configuration\n");
2329		return err;
2330	}
2331
2332	err = tegra_pcie_get_regulators(pcie, mask);
2333	if (err < 0)
2334		return err;
2335
2336	return 0;
2337
2338err_node_put:
2339	of_node_put(port);
2340	return err;
2341}
2342
2343/*
2344 * FIXME: If there are no PCIe cards attached, then calling this function
2345 * can result in the increase of the bootup time as there are big timeout
2346 * loops.
2347 */
2348#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
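/*
 * Worst case with no endpoint attached: 3 retries x up to 2 poll loops x 200
 * iterations of a 1-2 ms sleep, i.e. on the order of 1.2-2.4 seconds per port.
 */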
2349static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
2350{
2351	struct device *dev = port->pcie->dev;
2352	unsigned int retries = 3;
2353	unsigned long value;
2354
2355	/* override presence detection */
2356	value = readl(port->base + RP_PRIV_MISC);
2357	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
2358	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
2359	writel(value, port->base + RP_PRIV_MISC);
2360
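	/*
	 * Each retry first waits for the data link layer to come up
	 * (RP_VEND_XP_DL_UP), then for the link to report active
	 * (DL_LINK_ACTIVE), resetting the port between attempts.
	 */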
2361	do {
2362		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2363
2364		do {
2365			value = readl(port->base + RP_VEND_XP);
2366
2367			if (value & RP_VEND_XP_DL_UP)
2368				break;
2369
2370			usleep_range(1000, 2000);
2371		} while (--timeout);
2372
2373		if (!timeout) {
2374			dev_dbg(dev, "link %u down, retrying\n", port->index);
2375			goto retry;
2376		}
2377
2378		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2379
2380		do {
2381			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2382
2383			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2384				return true;
2385
2386			usleep_range(1000, 2000);
2387		} while (--timeout);
2388
2389retry:
2390		tegra_pcie_port_reset(port);
2391	} while (--retries);
2392
2393	return false;
2394}
2395
2396static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
2397{
2398	struct device *dev = pcie->dev;
2399	struct tegra_pcie_port *port;
2400	ktime_t deadline;
2401	u32 value;
2402
2403	list_for_each_entry(port, &pcie->ports, list) {
2404		/*
2405		 * "Supported Link Speeds Vector" in "Link Capabilities 2"
2406		 * is not supported by Tegra. tegra_pcie_change_link_speed()
2407		 * is called only for Tegra chips which support Gen2.
2408		 * So there is no harm if the supported link speed is not verified.
2409		 */
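		/* set the target link speed to Gen2 (5.0 GT/s) */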
2410		value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
2411		value &= ~PCI_EXP_LNKSTA_CLS;
2412		value |= PCI_EXP_LNKSTA_CLS_5_0GB;
2413		writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
2414
2415		/*
2416		 * Poll until link comes back from recovery to avoid race
2417		 * condition.
2418		 */
2419		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2420
2421		while (ktime_before(ktime_get(), deadline)) {
2422			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2423			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2424				break;
2425
2426			usleep_range(2000, 3000);
2427		}
2428
2429		if (value & PCI_EXP_LNKSTA_LT)
2430			dev_warn(dev, "PCIe port %u link is in recovery\n",
2431				 port->index);
2432
2433		/* Retrain the link */
2434		value = readl(port->base + RP_LINK_CONTROL_STATUS);
2435		value |= PCI_EXP_LNKCTL_RL;
2436		writel(value, port->base + RP_LINK_CONTROL_STATUS);
2437
2438		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2439
2440		while (ktime_before(ktime_get(), deadline)) {
2441			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2442			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2443				break;
2444
2445			usleep_range(2000, 3000);
2446		}
2447
2448		if (value & PCI_EXP_LNKSTA_LT)
2449			dev_err(dev, "failed to retrain link of port %u\n",
2450				port->index);
2451	}
2452}
2453
2454static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
2455{
2456	struct device *dev = pcie->dev;
2457	struct tegra_pcie_port *port, *tmp;
2458
2459	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2460		dev_info(dev, "probing port %u, using %u lanes\n",
2461			 port->index, port->lanes);
2462
2463		tegra_pcie_port_enable(port);
2464	}
2465
2466	/* Start LTSSM from Tegra side */
2467	reset_control_deassert(pcie->pcie_xrst);
2468
2469	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2470		if (tegra_pcie_port_check_link(port))
2471			continue;
2472
2473		dev_info(dev, "link %u down, ignoring\n", port->index);
2474
2475		tegra_pcie_port_disable(port);
2476		tegra_pcie_port_free(port);
2477	}
2478
2479	if (pcie->soc->has_gen2)
2480		tegra_pcie_change_link_speed(pcie);
2481}
2482
2483static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
2484{
2485	struct tegra_pcie_port *port, *tmp;
2486
2487	reset_control_assert(pcie->pcie_xrst);
2488
2489	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2490		tegra_pcie_port_disable(port);
2491}
2492
2493static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
2494	{ .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
2495	{ .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
2496};
2497
2498static const struct tegra_pcie_soc tegra20_pcie = {
2499	.num_ports = 2,
2500	.ports = tegra20_pcie_ports,
2501	.msi_base_shift = 0,
2502	.afi_pex2_ctrl = 0x128,
2503	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2504	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2505	.pads_refclk_cfg0 = 0xfa5cfa5c,
2506	.has_pex_clkreq_en = false,
2507	.has_pex_bias_ctrl = false,
2508	.has_intr_prsnt_sense = false,
2509	.has_cml_clk = false,
2510	.has_gen2 = false,
2511	.force_pca_enable = false,
2512	.program_uphy = true,
2513	.update_clamp_threshold = false,
2514	.program_deskew_time = false,
2515	.raw_violation_fixup = false,
2516	.update_fc_timer = false,
2517	.has_cache_bars = true,
2518	.ectl.enable = false,
2519};
2520
2521static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
2522	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2523	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2524	{ .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
2525};
2526
2527static const struct tegra_pcie_soc tegra30_pcie = {
2528	.num_ports = 3,
2529	.ports = tegra30_pcie_ports,
2530	.msi_base_shift = 8,
2531	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2532	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2533	.pads_refclk_cfg0 = 0xfa5cfa5c,
2534	.pads_refclk_cfg1 = 0xfa5cfa5c,
2535	.has_pex_clkreq_en = true,
2536	.has_pex_bias_ctrl = true,
2537	.has_intr_prsnt_sense = true,
2538	.has_cml_clk = true,
2539	.has_gen2 = false,
2540	.force_pca_enable = false,
2541	.program_uphy = true,
2542	.update_clamp_threshold = false,
2543	.program_deskew_time = false,
2544	.raw_violation_fixup = false,
2545	.update_fc_timer = false,
2546	.has_cache_bars = false,
2547	.ectl.enable = false,
2548};
2549
2550static const struct tegra_pcie_soc tegra124_pcie = {
2551	.num_ports = 2,
2552	.ports = tegra20_pcie_ports,
2553	.msi_base_shift = 8,
2554	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2555	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2556	.pads_refclk_cfg0 = 0x44ac44ac,
2557	/* FC threshold is bit[25:18] */
2558	.update_fc_threshold = 0x03fc0000,
2559	.has_pex_clkreq_en = true,
2560	.has_pex_bias_ctrl = true,
2561	.has_intr_prsnt_sense = true,
2562	.has_cml_clk = true,
2563	.has_gen2 = true,
2564	.force_pca_enable = false,
2565	.program_uphy = true,
2566	.update_clamp_threshold = true,
2567	.program_deskew_time = false,
2568	.raw_violation_fixup = true,
2569	.update_fc_timer = false,
2570	.has_cache_bars = false,
2571	.ectl.enable = false,
2572};
2573
2574static const struct tegra_pcie_soc tegra210_pcie = {
2575	.num_ports = 2,
2576	.ports = tegra20_pcie_ports,
2577	.msi_base_shift = 8,
2578	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2579	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2580	.pads_refclk_cfg0 = 0x90b890b8,
2581	/* FC threshold is bit[25:18] */
2582	.update_fc_threshold = 0x01800000,
2583	.has_pex_clkreq_en = true,
2584	.has_pex_bias_ctrl = true,
2585	.has_intr_prsnt_sense = true,
2586	.has_cml_clk = true,
2587	.has_gen2 = true,
2588	.force_pca_enable = true,
2589	.program_uphy = true,
2590	.update_clamp_threshold = true,
2591	.program_deskew_time = true,
2592	.raw_violation_fixup = false,
2593	.update_fc_timer = true,
2594	.has_cache_bars = false,
2595	.ectl = {
2596		.regs = {
2597			.rp_ectl_2_r1 = 0x0000000f,
2598			.rp_ectl_4_r1 = 0x00000067,
2599			.rp_ectl_5_r1 = 0x55010000,
2600			.rp_ectl_6_r1 = 0x00000001,
2601			.rp_ectl_2_r2 = 0x0000008f,
2602			.rp_ectl_4_r2 = 0x000000c7,
2603			.rp_ectl_5_r2 = 0x55010000,
2604			.rp_ectl_6_r2 = 0x00000001,
2605		},
2606		.enable = true,
2607	},
2608};
2609
2610static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
2611	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2612	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2613	{ .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
2614};
2615
2616static const struct tegra_pcie_soc tegra186_pcie = {
2617	.num_ports = 3,
2618	.ports = tegra186_pcie_ports,
2619	.msi_base_shift = 8,
2620	.afi_pex2_ctrl = 0x19c,
2621	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2622	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2623	.pads_refclk_cfg0 = 0x80b880b8,
2624	.pads_refclk_cfg1 = 0x000480b8,
2625	.has_pex_clkreq_en = true,
2626	.has_pex_bias_ctrl = true,
2627	.has_intr_prsnt_sense = true,
2628	.has_cml_clk = false,
2629	.has_gen2 = true,
2630	.force_pca_enable = false,
2631	.program_uphy = false,
2632	.update_clamp_threshold = false,
2633	.program_deskew_time = false,
2634	.raw_violation_fixup = false,
2635	.update_fc_timer = false,
2636	.has_cache_bars = false,
2637	.ectl.enable = false,
2638};
2639
2640static const struct of_device_id tegra_pcie_of_match[] = {
2641	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
2642	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
2643	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
2644	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
2645	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
2646	{ },
2647};
2648
2649static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2650{
2651	struct tegra_pcie *pcie = s->private;
2652
2653	if (list_empty(&pcie->ports))
2654		return NULL;
2655
2656	seq_printf(s, "Index  Status\n");
2657
2658	return seq_list_start(&pcie->ports, *pos);
2659}
2660
2661static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
2662{
2663	struct tegra_pcie *pcie = s->private;
2664
2665	return seq_list_next(v, &pcie->ports, pos);
2666}
2667
2668static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
2669{
2670}
2671
2672static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2673{
2674	bool up = false, active = false;
2675	struct tegra_pcie_port *port;
2676	unsigned int value;
2677
2678	port = list_entry(v, struct tegra_pcie_port, list);
2679
2680	value = readl(port->base + RP_VEND_XP);
2681
2682	if (value & RP_VEND_XP_DL_UP)
2683		up = true;
2684
2685	value = readl(port->base + RP_LINK_CONTROL_STATUS);
2686
2687	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2688		active = true;
2689
2690	seq_printf(s, "%2u     ", port->index);
2691
2692	if (up)
2693		seq_printf(s, "up");
2694
2695	if (active) {
2696		if (up)
2697			seq_printf(s, ", ");
2698
2699		seq_printf(s, "active");
2700	}
2701
2702	seq_printf(s, "\n");
2703	return 0;
2704}
2705
2706static const struct seq_operations tegra_pcie_ports_seq_ops = {
2707	.start = tegra_pcie_ports_seq_start,
2708	.next = tegra_pcie_ports_seq_next,
2709	.stop = tegra_pcie_ports_seq_stop,
2710	.show = tegra_pcie_ports_seq_show,
2711};
2712
2713static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
2714{
2715	struct tegra_pcie *pcie = inode->i_private;
2716	struct seq_file *s;
2717	int err;
2718
2719	err = seq_open(file, &tegra_pcie_ports_seq_ops);
2720	if (err)
2721		return err;
2722
2723	s = file->private_data;
2724	s->private = pcie;
2725
2726	return 0;
2727}
2728
2729static const struct file_operations tegra_pcie_ports_ops = {
2730	.owner = THIS_MODULE,
2731	.open = tegra_pcie_ports_open,
2732	.read = seq_read,
2733	.llseek = seq_lseek,
2734	.release = seq_release,
2735};
2736
2737static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
2738{
2739	debugfs_remove_recursive(pcie->debugfs);
2740	pcie->debugfs = NULL;
2741}
2742
2743static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2744{
2745	struct dentry *file;
2746
2747	pcie->debugfs = debugfs_create_dir("pcie", NULL);
2748	if (!pcie->debugfs)
2749		return -ENOMEM;
2750
2751	file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
2752				   pcie, &tegra_pcie_ports_ops);
2753	if (!file)
2754		goto remove;
2755
2756	return 0;
2757
2758remove:
2759	tegra_pcie_debugfs_exit(pcie);
2760	return -ENOMEM;
2761}
2762
2763static int tegra_pcie_probe(struct platform_device *pdev)
2764{
2765	struct device *dev = &pdev->dev;
2766	struct pci_host_bridge *host;
2767	struct tegra_pcie *pcie;
2768	struct pci_bus *child;
2769	int err;
2770
2771	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
2772	if (!host)
2773		return -ENOMEM;
2774
2775	pcie = pci_host_bridge_priv(host);
2776	host->sysdata = pcie;
2777	platform_set_drvdata(pdev, pcie);
2778
2779	pcie->soc = of_device_get_match_data(dev);
2780	INIT_LIST_HEAD(&pcie->ports);
2781	pcie->dev = dev;
2782
2783	err = tegra_pcie_parse_dt(pcie);
2784	if (err < 0)
2785		return err;
2786
2787	err = tegra_pcie_get_resources(pcie);
2788	if (err < 0) {
2789		dev_err(dev, "failed to request resources: %d\n", err);
2790		return err;
2791	}
2792
2793	err = tegra_pcie_msi_setup(pcie);
2794	if (err < 0) {
2795		dev_err(dev, "failed to enable MSI support: %d\n", err);
2796		goto put_resources;
2797	}
2798
2799	pm_runtime_enable(pcie->dev);
2800	err = pm_runtime_get_sync(pcie->dev);
2801	if (err < 0) {
2802		dev_err(dev, "failed to enable PCIe controller: %d\n", err);
2803		goto pm_runtime_put;
2804	}
2805
2806	err = tegra_pcie_request_resources(pcie);
2807	if (err)
2808		goto pm_runtime_put;
2809
2810	host->busnr = pcie->busn.start;
2811	host->dev.parent = &pdev->dev;
2812	host->ops = &tegra_pcie_ops;
2813	host->map_irq = tegra_pcie_map_irq;
2814	host->swizzle_irq = pci_common_swizzle;
2815
2816	err = pci_scan_root_bus_bridge(host);
2817	if (err < 0) {
2818		dev_err(dev, "failed to register host: %d\n", err);
2819		goto free_resources;
2820	}
2821
2822	pci_bus_size_bridges(host->bus);
2823	pci_bus_assign_resources(host->bus);
2824
2825	list_for_each_entry(child, &host->bus->children, node)
2826		pcie_bus_configure_settings(child);
2827
2828	pci_bus_add_devices(host->bus);
2829
2830	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2831		err = tegra_pcie_debugfs_init(pcie);
2832		if (err < 0)
2833			dev_err(dev, "failed to setup debugfs: %d\n", err);
2834	}
2835
2836	return 0;
2837
2838free_resources:
2839	tegra_pcie_free_resources(pcie);
2840pm_runtime_put:
2841	pm_runtime_put_sync(pcie->dev);
2842	pm_runtime_disable(pcie->dev);
2844	tegra_pcie_msi_teardown(pcie);
2845put_resources:
2846	tegra_pcie_put_resources(pcie);
2847	return err;
2848}
2849
2850static int tegra_pcie_remove(struct platform_device *pdev)
2851{
2852	struct tegra_pcie *pcie = platform_get_drvdata(pdev);
2853	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
2854	struct tegra_pcie_port *port, *tmp;
2855
2856	if (IS_ENABLED(CONFIG_DEBUG_FS))
2857		tegra_pcie_debugfs_exit(pcie);
2858
2859	pci_stop_root_bus(host->bus);
2860	pci_remove_root_bus(host->bus);
2861	tegra_pcie_free_resources(pcie);
2862	pm_runtime_put_sync(pcie->dev);
2863	pm_runtime_disable(pcie->dev);
2864
2865	if (IS_ENABLED(CONFIG_PCI_MSI))
2866		tegra_pcie_msi_teardown(pcie);
2867
2868	tegra_pcie_put_resources(pcie);
2869
2870	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2871		tegra_pcie_port_free(port);
2872
2873	return 0;
2874}
2875
2876static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
2877{
2878	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2879	struct tegra_pcie_port *port;
2880	int err;
2881
2882	list_for_each_entry(port, &pcie->ports, list)
2883		tegra_pcie_pme_turnoff(port);
2884
2885	tegra_pcie_disable_ports(pcie);
2886
2887	/*
2888	 * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
2889	 * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
2890	 */
2891	tegra_pcie_disable_interrupts(pcie);
2892
2893	if (pcie->soc->program_uphy) {
2894		err = tegra_pcie_phy_power_off(pcie);
2895		if (err < 0)
2896			dev_err(dev, "failed to power off PHY(s): %d\n", err);
2897	}
2898
2899	reset_control_assert(pcie->pex_rst);
2900	clk_disable_unprepare(pcie->pex_clk);
2901
2902	if (IS_ENABLED(CONFIG_PCI_MSI))
2903		tegra_pcie_disable_msi(pcie);
2904
2905	pinctrl_pm_select_idle_state(dev);
2906	tegra_pcie_power_off(pcie);
2907
2908	return 0;
2909}
2910
2911static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
2912{
2913	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2914	int err;
2915
2916	err = tegra_pcie_power_on(pcie);
2917	if (err) {
2918		dev_err(dev, "failed to power on Tegra PCIe: %d\n", err);
2919		return err;
2920	}
2921
2922	err = pinctrl_pm_select_default_state(dev);
2923	if (err < 0) {
2924		dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
2925		goto poweroff;
2926	}
2927
2928	tegra_pcie_enable_controller(pcie);
2929	tegra_pcie_setup_translations(pcie);
2930
2931	if (IS_ENABLED(CONFIG_PCI_MSI))
2932		tegra_pcie_enable_msi(pcie);
2933
2934	err = clk_prepare_enable(pcie->pex_clk);
2935	if (err) {
2936		dev_err(dev, "failed to enable PEX clock: %d\n", err);
2937		goto pex_dpd_enable;
2938	}
2939
2940	reset_control_deassert(pcie->pex_rst);
2941
2942	if (pcie->soc->program_uphy) {
2943		err = tegra_pcie_phy_power_on(pcie);
2944		if (err < 0) {
2945			dev_err(dev, "failed to power on PHY(s): %d\n", err);
2946			goto disable_pex_clk;
2947		}
2948	}
2949
2950	tegra_pcie_apply_pad_settings(pcie);
2951	tegra_pcie_enable_ports(pcie);
2952
2953	return 0;
2954
2955disable_pex_clk:
2956	reset_control_assert(pcie->pex_rst);
2957	clk_disable_unprepare(pcie->pex_clk);
2958pex_dpd_enable:
2959	pinctrl_pm_select_idle_state(dev);
2960poweroff:
2961	tegra_pcie_power_off(pcie);
2962
2963	return err;
2964}
2965
2966static const struct dev_pm_ops tegra_pcie_pm_ops = {
2967	SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
2968	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend,
2969				      tegra_pcie_pm_resume)
2970};
2971
2972static struct platform_driver tegra_pcie_driver = {
2973	.driver = {
2974		.name = "tegra-pcie",
2975		.of_match_table = tegra_pcie_of_match,
2976		.suppress_bind_attrs = true,
2977		.pm = &tegra_pcie_pm_ops,
2978	},
2979	.probe = tegra_pcie_probe,
2980	.remove = tegra_pcie_remove,
2981};
2982module_platform_driver(tegra_pcie_driver);
2983MODULE_LICENSE("GPL");
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * PCIe host controller driver for Tegra SoCs
   4 *
   5 * Copyright (c) 2010, CompuLab, Ltd.
   6 * Author: Mike Rapoport <mike@compulab.co.il>
   7 *
   8 * Based on NVIDIA PCIe driver
   9 * Copyright (c) 2008-2009, NVIDIA Corporation.
  10 *
  11 * Bits taken from arch/arm/mach-dove/pcie.c
  12 *
  13 * Author: Thierry Reding <treding@nvidia.com>
  14 */
  15
  16#include <linux/clk.h>
  17#include <linux/debugfs.h>
  18#include <linux/delay.h>
  19#include <linux/export.h>
  20#include <linux/gpio/consumer.h>
  21#include <linux/interrupt.h>
  22#include <linux/iopoll.h>
  23#include <linux/irq.h>
  24#include <linux/irqchip/chained_irq.h>
  25#include <linux/irqdomain.h>
  26#include <linux/kernel.h>
  27#include <linux/init.h>
  28#include <linux/module.h>
  29#include <linux/msi.h>
  30#include <linux/of_address.h>
  31#include <linux/of_pci.h>
  32#include <linux/of_platform.h>
  33#include <linux/pci.h>
  34#include <linux/phy/phy.h>
  35#include <linux/pinctrl/consumer.h>
  36#include <linux/platform_device.h>
  37#include <linux/reset.h>
  38#include <linux/sizes.h>
  39#include <linux/slab.h>
  40#include <linux/vmalloc.h>
  41#include <linux/regulator/consumer.h>
  42
  43#include <soc/tegra/cpuidle.h>
  44#include <soc/tegra/pmc.h>
  45
  46#include "../pci.h"
  47
  48#define INT_PCI_MSI_NR (8 * 32)
  49
  50/* register definitions */
  51
  52#define AFI_AXI_BAR0_SZ	0x00
  53#define AFI_AXI_BAR1_SZ	0x04
  54#define AFI_AXI_BAR2_SZ	0x08
  55#define AFI_AXI_BAR3_SZ	0x0c
  56#define AFI_AXI_BAR4_SZ	0x10
  57#define AFI_AXI_BAR5_SZ	0x14
  58
  59#define AFI_AXI_BAR0_START	0x18
  60#define AFI_AXI_BAR1_START	0x1c
  61#define AFI_AXI_BAR2_START	0x20
  62#define AFI_AXI_BAR3_START	0x24
  63#define AFI_AXI_BAR4_START	0x28
  64#define AFI_AXI_BAR5_START	0x2c
  65
  66#define AFI_FPCI_BAR0	0x30
  67#define AFI_FPCI_BAR1	0x34
  68#define AFI_FPCI_BAR2	0x38
  69#define AFI_FPCI_BAR3	0x3c
  70#define AFI_FPCI_BAR4	0x40
  71#define AFI_FPCI_BAR5	0x44
  72
  73#define AFI_CACHE_BAR0_SZ	0x48
  74#define AFI_CACHE_BAR0_ST	0x4c
  75#define AFI_CACHE_BAR1_SZ	0x50
  76#define AFI_CACHE_BAR1_ST	0x54
  77
  78#define AFI_MSI_BAR_SZ		0x60
  79#define AFI_MSI_FPCI_BAR_ST	0x64
  80#define AFI_MSI_AXI_BAR_ST	0x68
  81
  82#define AFI_MSI_VEC(x)		(0x6c + ((x) * 4))
  83#define AFI_MSI_EN_VEC(x)	(0x8c + ((x) * 4))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  84
  85#define AFI_CONFIGURATION		0xac
  86#define  AFI_CONFIGURATION_EN_FPCI		(1 << 0)
  87#define  AFI_CONFIGURATION_CLKEN_OVERRIDE	(1 << 31)
  88
  89#define AFI_FPCI_ERROR_MASKS	0xb0
  90
  91#define AFI_INTR_MASK		0xb4
  92#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
  93#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)
  94
  95#define AFI_INTR_CODE			0xb8
  96#define  AFI_INTR_CODE_MASK		0xf
  97#define  AFI_INTR_INI_SLAVE_ERROR	1
  98#define  AFI_INTR_INI_DECODE_ERROR	2
  99#define  AFI_INTR_TARGET_ABORT		3
 100#define  AFI_INTR_MASTER_ABORT		4
 101#define  AFI_INTR_INVALID_WRITE		5
 102#define  AFI_INTR_LEGACY		6
 103#define  AFI_INTR_FPCI_DECODE_ERROR	7
 104#define  AFI_INTR_AXI_DECODE_ERROR	8
 105#define  AFI_INTR_FPCI_TIMEOUT		9
 106#define  AFI_INTR_PE_PRSNT_SENSE	10
 107#define  AFI_INTR_PE_CLKREQ_SENSE	11
 108#define  AFI_INTR_CLKCLAMP_SENSE	12
 109#define  AFI_INTR_RDY4PD_SENSE		13
 110#define  AFI_INTR_P2P_ERROR		14
 111
 112#define AFI_INTR_SIGNATURE	0xbc
 113#define AFI_UPPER_FPCI_ADDRESS	0xc0
 114#define AFI_SM_INTR_ENABLE	0xc4
 115#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
 116#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
 117#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
 118#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
 119#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
 120#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
 121#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
 122#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)
 123
 124#define AFI_AFI_INTR_ENABLE		0xc8
 125#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
 126#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
 127#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
 128#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
 129#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
 130#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
 131#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
 132#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
 133#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)
 134
 135#define AFI_PCIE_PME		0xf0
 136
 137#define AFI_PCIE_CONFIG					0x0f8
 138#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
 139#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
 140#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
 141#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
 142#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
 143#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
 144#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401	(0x0 << 20)
 145#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
 146#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
 147#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
 148#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211	(0x1 << 20)
 149#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
 150#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111	(0x2 << 20)
 151#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x)		(1 << ((x) + 29))
 152#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL		(0x7 << 29)
 153
 154#define AFI_FUSE			0x104
 155#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)
 156
 157#define AFI_PEX0_CTRL			0x110
 158#define AFI_PEX1_CTRL			0x118
 159#define  AFI_PEX_CTRL_RST		(1 << 0)
 160#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
 161#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
 162#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)
 163
 164#define AFI_PLLE_CONTROL		0x160
 165#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
 166#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
 167
 168#define AFI_PEXBIAS_CTRL_0		0x168
 169
 
 
 
 
 
 
 
 170#define RP_ECTL_2_R1	0x00000e84
 171#define  RP_ECTL_2_R1_RX_CTLE_1C_MASK		0xffff
 172
 173#define RP_ECTL_4_R1	0x00000e8c
 174#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
 175#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT	16
 176
 177#define RP_ECTL_5_R1	0x00000e90
 178#define  RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK	0xffffffff
 179
 180#define RP_ECTL_6_R1	0x00000e94
 181#define  RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK	0xffffffff
 182
 183#define RP_ECTL_2_R2	0x00000ea4
 184#define  RP_ECTL_2_R2_RX_CTLE_1C_MASK	0xffff
 185
 186#define RP_ECTL_4_R2	0x00000eac
 187#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
 188#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT	16
 189
 190#define RP_ECTL_5_R2	0x00000eb0
 191#define  RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK	0xffffffff
 192
 193#define RP_ECTL_6_R2	0x00000eb4
 194#define  RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK	0xffffffff
 195
 196#define RP_VEND_XP	0x00000f00
 197#define  RP_VEND_XP_DL_UP			(1 << 30)
 198#define  RP_VEND_XP_OPPORTUNISTIC_ACK		(1 << 27)
 199#define  RP_VEND_XP_OPPORTUNISTIC_UPDATEFC	(1 << 28)
 200#define  RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK	(0xff << 18)
 201
 202#define RP_VEND_CTL0	0x00000f44
 203#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK	(0xf << 12)
 204#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH	(0x9 << 12)
 205
 206#define RP_VEND_CTL1	0x00000f48
 207#define  RP_VEND_CTL1_ERPT	(1 << 13)
 208
 209#define RP_VEND_XP_BIST	0x00000f4c
 210#define  RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE	(1 << 28)
 211
 212#define RP_VEND_CTL2 0x00000fa8
 213#define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)
 214
 215#define RP_PRIV_MISC	0x00000fe0
 216#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT		(0xe << 0)
 217#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT		(0xf << 0)
 218#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 16)
 219#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD		(0xf << 16)
 220#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE		(1 << 23)
 221#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 24)
 222#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD		(0xf << 24)
 223#define  RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE		(1 << 31)
 224
 225#define RP_LINK_CONTROL_STATUS			0x00000090
 226#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
 227#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000
 228
 229#define RP_LINK_CONTROL_STATUS_2		0x000000b0
 230
 231#define PADS_CTL_SEL		0x0000009c
 232
 233#define PADS_CTL		0x000000a0
 234#define  PADS_CTL_IDDQ_1L	(1 << 0)
 235#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
 236#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)
 237
 238#define PADS_PLL_CTL_TEGRA20			0x000000b8
 239#define PADS_PLL_CTL_TEGRA30			0x000000b4
 240#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
 241#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
 242#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
 243#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
 244#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
 245#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
 246#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
 247#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
 248#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
 249#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)
 250
 251#define PADS_REFCLK_CFG0			0x000000c8
 252#define PADS_REFCLK_CFG1			0x000000cc
 253#define PADS_REFCLK_BIAS			0x000000d0
 254
 255/*
 256 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 257 * entries, one entry per PCIe port. These field definitions and desired
 258 * values aren't in the TRM, but do come from NVIDIA.
 259 */
 260#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
 261#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
 262#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
 263#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
 264
 265#define PME_ACK_TIMEOUT 10000
 266#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */
 267
 268struct tegra_msi {
 
 269	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
 270	struct irq_domain *domain;
 271	struct mutex map_lock;
 272	spinlock_t mask_lock;
 273	void *virt;
 274	dma_addr_t phys;
 275	int irq;
 276};
 277
 278/* used to differentiate between Tegra SoC generations */
 279struct tegra_pcie_port_soc {
 280	struct {
 281		u8 turnoff_bit;
 282		u8 ack_bit;
 283	} pme;
 284};
 285
 286struct tegra_pcie_soc {
 287	unsigned int num_ports;
 288	const struct tegra_pcie_port_soc *ports;
 289	unsigned int msi_base_shift;
 290	unsigned long afi_pex2_ctrl;
 291	u32 pads_pll_ctl;
 292	u32 tx_ref_sel;
 293	u32 pads_refclk_cfg0;
 294	u32 pads_refclk_cfg1;
 295	u32 update_fc_threshold;
 296	bool has_pex_clkreq_en;
 297	bool has_pex_bias_ctrl;
 298	bool has_intr_prsnt_sense;
 299	bool has_cml_clk;
 300	bool has_gen2;
 301	bool force_pca_enable;
 302	bool program_uphy;
 303	bool update_clamp_threshold;
 304	bool program_deskew_time;
 
 305	bool update_fc_timer;
 306	bool has_cache_bars;
 307	struct {
 308		struct {
 309			u32 rp_ectl_2_r1;
 310			u32 rp_ectl_4_r1;
 311			u32 rp_ectl_5_r1;
 312			u32 rp_ectl_6_r1;
 313			u32 rp_ectl_2_r2;
 314			u32 rp_ectl_4_r2;
 315			u32 rp_ectl_5_r2;
 316			u32 rp_ectl_6_r2;
 317		} regs;
 318		bool enable;
 319	} ectl;
 320};
 321
 
 
 
 
 
 322struct tegra_pcie {
 323	struct device *dev;
 324
 325	void __iomem *pads;
 326	void __iomem *afi;
 327	void __iomem *cfg;
 328	int irq;
 329
 330	struct resource cs;
 
 
 
 
 
 
 
 
 
 
 331
 332	struct clk *pex_clk;
 333	struct clk *afi_clk;
 334	struct clk *pll_e;
 335	struct clk *cml_clk;
 336
 337	struct reset_control *pex_rst;
 338	struct reset_control *afi_rst;
 339	struct reset_control *pcie_xrst;
 340
 341	bool legacy_phy;
 342	struct phy *phy;
 343
 344	struct tegra_msi msi;
 345
 346	struct list_head ports;
 347	u32 xbar_config;
 348
 349	struct regulator_bulk_data *supplies;
 350	unsigned int num_supplies;
 351
 352	const struct tegra_pcie_soc *soc;
 353	struct dentry *debugfs;
 354};
 355
 356static inline struct tegra_pcie *msi_to_pcie(struct tegra_msi *msi)
 357{
 358	return container_of(msi, struct tegra_pcie, msi);
 359}
 360
 361struct tegra_pcie_port {
 362	struct tegra_pcie *pcie;
 363	struct device_node *np;
 364	struct list_head list;
 365	struct resource regs;
 366	void __iomem *base;
 367	unsigned int index;
 368	unsigned int lanes;
 369
 370	struct phy **phys;
 371
 372	struct gpio_desc *reset_gpio;
 373};
 374
 375struct tegra_pcie_bus {
 376	struct list_head list;
 377	unsigned int nr;
 378};
 379
 380static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
 381			      unsigned long offset)
 382{
 383	writel(value, pcie->afi + offset);
 384}
 385
 386static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
 387{
 388	return readl(pcie->afi + offset);
 389}
 390
 391static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
 392			       unsigned long offset)
 393{
 394	writel(value, pcie->pads + offset);
 395}
 396
 397static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
 398{
 399	return readl(pcie->pads + offset);
 400}
 401
 402/*
 403 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 404 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
 405 * register accesses are mapped:
 406 *
 407 *    [27:24] extended register number
 408 *    [23:16] bus number
 409 *    [15:11] device number
 410 *    [10: 8] function number
 411 *    [ 7: 0] register number
 412 *
 413 * Mapping the whole extended configuration space would require 256 MiB of
 414 * virtual address space, only a small part of which will actually be used.
 415 *
 416 * To work around this, a 4 KiB region is used to generate the required
 417 * configuration transaction with relevant B:D:F and register offset values.
 418 * This is achieved by dynamically programming base address and size of
 419 * AFI_AXI_BAR used for end point config space mapping to make sure that the
 420 * address (access to which generates correct config transaction) falls in
 421 * this 4 KiB region.
 422 */
 423static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
 424					   unsigned int where)
 425{
 426	return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
 427	       (PCI_FUNC(devfn) << 8) | (where & 0xff);
 428}
 429
 430static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
 431					unsigned int devfn,
 432					int where)
 433{
 434	struct tegra_pcie *pcie = bus->sysdata;
 435	void __iomem *addr = NULL;
 436
 437	if (bus->number == 0) {
 438		unsigned int slot = PCI_SLOT(devfn);
 439		struct tegra_pcie_port *port;
 440
 441		list_for_each_entry(port, &pcie->ports, list) {
 442			if (port->index + 1 == slot) {
 443				addr = port->base + (where & ~3);
 444				break;
 445			}
 446		}
 447	} else {
 448		unsigned int offset;
 449		u32 base;
 450
 451		offset = tegra_pcie_conf_offset(bus->number, devfn, where);
 452
 453		/* move 4 KiB window to offset within the FPCI region */
 454		base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
 455		afi_writel(pcie, base, AFI_FPCI_BAR0);
 456
 457		/* move to correct offset within the 4 KiB page */
 458		addr = pcie->cfg + (offset & (SZ_4K - 1));
 459	}
 460
 461	return addr;
 462}
 463
 464static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
 465				  int where, int size, u32 *value)
 466{
 467	if (bus->number == 0)
 468		return pci_generic_config_read32(bus, devfn, where, size,
 469						 value);
 470
 471	return pci_generic_config_read(bus, devfn, where, size, value);
 472}
 473
 474static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
 475				   int where, int size, u32 value)
 476{
 477	if (bus->number == 0)
 478		return pci_generic_config_write32(bus, devfn, where, size,
 479						  value);
 480
 481	return pci_generic_config_write(bus, devfn, where, size, value);
 482}
 483
 484static struct pci_ops tegra_pcie_ops = {
 485	.map_bus = tegra_pcie_map_bus,
 486	.read = tegra_pcie_config_read,
 487	.write = tegra_pcie_config_write,
 488};
 489
 490static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
 491{
 492	const struct tegra_pcie_soc *soc = port->pcie->soc;
 493	unsigned long ret = 0;
 494
 495	switch (port->index) {
 496	case 0:
 497		ret = AFI_PEX0_CTRL;
 498		break;
 499
 500	case 1:
 501		ret = AFI_PEX1_CTRL;
 502		break;
 503
 504	case 2:
 505		ret = soc->afi_pex2_ctrl;
 506		break;
 507	}
 508
 509	return ret;
 510}
 511
 512static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
 513{
 514	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
 515	unsigned long value;
 516
 517	/* pulse reset signal */
 518	if (port->reset_gpio) {
 519		gpiod_set_value(port->reset_gpio, 1);
 520	} else {
 521		value = afi_readl(port->pcie, ctrl);
 522		value &= ~AFI_PEX_CTRL_RST;
 523		afi_writel(port->pcie, value, ctrl);
 524	}
 525
 526	usleep_range(1000, 2000);
 527
 528	if (port->reset_gpio) {
 529		gpiod_set_value(port->reset_gpio, 0);
 530	} else {
 531		value = afi_readl(port->pcie, ctrl);
 532		value |= AFI_PEX_CTRL_RST;
 533		afi_writel(port->pcie, value, ctrl);
 534	}
 535}
 536
 537static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
 538{
 539	const struct tegra_pcie_soc *soc = port->pcie->soc;
 540	u32 value;
 541
 542	/* Enable AER capability */
 543	value = readl(port->base + RP_VEND_CTL1);
 544	value |= RP_VEND_CTL1_ERPT;
 545	writel(value, port->base + RP_VEND_CTL1);
 546
 547	/* Optimal settings to enhance bandwidth */
 548	value = readl(port->base + RP_VEND_XP);
 549	value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
 550	value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
 551	writel(value, port->base + RP_VEND_XP);
 552
 553	/*
 554	 * LTSSM will wait for DLLP to finish before entering L1 or L2,
 555	 * to avoid truncation of PM messages which results in receiver errors
 556	 */
 557	value = readl(port->base + RP_VEND_XP_BIST);
 558	value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
 559	writel(value, port->base + RP_VEND_XP_BIST);
 560
 561	value = readl(port->base + RP_PRIV_MISC);
 562	value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
 563	value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;
 564
 565	if (soc->update_clamp_threshold) {
 566		value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
 567				RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
 568		value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
 569			RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
 570	}
 571
 572	writel(value, port->base + RP_PRIV_MISC);
 573}
 574
 575static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
 576{
 577	const struct tegra_pcie_soc *soc = port->pcie->soc;
 578	u32 value;
 579
 580	value = readl(port->base + RP_ECTL_2_R1);
 581	value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
 582	value |= soc->ectl.regs.rp_ectl_2_r1;
 583	writel(value, port->base + RP_ECTL_2_R1);
 584
 585	value = readl(port->base + RP_ECTL_4_R1);
 586	value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
 587	value |= soc->ectl.regs.rp_ectl_4_r1 <<
 588				RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
 589	writel(value, port->base + RP_ECTL_4_R1);
 590
 591	value = readl(port->base + RP_ECTL_5_R1);
 592	value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
 593	value |= soc->ectl.regs.rp_ectl_5_r1;
 594	writel(value, port->base + RP_ECTL_5_R1);
 595
 596	value = readl(port->base + RP_ECTL_6_R1);
 597	value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
 598	value |= soc->ectl.regs.rp_ectl_6_r1;
 599	writel(value, port->base + RP_ECTL_6_R1);
 600
 601	value = readl(port->base + RP_ECTL_2_R2);
 602	value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
 603	value |= soc->ectl.regs.rp_ectl_2_r2;
 604	writel(value, port->base + RP_ECTL_2_R2);
 605
 606	value = readl(port->base + RP_ECTL_4_R2);
 607	value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
 608	value |= soc->ectl.regs.rp_ectl_4_r2 <<
 609				RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
 610	writel(value, port->base + RP_ECTL_4_R2);
 611
 612	value = readl(port->base + RP_ECTL_5_R2);
 613	value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
 614	value |= soc->ectl.regs.rp_ectl_5_r2;
 615	writel(value, port->base + RP_ECTL_5_R2);
 616
 617	value = readl(port->base + RP_ECTL_6_R2);
 618	value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
 619	value |= soc->ectl.regs.rp_ectl_6_r2;
 620	writel(value, port->base + RP_ECTL_6_R2);
 621}
 622
 623static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
 624{
 625	const struct tegra_pcie_soc *soc = port->pcie->soc;
 626	u32 value;
 627
 628	/*
 629	 * Sometimes link speed change from Gen2 to Gen1 fails due to
 630	 * instability in deskew logic on lane-0. Increase the deskew
 631	 * retry time to resolve this issue.
 632	 */
 633	if (soc->program_deskew_time) {
 634		value = readl(port->base + RP_VEND_CTL0);
 635		value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
 636		value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
 637		writel(value, port->base + RP_VEND_CTL0);
 638	}
 639
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 640	if (soc->update_fc_timer) {
 641		value = readl(port->base + RP_VEND_XP);
 642		value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
 643		value |= soc->update_fc_threshold;
 644		writel(value, port->base + RP_VEND_XP);
 645	}
 646
 647	/*
 648	 * PCIe link doesn't come up with few legacy PCIe endpoints if
 649	 * root port advertises both Gen-1 and Gen-2 speeds in Tegra.
 650	 * Hence, the strategy followed here is to initially advertise
 651	 * only Gen-1 and after link is up, retrain link to Gen-2 speed
 652	 */
 653	value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
 654	value &= ~PCI_EXP_LNKSTA_CLS;
 655	value |= PCI_EXP_LNKSTA_CLS_2_5GB;
 656	writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
 657}
 658
 659static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
 660{
 661	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
 662	const struct tegra_pcie_soc *soc = port->pcie->soc;
 663	unsigned long value;
 664
 665	/* enable reference clock */
 666	value = afi_readl(port->pcie, ctrl);
 667	value |= AFI_PEX_CTRL_REFCLK_EN;
 668
 669	if (soc->has_pex_clkreq_en)
 670		value |= AFI_PEX_CTRL_CLKREQ_EN;
 671
 672	value |= AFI_PEX_CTRL_OVERRIDE_EN;
 673
 674	afi_writel(port->pcie, value, ctrl);
 675
 676	tegra_pcie_port_reset(port);
 677
 678	if (soc->force_pca_enable) {
 679		value = readl(port->base + RP_VEND_CTL2);
 680		value |= RP_VEND_CTL2_PCA_ENABLE;
 681		writel(value, port->base + RP_VEND_CTL2);
 682	}
 683
 684	tegra_pcie_enable_rp_features(port);
 685
 686	if (soc->ectl.enable)
 687		tegra_pcie_program_ectl_settings(port);
 688
 689	tegra_pcie_apply_sw_fixup(port);
 690}
 691
 692static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
 693{
 694	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
 695	const struct tegra_pcie_soc *soc = port->pcie->soc;
 696	unsigned long value;
 697
 698	/* assert port reset */
 699	value = afi_readl(port->pcie, ctrl);
 700	value &= ~AFI_PEX_CTRL_RST;
 701	afi_writel(port->pcie, value, ctrl);
 702
 703	/* disable reference clock */
 704	value = afi_readl(port->pcie, ctrl);
 705
 706	if (soc->has_pex_clkreq_en)
 707		value &= ~AFI_PEX_CTRL_CLKREQ_EN;
 708
 709	value &= ~AFI_PEX_CTRL_REFCLK_EN;
 710	afi_writel(port->pcie, value, ctrl);
 711
 712	/* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
 713	value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
 714	value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
 715	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
 716	afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
 717}
 718
 719static void tegra_pcie_port_free(struct tegra_pcie_port *port)
 720{
 721	struct tegra_pcie *pcie = port->pcie;
 722	struct device *dev = pcie->dev;
 723
 724	devm_iounmap(dev, port->base);
 725	devm_release_mem_region(dev, port->regs.start,
 726				resource_size(&port->regs));
 727	list_del(&port->list);
 728	devm_kfree(dev, port);
 729}
 730
 731/* Tegra PCIE root complex wrongly reports device class */
 732static void tegra_pcie_fixup_class(struct pci_dev *dev)
 733{
 734	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
 735}
 736DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
 737DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
 738DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
 739DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
 740
 741	/* Tegra20 and Tegra30 PCIE require relaxed ordering */
 742static void tegra_pcie_relax_enable(struct pci_dev *dev)
 743{
 744	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
 745}
 746DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
 747DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
 748DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
 749DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
 750
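/*
 * Map a legacy INTx pin to a Linux IRQ using the device tree; if no
 * mapping is found, fall back to the controller's shared legacy
 * interrupt.
 */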
 751static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
 752{
 753	struct tegra_pcie *pcie = pdev->bus->sysdata;
 754	int irq;
 755
 756	tegra_cpuidle_pcie_irqs_in_use();
 757
 758	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
 759	if (!irq)
 760		irq = pcie->irq;
 761
 762	return irq;
 763}
 764
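/*
 * AFI error interrupt handler: decode AFI_INTR_CODE into a human-readable
 * message (err_msg[] below is indexed by the AFI_INTR_* code) and, for
 * aborts and FPCI decode errors, also report the failing FPCI address.
 */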
 765static irqreturn_t tegra_pcie_isr(int irq, void *arg)
 766{
 767	const char *err_msg[] = {
 768		"Unknown",
 769		"AXI slave error",
 770		"AXI decode error",
 771		"Target abort",
 772		"Master abort",
 773		"Invalid write",
 774		"Legacy interrupt",
 775		"Response decoding error",
 776		"AXI response decoding error",
 777		"Transaction timeout",
 778		"Slot present pin change",
 779		"Slot clock request change",
 780		"TMS clock ramp change",
 781		"TMS ready for power down",
 782		"Peer2Peer error",
 783	};
 784	struct tegra_pcie *pcie = arg;
 785	struct device *dev = pcie->dev;
 786	u32 code, signature;
 787
 788	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
 789	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
 790	afi_writel(pcie, 0, AFI_INTR_CODE);
 791
 792	if (code == AFI_INTR_LEGACY)
 793		return IRQ_NONE;
 794
 795	if (code >= ARRAY_SIZE(err_msg))
 796		code = 0;
 797
 798	/*
 799	 * do not pollute kernel log with master abort reports since they
 800	 * happen a lot during enumeration
 801	 */
 802	if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
 803		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
 804	else
 805		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
 806
 807	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
 808	    code == AFI_INTR_FPCI_DECODE_ERROR) {
 809		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
 810		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
 811
 812		if (code == AFI_INTR_MASTER_ABORT)
 813			dev_dbg(dev, "  FPCI address: %10llx\n", address);
 814		else
 815			dev_err(dev, "  FPCI address: %10llx\n", address);
 816	}
 817
 818	return IRQ_HANDLED;
 819}
 820
 821/*
 822 * FPCI map is as follows:
 823 * - 0xfdfc000000: I/O space
 824 * - 0xfdfe000000: type 0 configuration space
 825 * - 0xfdff000000: type 1 configuration space
 826 * - 0xfe00000000: type 0 extended configuration space
 827 * - 0xfe10000000: type 1 extended configuration space
 828 */
 829static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
 830{
 831	u32 size;
 832	struct resource_entry *entry;
 833	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
 834
 835	/* Bar 0: type 1 extended configuration space */
 836	size = resource_size(&pcie->cs);
 837	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
 838	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
 839
 840	resource_list_for_each_entry(entry, &bridge->windows) {
 841		u32 fpci_bar, axi_address;
 842		struct resource *res = entry->res;
 843
 844		size = resource_size(res);
 845
 846		switch (resource_type(res)) {
 847		case IORESOURCE_IO:
 848			/* Bar 1: downstream IO bar */
 849			fpci_bar = 0xfdfc0000;
 850			axi_address = pci_pio_to_address(res->start);
 851			afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
 852			afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
 853			afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
 854			break;
 855		case IORESOURCE_MEM:
 856			fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1;
 857			axi_address = res->start;
 858
 859			if (res->flags & IORESOURCE_PREFETCH) {
 860				/* Bar 2: prefetchable memory BAR */
 861				afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
 862				afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
 863				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
 864
 865			} else {
 866				/* Bar 3: non prefetchable memory BAR */
 867				afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
 868				afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
 869				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
 870			}
 871			break;
 872		}
 873	}
 874
 875	/* NULL out the remaining BARs as they are not used */
 876	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
 877	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
 878	afi_writel(pcie, 0, AFI_FPCI_BAR4);
 879
 880	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
 881	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
 882	afi_writel(pcie, 0, AFI_FPCI_BAR5);
 883
 884	if (pcie->soc->has_cache_bars) {
 885		/* map all upstream transactions as uncached */
 886		afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
 887		afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
 888		afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
 889		afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
 890	}
 891
 892	/* MSI translations are setup only when needed */
 893	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
 894	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
 895	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
 896	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
 897}
 898
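/*
 * Poll the lock-detect bit in the PADS PLL control register until it is
 * set or the given timeout (in milliseconds) expires.
 */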
 899static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
 900{
 901	const struct tegra_pcie_soc *soc = pcie->soc;
 902	u32 value;
 903
 904	timeout = jiffies + msecs_to_jiffies(timeout);
 905
 906	while (time_before(jiffies, timeout)) {
 907		value = pads_readl(pcie, soc->pads_pll_ctl);
 908		if (value & PADS_PLL_CTL_LOCKDET)
 909			return 0;
 910	}
 911
 912	return -ETIMEDOUT;
 913}
 914
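/*
 * Power up the integrated (PADS) PHY: select the internal PLL reference,
 * cycle the PLL through reset, wait for it to lock, then release the IDDQ
 * override and enable the TX/RX data paths.
 */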
 915static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
 916{
 917	struct device *dev = pcie->dev;
 918	const struct tegra_pcie_soc *soc = pcie->soc;
 919	u32 value;
 920	int err;
 921
 922	/* initialize internal PHY, enable up to 16 PCIE lanes */
 923	pads_writel(pcie, 0x0, PADS_CTL_SEL);
 924
 925	/* override IDDQ to 1 on all 4 lanes */
 926	value = pads_readl(pcie, PADS_CTL);
 927	value |= PADS_CTL_IDDQ_1L;
 928	pads_writel(pcie, value, PADS_CTL);
 929
 930	/*
 931	 * Set up the PHY PLL inputs: select the PLLE output as refclock and
 932	 * set the TX ref sel to div10 (not div5).
 933	 */
 934	value = pads_readl(pcie, soc->pads_pll_ctl);
 935	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
 936	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
 937	pads_writel(pcie, value, soc->pads_pll_ctl);
 938
 939	/* reset PLL */
 940	value = pads_readl(pcie, soc->pads_pll_ctl);
 941	value &= ~PADS_PLL_CTL_RST_B4SM;
 942	pads_writel(pcie, value, soc->pads_pll_ctl);
 943
 944	usleep_range(20, 100);
 945
 946	/* take PLL out of reset  */
 947	value = pads_readl(pcie, soc->pads_pll_ctl);
 948	value |= PADS_PLL_CTL_RST_B4SM;
 949	pads_writel(pcie, value, soc->pads_pll_ctl);
 950
 951	/* wait for the PLL to lock */
 952	err = tegra_pcie_pll_wait(pcie, 500);
 953	if (err < 0) {
 954		dev_err(dev, "PLL failed to lock: %d\n", err);
 955		return err;
 956	}
 957
 958	/* turn off IDDQ override */
 959	value = pads_readl(pcie, PADS_CTL);
 960	value &= ~PADS_CTL_IDDQ_1L;
 961	pads_writel(pcie, value, PADS_CTL);
 962
 963	/* enable TX/RX data */
 964	value = pads_readl(pcie, PADS_CTL);
 965	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
 966	pads_writel(pcie, value, PADS_CTL);
 967
 968	return 0;
 969}
 970
 971static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
 972{
 973	const struct tegra_pcie_soc *soc = pcie->soc;
 974	u32 value;
 975
 976	/* disable TX/RX data */
 977	value = pads_readl(pcie, PADS_CTL);
 978	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
 979	pads_writel(pcie, value, PADS_CTL);
 980
 981	/* override IDDQ */
 982	value = pads_readl(pcie, PADS_CTL);
 983	value |= PADS_CTL_IDDQ_1L;
 984	pads_writel(pcie, value, PADS_CTL);
 985
 986	/* reset PLL */
 987	value = pads_readl(pcie, soc->pads_pll_ctl);
 988	value &= ~PADS_PLL_CTL_RST_B4SM;
 989	pads_writel(pcie, value, soc->pads_pll_ctl);
 990
 991	usleep_range(20, 100);
 992
 993	return 0;
 994}
 995
 996static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
 997{
 998	struct device *dev = port->pcie->dev;
 999	unsigned int i;
1000	int err;
1001
1002	for (i = 0; i < port->lanes; i++) {
1003		err = phy_power_on(port->phys[i]);
1004		if (err < 0) {
1005			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
1006			return err;
1007		}
1008	}
1009
1010	return 0;
1011}
1012
1013static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
1014{
1015	struct device *dev = port->pcie->dev;
1016	unsigned int i;
1017	int err;
1018
1019	for (i = 0; i < port->lanes; i++) {
1020		err = phy_power_off(port->phys[i]);
1021		if (err < 0) {
1022			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
1023				err);
1024			return err;
1025		}
1026	}
1027
1028	return 0;
1029}
1030
1031static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
1032{
1033	struct device *dev = pcie->dev;
1034	struct tegra_pcie_port *port;
1035	int err;
1036
1037	if (pcie->legacy_phy) {
1038		if (pcie->phy)
1039			err = phy_power_on(pcie->phy);
1040		else
1041			err = tegra_pcie_phy_enable(pcie);
1042
1043		if (err < 0)
1044			dev_err(dev, "failed to power on PHY: %d\n", err);
1045
1046		return err;
1047	}
1048
1049	list_for_each_entry(port, &pcie->ports, list) {
1050		err = tegra_pcie_port_phy_power_on(port);
1051		if (err < 0) {
1052			dev_err(dev,
1053				"failed to power on PCIe port %u PHY: %d\n",
1054				port->index, err);
1055			return err;
1056		}
1057	}
1058
1059	return 0;
1060}
1061
1062static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
1063{
1064	struct device *dev = pcie->dev;
1065	struct tegra_pcie_port *port;
1066	int err;
1067
1068	if (pcie->legacy_phy) {
1069		if (pcie->phy)
1070			err = phy_power_off(pcie->phy);
1071		else
1072			err = tegra_pcie_phy_disable(pcie);
1073
1074		if (err < 0)
1075			dev_err(dev, "failed to power off PHY: %d\n", err);
1076
1077		return err;
1078	}
1079
1080	list_for_each_entry(port, &pcie->ports, list) {
1081		err = tegra_pcie_port_phy_power_off(port);
1082		if (err < 0) {
1083			dev_err(dev,
1084				"failed to power off PCIe port %u PHY: %d\n",
1085				port->index, err);
1086			return err;
1087		}
1088	}
1089
1090	return 0;
1091}
1092
1093static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
1094{
1095	const struct tegra_pcie_soc *soc = pcie->soc;
1096	struct tegra_pcie_port *port;
1097	unsigned long value;
1098
1099	/* enable PLL power down */
1100	if (pcie->phy) {
1101		value = afi_readl(pcie, AFI_PLLE_CONTROL);
1102		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
1103		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
1104		afi_writel(pcie, value, AFI_PLLE_CONTROL);
1105	}
1106
1107	/* power down PCIe slot clock bias pad */
1108	if (soc->has_pex_bias_ctrl)
1109		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
1110
1111	/* configure mode and disable all ports */
1112	value = afi_readl(pcie, AFI_PCIE_CONFIG);
1113	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
1114	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
1115	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;
1116
1117	list_for_each_entry(port, &pcie->ports, list) {
1118		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
1119		value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
1120	}
1121
1122	afi_writel(pcie, value, AFI_PCIE_CONFIG);
1123
1124	if (soc->has_gen2) {
1125		value = afi_readl(pcie, AFI_FUSE);
1126		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
1127		afi_writel(pcie, value, AFI_FUSE);
1128	} else {
1129		value = afi_readl(pcie, AFI_FUSE);
1130		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
1131		afi_writel(pcie, value, AFI_FUSE);
1132	}
1133
1134	/* Disable AFI dynamic clock gating and enable PCIe */
1135	value = afi_readl(pcie, AFI_CONFIGURATION);
1136	value |= AFI_CONFIGURATION_EN_FPCI;
1137	value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
1138	afi_writel(pcie, value, AFI_CONFIGURATION);
1139
1140	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
1141		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
1142		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;
1143
1144	if (soc->has_intr_prsnt_sense)
1145		value |= AFI_INTR_EN_PRSNT_SENSE;
1146
1147	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
1148	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);
1149
1150	/* don't enable MSI for now, only when needed */
1151	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);
1152
1153	/* disable all exceptions */
1154	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
1155}
1156
1157static void tegra_pcie_power_off(struct tegra_pcie *pcie)
1158{
1159	struct device *dev = pcie->dev;
1160	const struct tegra_pcie_soc *soc = pcie->soc;
1161	int err;
1162
1163	reset_control_assert(pcie->afi_rst);
1164
1165	clk_disable_unprepare(pcie->pll_e);
1166	if (soc->has_cml_clk)
1167		clk_disable_unprepare(pcie->cml_clk);
1168	clk_disable_unprepare(pcie->afi_clk);
1169
1170	if (!dev->pm_domain)
1171		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
1172
1173	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
1174	if (err < 0)
1175		dev_warn(dev, "failed to disable regulators: %d\n", err);
1176}
1177
1178static int tegra_pcie_power_on(struct tegra_pcie *pcie)
1179{
1180	struct device *dev = pcie->dev;
1181	const struct tegra_pcie_soc *soc = pcie->soc;
1182	int err;
1183
1184	reset_control_assert(pcie->pcie_xrst);
1185	reset_control_assert(pcie->afi_rst);
1186	reset_control_assert(pcie->pex_rst);
1187
1188	if (!dev->pm_domain)
1189		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
1190
1191	/* enable regulators */
1192	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
1193	if (err < 0)
1194		dev_err(dev, "failed to enable regulators: %d\n", err);
1195
1196	if (!dev->pm_domain) {
1197		err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
1198		if (err) {
1199			dev_err(dev, "failed to power ungate: %d\n", err);
1200			goto regulator_disable;
1201		}
1202		err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
1203		if (err) {
1204			dev_err(dev, "failed to remove clamp: %d\n", err);
1205			goto powergate;
1206		}
1207	}
1208
1209	err = clk_prepare_enable(pcie->afi_clk);
1210	if (err < 0) {
1211		dev_err(dev, "failed to enable AFI clock: %d\n", err);
1212		goto powergate;
1213	}
1214
1215	if (soc->has_cml_clk) {
1216		err = clk_prepare_enable(pcie->cml_clk);
1217		if (err < 0) {
1218			dev_err(dev, "failed to enable CML clock: %d\n", err);
1219			goto disable_afi_clk;
1220		}
1221	}
1222
1223	err = clk_prepare_enable(pcie->pll_e);
1224	if (err < 0) {
1225		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
1226		goto disable_cml_clk;
1227	}
1228
1229	reset_control_deassert(pcie->afi_rst);
1230
1231	return 0;
1232
1233disable_cml_clk:
1234	if (soc->has_cml_clk)
1235		clk_disable_unprepare(pcie->cml_clk);
1236disable_afi_clk:
1237	clk_disable_unprepare(pcie->afi_clk);
1238powergate:
1239	if (!dev->pm_domain)
1240		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
1241regulator_disable:
1242	regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
1243
1244	return err;
1245}
1246
1247static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
1248{
1249	const struct tegra_pcie_soc *soc = pcie->soc;
1250
1251	/* Configure the reference clock driver */
1252	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);
1253
1254	if (soc->num_ports > 2)
1255		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
1256}
1257
1258static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
1259{
1260	struct device *dev = pcie->dev;
1261	const struct tegra_pcie_soc *soc = pcie->soc;
1262
1263	pcie->pex_clk = devm_clk_get(dev, "pex");
1264	if (IS_ERR(pcie->pex_clk))
1265		return PTR_ERR(pcie->pex_clk);
1266
1267	pcie->afi_clk = devm_clk_get(dev, "afi");
1268	if (IS_ERR(pcie->afi_clk))
1269		return PTR_ERR(pcie->afi_clk);
1270
1271	pcie->pll_e = devm_clk_get(dev, "pll_e");
1272	if (IS_ERR(pcie->pll_e))
1273		return PTR_ERR(pcie->pll_e);
1274
1275	if (soc->has_cml_clk) {
1276		pcie->cml_clk = devm_clk_get(dev, "cml");
1277		if (IS_ERR(pcie->cml_clk))
1278			return PTR_ERR(pcie->cml_clk);
1279	}
1280
1281	return 0;
1282}
1283
1284static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1285{
1286	struct device *dev = pcie->dev;
1287
1288	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
1289	if (IS_ERR(pcie->pex_rst))
1290		return PTR_ERR(pcie->pex_rst);
1291
1292	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
1293	if (IS_ERR(pcie->afi_rst))
1294		return PTR_ERR(pcie->afi_rst);
1295
1296	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
1297	if (IS_ERR(pcie->pcie_xrst))
1298		return PTR_ERR(pcie->pcie_xrst);
1299
1300	return 0;
1301}
1302
1303static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
1304{
1305	struct device *dev = pcie->dev;
1306	int err;
1307
1308	pcie->phy = devm_phy_optional_get(dev, "pcie");
1309	if (IS_ERR(pcie->phy)) {
1310		err = PTR_ERR(pcie->phy);
1311		dev_err(dev, "failed to get PHY: %d\n", err);
1312		return err;
1313	}
1314
1315	err = phy_init(pcie->phy);
1316	if (err < 0) {
1317		dev_err(dev, "failed to initialize PHY: %d\n", err);
1318		return err;
1319	}
1320
1321	pcie->legacy_phy = true;
1322
1323	return 0;
1324}
1325
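/*
 * Look up the "<consumer>-<index>" PHY for the given device tree node. A
 * missing PHY (-ENODEV) is treated as optional and mapped to NULL so that
 * callers can continue without it.
 */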
1326static struct phy *devm_of_phy_optional_get_index(struct device *dev,
1327						  struct device_node *np,
1328						  const char *consumer,
1329						  unsigned int index)
1330{
1331	struct phy *phy;
1332	char *name;
1333
1334	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
1335	if (!name)
1336		return ERR_PTR(-ENOMEM);
1337
1338	phy = devm_of_phy_get(dev, np, name);
1339	kfree(name);
1340
1341	if (PTR_ERR(phy) == -ENODEV)
1342		phy = NULL;
1343
1344	return phy;
1345}
1346
1347static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
1348{
1349	struct device *dev = port->pcie->dev;
1350	struct phy *phy;
1351	unsigned int i;
1352	int err;
1353
1354	port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
1355	if (!port->phys)
1356		return -ENOMEM;
1357
1358	for (i = 0; i < port->lanes; i++) {
1359		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
1360		if (IS_ERR(phy)) {
1361			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
1362				PTR_ERR(phy));
1363			return PTR_ERR(phy);
1364		}
1365
1366		err = phy_init(phy);
1367		if (err < 0) {
1368			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
1369				err);
1370			return err;
1371		}
1372
1373		port->phys[i] = phy;
1374	}
1375
1376	return 0;
1377}
1378
1379static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
1380{
1381	const struct tegra_pcie_soc *soc = pcie->soc;
1382	struct device_node *np = pcie->dev->of_node;
1383	struct tegra_pcie_port *port;
1384	int err;
1385
1386	if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
1387		return tegra_pcie_phys_get_legacy(pcie);
1388
1389	list_for_each_entry(port, &pcie->ports, list) {
1390		err = tegra_pcie_port_get_phys(port);
1391		if (err < 0)
1392			return err;
1393	}
1394
1395	return 0;
1396}
1397
1398static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
1399{
1400	struct tegra_pcie_port *port;
1401	struct device *dev = pcie->dev;
1402	int err, i;
1403
1404	if (pcie->legacy_phy) {
1405		err = phy_exit(pcie->phy);
1406		if (err < 0)
1407			dev_err(dev, "failed to teardown PHY: %d\n", err);
1408		return;
1409	}
1410
1411	list_for_each_entry(port, &pcie->ports, list) {
1412		for (i = 0; i < port->lanes; i++) {
1413			err = phy_exit(port->phys[i]);
1414			if (err < 0)
1415				dev_err(dev, "failed to teardown PHY#%u: %d\n",
1416					i, err);
1417		}
1418	}
1419}
1420
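/*
 * Acquire everything the controller depends on: clocks, resets, PHYs, the
 * PADS and AFI register windows, the configuration space aperture and the
 * controller's error interrupt.
 */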
1421static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
1422{
1423	struct device *dev = pcie->dev;
1424	struct platform_device *pdev = to_platform_device(dev);
1425	struct resource *res;
1426	const struct tegra_pcie_soc *soc = pcie->soc;
1427	int err;
1428
1429	err = tegra_pcie_clocks_get(pcie);
1430	if (err) {
1431		dev_err(dev, "failed to get clocks: %d\n", err);
1432		return err;
1433	}
1434
1435	err = tegra_pcie_resets_get(pcie);
1436	if (err) {
1437		dev_err(dev, "failed to get resets: %d\n", err);
1438		return err;
1439	}
1440
1441	if (soc->program_uphy) {
1442		err = tegra_pcie_phys_get(pcie);
1443		if (err < 0) {
1444			dev_err(dev, "failed to get PHYs: %d\n", err);
1445			return err;
1446		}
1447	}
1448
1449	pcie->pads = devm_platform_ioremap_resource_byname(pdev, "pads");
1450	if (IS_ERR(pcie->pads)) {
1451		err = PTR_ERR(pcie->pads);
1452		goto phys_put;
1453	}
1454
1455	pcie->afi = devm_platform_ioremap_resource_byname(pdev, "afi");
1456	if (IS_ERR(pcie->afi)) {
1457		err = PTR_ERR(pcie->afi);
1458		goto phys_put;
1459	}
1460
1461	/* request configuration space, but remap later, on demand */
1462	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
1463	if (!res) {
1464		err = -EADDRNOTAVAIL;
1465		goto phys_put;
1466	}
1467
1468	pcie->cs = *res;
1469
1470	/* constrain configuration space to 4 KiB */
1471	pcie->cs.end = pcie->cs.start + SZ_4K - 1;
1472
1473	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
1474	if (IS_ERR(pcie->cfg)) {
1475		err = PTR_ERR(pcie->cfg);
1476		goto phys_put;
1477	}
1478
1479	/* request interrupt */
1480	err = platform_get_irq_byname(pdev, "intr");
1481	if (err < 0)
1482		goto phys_put;
1483
1484	pcie->irq = err;
1485
1486	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
1487	if (err) {
1488		dev_err(dev, "failed to register IRQ: %d\n", err);
1489		goto phys_put;
1490	}
1491
1492	return 0;
1493
1494phys_put:
1495	if (soc->program_uphy)
1496		tegra_pcie_phys_put(pcie);
1497
1498	return err;
1499}
1500
1501static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1502{
1503	const struct tegra_pcie_soc *soc = pcie->soc;
1504
1505	if (pcie->irq > 0)
1506		free_irq(pcie->irq, pcie);
1507
1508	if (soc->program_uphy)
1509		tegra_pcie_phys_put(pcie);
1510
1511	return 0;
1512}
1513
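/*
 * Perform the PME turn-off handshake for a port: set the port's turn-off
 * bit in AFI_PCIE_PME, poll for the corresponding acknowledgement bit,
 * then let the link settle before clearing the request again.
 */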
1514static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
1515{
1516	struct tegra_pcie *pcie = port->pcie;
1517	const struct tegra_pcie_soc *soc = pcie->soc;
1518	int err;
1519	u32 val;
1520	u8 ack_bit;
1521
1522	val = afi_readl(pcie, AFI_PCIE_PME);
1523	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
1524	afi_writel(pcie, val, AFI_PCIE_PME);
1525
1526	ack_bit = soc->ports[port->index].pme.ack_bit;
1527	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
1528				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
1529	if (err)
1530		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
1531			port->index);
1532
1533	usleep_range(10000, 11000);
1534
1535	val = afi_readl(pcie, AFI_PCIE_PME);
1536	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
1537	afi_writel(pcie, val, AFI_PCIE_PME);
1538}
1539
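/*
 * Chained handler for the summary MSI interrupt: scan all eight 32-bit
 * AFI_MSI_VEC registers and dispatch each pending vector to the interrupt
 * mapped in the MSI domain; vectors nobody claims are simply cleared.
 */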
1540static void tegra_pcie_msi_irq(struct irq_desc *desc)
1541{
1542	struct tegra_pcie *pcie = irq_desc_get_handler_data(desc);
1543	struct irq_chip *chip = irq_desc_get_chip(desc);
1544	struct tegra_msi *msi = &pcie->msi;
1545	struct device *dev = pcie->dev;
1546	unsigned int i;
1547
1548	chained_irq_enter(chip, desc);
1549
1550	for (i = 0; i < 8; i++) {
1551		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC(i));
1552
1553		while (reg) {
1554			unsigned int offset = find_first_bit(&reg, 32);
1555			unsigned int index = i * 32 + offset;
1556			unsigned int irq;
1557
1558			irq = irq_find_mapping(msi->domain->parent, index);
1559			if (irq) {
1560				generic_handle_irq(irq);
1561			} else {
1562				/*
1563				 * No handler is mapped for this vector, so
1564				 * the MSI is unexpected; just clear it.
1565				 */
1566				dev_info(dev, "unexpected MSI\n");
1567				afi_writel(pcie, BIT(index % 32), AFI_MSI_VEC(i));
1568			}
1569
1570			/* see if there's any more pending in this vector */
1571			reg = afi_readl(pcie, AFI_MSI_VEC(i));
1572		}
1573	}
1574
1575	chained_irq_exit(chip, desc);
1576}
1577
1578static void tegra_msi_top_irq_ack(struct irq_data *d)
1579{
1580	irq_chip_ack_parent(d);
1581}
1582
1583static void tegra_msi_top_irq_mask(struct irq_data *d)
1584{
1585	pci_msi_mask_irq(d);
1586	irq_chip_mask_parent(d);
1587}
1588
1589static void tegra_msi_top_irq_unmask(struct irq_data *d)
1590{
1591	pci_msi_unmask_irq(d);
1592	irq_chip_unmask_parent(d);
1593}
1594
1595static struct irq_chip tegra_msi_top_chip = {
1596	.name		= "Tegra PCIe MSI",
1597	.irq_ack	= tegra_msi_top_irq_ack,
1598	.irq_mask	= tegra_msi_top_irq_mask,
1599	.irq_unmask	= tegra_msi_top_irq_unmask,
1600};
1601
1602static void tegra_msi_irq_ack(struct irq_data *d)
1603{
1604	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
1605	struct tegra_pcie *pcie = msi_to_pcie(msi);
1606	unsigned int index = d->hwirq / 32;
1607
1608	/* clear the interrupt */
1609	afi_writel(pcie, BIT(d->hwirq % 32), AFI_MSI_VEC(index));
1610}
1611
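/*
 * Masking and unmasking are read-modify-write updates of the per-vector
 * AFI_MSI_EN_VEC registers and are therefore serialized with mask_lock.
 */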
1612static void tegra_msi_irq_mask(struct irq_data *d)
1613{
1614	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
1615	struct tegra_pcie *pcie = msi_to_pcie(msi);
1616	unsigned int index = d->hwirq / 32;
1617	unsigned long flags;
1618	u32 value;
1619
1620	spin_lock_irqsave(&msi->mask_lock, flags);
1621	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
1622	value &= ~BIT(d->hwirq % 32);
1623	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
1624	spin_unlock_irqrestore(&msi->mask_lock, flags);
1625}
1626
1627static void tegra_msi_irq_unmask(struct irq_data *d)
1628{
1629	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
1630	struct tegra_pcie *pcie = msi_to_pcie(msi);
1631	unsigned int index = d->hwirq / 32;
1632	unsigned long flags;
1633	u32 value;
1634
1635	spin_lock_irqsave(&msi->mask_lock, flags);
1636	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
1637	value |= BIT(d->hwirq % 32);
1638	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
1639	spin_unlock_irqrestore(&msi->mask_lock, flags);
1640}
1641
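/*
 * All MSI vectors funnel through a single chained parent interrupt, so a
 * per-vector CPU affinity cannot be provided.
 */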
1642static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
1643{
1644	return -EINVAL;
1645}
1646
1647static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1648{
1649	struct tegra_msi *msi = irq_data_get_irq_chip_data(data);
1650
1651	msg->address_lo = lower_32_bits(msi->phys);
1652	msg->address_hi = upper_32_bits(msi->phys);
1653	msg->data = data->hwirq;
1654}
1655
1656static struct irq_chip tegra_msi_bottom_chip = {
1657	.name			= "Tegra MSI",
1658	.irq_ack		= tegra_msi_irq_ack,
1659	.irq_mask		= tegra_msi_irq_mask,
1660	.irq_unmask		= tegra_msi_irq_unmask,
1661	.irq_set_affinity 	= tegra_msi_set_affinity,
1662	.irq_compose_msi_msg	= tegra_compose_msi_msg,
1663};
1664
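/*
 * Multi-MSI allocations must be naturally aligned powers of two, hence
 * the order_base_2(nr_irqs) region allocation from the "used" bitmap.
 */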
1665static int tegra_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
1666				  unsigned int nr_irqs, void *args)
1667{
1668	struct tegra_msi *msi = domain->host_data;
1669	unsigned int i;
1670	int hwirq;
1671
1672	mutex_lock(&msi->map_lock);
1673
1674	hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));
1675
1676	mutex_unlock(&msi->map_lock);
1677
1678	if (hwirq < 0)
1679		return -ENOSPC;
1680
1681	for (i = 0; i < nr_irqs; i++)
1682		irq_domain_set_info(domain, virq + i, hwirq + i,
1683				    &tegra_msi_bottom_chip, domain->host_data,
1684				    handle_edge_irq, NULL, NULL);
1685
1686	tegra_cpuidle_pcie_irqs_in_use();
1687
1688	return 0;
1689}
1690
1691static void tegra_msi_domain_free(struct irq_domain *domain, unsigned int virq,
1692				  unsigned int nr_irqs)
1693{
1694	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
1695	struct tegra_msi *msi = domain->host_data;
1696
1697	mutex_lock(&msi->map_lock);
1698
1699	bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));
1700
1701	mutex_unlock(&msi->map_lock);
1702}
1703
1704static const struct irq_domain_ops tegra_msi_domain_ops = {
1705	.alloc = tegra_msi_domain_alloc,
1706	.free = tegra_msi_domain_free,
1707};
1708
1709static struct msi_domain_info tegra_msi_info = {
1710	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
1711		   MSI_FLAG_PCI_MSIX),
1712	.chip	= &tegra_msi_top_chip,
1713};
1714
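/*
 * The MSI controller is modeled as two stacked IRQ domains: a linear
 * parent domain covering the 256 hardware vectors and a PCI MSI domain
 * created on top of it.
 */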
1715static int tegra_allocate_domains(struct tegra_msi *msi)
1716{
1717	struct tegra_pcie *pcie = msi_to_pcie(msi);
1718	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
1719	struct irq_domain *parent;
1720
1721	parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
1722					  &tegra_msi_domain_ops, msi);
1723	if (!parent) {
1724		dev_err(pcie->dev, "failed to create IRQ domain\n");
1725		return -ENOMEM;
1726	}
1727	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
1728
1729	msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent);
1730	if (!msi->domain) {
1731		dev_err(pcie->dev, "failed to create MSI domain\n");
1732		irq_domain_remove(parent);
1733		return -ENOMEM;
1734	}
1735
1736	return 0;
1737}
1738
1739static void tegra_free_domains(struct tegra_msi *msi)
1740{
1741	struct irq_domain *parent = msi->domain->parent;
1742
1743	irq_domain_remove(msi->domain);
1744	irq_domain_remove(parent);
1745}
1746
1747static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
1748{
1749	struct platform_device *pdev = to_platform_device(pcie->dev);
1750	struct tegra_msi *msi = &pcie->msi;
1751	struct device *dev = pcie->dev;
1752	int err;
1753
1754	mutex_init(&msi->map_lock);
1755	spin_lock_init(&msi->mask_lock);
1756
1757	if (IS_ENABLED(CONFIG_PCI_MSI)) {
1758		err = tegra_allocate_domains(msi);
1759		if (err)
1760			return err;
1761	}
1762
1763	err = platform_get_irq_byname(pdev, "msi");
1764	if (err < 0)
1765		goto free_irq_domain;
1766
1767	msi->irq = err;
1768
1769	irq_set_chained_handler_and_data(msi->irq, tegra_pcie_msi_irq, pcie);
1770
1771	/* Though the PCIe controller can address a >32-bit address space, to
1772	 * facilitate endpoints that support only a 32-bit MSI target address,
1773	 * the coherent DMA mask is set to 32 bits to make sure that the MSI
1774	 * target address is always a 32-bit address.
1775	 */
1776	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
1777	if (err < 0) {
1778		dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
1779		goto free_irq;
1780	}
1781
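	/*
	 * The page only reserves a bus address for endpoints to write MSIs
	 * to; the CPU never accesses it, so no kernel mapping is created
	 * (DMA_ATTR_NO_KERNEL_MAPPING).
	 */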
1782	msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
1783				    DMA_ATTR_NO_KERNEL_MAPPING);
1784	if (!msi->virt) {
1785		dev_err(dev, "failed to allocate DMA memory for MSI\n");
1786		err = -ENOMEM;
1787		goto free_irq;
1788	}
1789
1790	return 0;
1791
1792free_irq:
1793	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
1794free_irq_domain:
1795	if (IS_ENABLED(CONFIG_PCI_MSI))
1796		tegra_free_domains(msi);
1797
1798	return err;
1799}
1800
1801static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1802{
1803	const struct tegra_pcie_soc *soc = pcie->soc;
1804	struct tegra_msi *msi = &pcie->msi;
1805	u32 reg, msi_state[INT_PCI_MSI_NR / 32];
1806	int i;
1807
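	/* point the AFI MSI translation at the reserved MSI target page */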
1808	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1809	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
1810	/* this register is in 4K increments */
1811	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1812
1813	/* Restore the MSI allocation state */
1814	bitmap_to_arr32(msi_state, msi->used, INT_PCI_MSI_NR);
1815	for (i = 0; i < ARRAY_SIZE(msi_state); i++)
1816		afi_writel(pcie, msi_state[i], AFI_MSI_EN_VEC(i));
1817
1818	/* and unmask the MSI interrupt */
1819	reg = afi_readl(pcie, AFI_INTR_MASK);
1820	reg |= AFI_INTR_MASK_MSI_MASK;
1821	afi_writel(pcie, reg, AFI_INTR_MASK);
1822}
1823
1824static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
1825{
1826	struct tegra_msi *msi = &pcie->msi;
1827	unsigned int i, irq;
1828
1829	dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
1830		       DMA_ATTR_NO_KERNEL_MAPPING);
1831
1832	for (i = 0; i < INT_PCI_MSI_NR; i++) {
1833		irq = irq_find_mapping(msi->domain, i);
1834		if (irq > 0)
1835			irq_domain_free_irqs(irq, 1);
1836	}
1837
1838	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
1839
1840	if (IS_ENABLED(CONFIG_PCI_MSI))
1841		tegra_free_domains(msi);
1842}
1843
1844static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1845{
1846	u32 value;
1847
1848	/* mask the MSI interrupt */
1849	value = afi_readl(pcie, AFI_INTR_MASK);
1850	value &= ~AFI_INTR_MASK_MSI_MASK;
1851	afi_writel(pcie, value, AFI_INTR_MASK);
1852
1853	return 0;
1854}
1855
1856static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
1857{
1858	u32 value;
1859
1860	value = afi_readl(pcie, AFI_INTR_MASK);
1861	value &= ~AFI_INTR_MASK_INT_MASK;
1862	afi_writel(pcie, value, AFI_INTR_MASK);
1863}
1864
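/*
 * The "lanes" argument packs the lane count of every root port into one
 * byte per port (port 0 in bits [7:0], port 1 in bits [15:8], and so on)
 * and is matched against the lane configurations each SoC supports.
 */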
1865static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1866				      u32 *xbar)
1867{
1868	struct device *dev = pcie->dev;
1869	struct device_node *np = dev->of_node;
1870
1871	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1872		switch (lanes) {
1873		case 0x010004:
1874			dev_info(dev, "4x1, 1x1 configuration\n");
1875			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
1876			return 0;
1877
1878		case 0x010102:
1879			dev_info(dev, "2x1, 1X1, 1x1 configuration\n");
1880			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1881			return 0;
1882
1883		case 0x010101:
1884			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
1885			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
1886			return 0;
1887
1888		default:
1889			dev_info(dev, "invalid lane configuration in DT, "
1890				 "switching to default 2x1, 1x1, 1x1 "
1891				 "configuration\n");
1892			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1893			return 0;
1894		}
1895	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
1896		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1897		switch (lanes) {
1898		case 0x0000104:
1899			dev_info(dev, "4x1, 1x1 configuration\n");
1900			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1901			return 0;
1902
1903		case 0x0000102:
1904			dev_info(dev, "2x1, 1x1 configuration\n");
1905			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1906			return 0;
1907		}
1908	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1909		switch (lanes) {
1910		case 0x00000204:
1911			dev_info(dev, "4x1, 2x1 configuration\n");
1912			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1913			return 0;
1914
1915		case 0x00020202:
1916			dev_info(dev, "2x3 configuration\n");
1917			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1918			return 0;
1919
1920		case 0x00010104:
1921			dev_info(dev, "4x1, 1x2 configuration\n");
1922			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1923			return 0;
1924		}
1925	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1926		switch (lanes) {
1927		case 0x00000004:
1928			dev_info(dev, "single-mode configuration\n");
1929			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1930			return 0;
1931
1932		case 0x00000202:
1933			dev_info(dev, "dual-mode configuration\n");
1934			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1935			return 0;
1936		}
1937	}
1938
1939	return -EINVAL;
1940}
1941
1942/*
1943 * Check whether a given set of supplies is available in a device tree node.
1944 * This is used to check whether the new or the legacy device tree bindings
1945 * should be used.
1946 */
1947static bool of_regulator_bulk_available(struct device_node *np,
1948					struct regulator_bulk_data *supplies,
1949					unsigned int num_supplies)
1950{
1951	char property[32];
1952	unsigned int i;
1953
1954	for (i = 0; i < num_supplies; i++) {
1955		snprintf(property, 32, "%s-supply", supplies[i].supply);
1956
1957		if (of_find_property(np, property, NULL) == NULL)
1958			return false;
1959	}
1960
1961	return true;
1962}
1963
1964/*
1965 * Old versions of the device tree binding for this device used a set of power
1966 * supplies that didn't match the hardware inputs. This happened to work for a
1967	 * number of cases but is not future-proof. However, to preserve backwards-
1968 * compatibility with old device trees, this function will try to use the old
1969 * set of supplies.
1970 */
1971static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1972{
1973	struct device *dev = pcie->dev;
1974	struct device_node *np = dev->of_node;
1975
1976	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1977		pcie->num_supplies = 3;
1978	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1979		pcie->num_supplies = 2;
1980
1981	if (pcie->num_supplies == 0) {
1982		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
1983		return -ENODEV;
1984	}
1985
1986	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1987				      sizeof(*pcie->supplies),
1988				      GFP_KERNEL);
1989	if (!pcie->supplies)
1990		return -ENOMEM;
1991
1992	pcie->supplies[0].supply = "pex-clk";
1993	pcie->supplies[1].supply = "vdd";
1994
1995	if (pcie->num_supplies > 2)
1996		pcie->supplies[2].supply = "avdd";
1997
1998	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
1999}
2000
2001/*
2002 * Obtains the list of regulators required for a particular generation of the
2003 * IP block.
2004 *
2005 * This would've been nice to do simply by providing static tables for use
2006 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
2007	 * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
2008 * and either seems to be optional depending on which ports are being used.
2009 */
2010static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
2011{
2012	struct device *dev = pcie->dev;
2013	struct device_node *np = dev->of_node;
2014	unsigned int i = 0;
2015
2016	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
2017		pcie->num_supplies = 4;
2018
2019		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2020					      sizeof(*pcie->supplies),
2021					      GFP_KERNEL);
2022		if (!pcie->supplies)
2023			return -ENOMEM;
2024
2025		pcie->supplies[i++].supply = "dvdd-pex";
2026		pcie->supplies[i++].supply = "hvdd-pex-pll";
2027		pcie->supplies[i++].supply = "hvdd-pex";
2028		pcie->supplies[i++].supply = "vddio-pexctl-aud";
2029	} else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
2030		pcie->num_supplies = 3;
2031
2032		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2033					      sizeof(*pcie->supplies),
2034					      GFP_KERNEL);
2035		if (!pcie->supplies)
2036			return -ENOMEM;
2037
2038		pcie->supplies[i++].supply = "hvddio-pex";
2039		pcie->supplies[i++].supply = "dvddio-pex";
2040		pcie->supplies[i++].supply = "vddio-pex-ctl";
2041	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
2042		pcie->num_supplies = 4;
2043
2044		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2045					      sizeof(*pcie->supplies),
2046					      GFP_KERNEL);
2047		if (!pcie->supplies)
2048			return -ENOMEM;
2049
2050		pcie->supplies[i++].supply = "avddio-pex";
2051		pcie->supplies[i++].supply = "dvddio-pex";
2052		pcie->supplies[i++].supply = "hvdd-pex";
2053		pcie->supplies[i++].supply = "vddio-pex-ctl";
2054	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
2055		bool need_pexa = false, need_pexb = false;
2056
2057		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
2058		if (lane_mask & 0x0f)
2059			need_pexa = true;
2060
2061		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
2062		if (lane_mask & 0x30)
2063			need_pexb = true;
2064
2065		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
2066					 (need_pexb ? 2 : 0);
2067
2068		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2069					      sizeof(*pcie->supplies),
2070					      GFP_KERNEL);
2071		if (!pcie->supplies)
2072			return -ENOMEM;
2073
2074		pcie->supplies[i++].supply = "avdd-pex-pll";
2075		pcie->supplies[i++].supply = "hvdd-pex";
2076		pcie->supplies[i++].supply = "vddio-pex-ctl";
2077		pcie->supplies[i++].supply = "avdd-plle";
2078
2079		if (need_pexa) {
2080			pcie->supplies[i++].supply = "avdd-pexa";
2081			pcie->supplies[i++].supply = "vdd-pexa";
2082		}
2083
2084		if (need_pexb) {
2085			pcie->supplies[i++].supply = "avdd-pexb";
2086			pcie->supplies[i++].supply = "vdd-pexb";
2087		}
2088	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
2089		pcie->num_supplies = 5;
2090
2091		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2092					      sizeof(*pcie->supplies),
2093					      GFP_KERNEL);
2094		if (!pcie->supplies)
2095			return -ENOMEM;
2096
2097		pcie->supplies[0].supply = "avdd-pex";
2098		pcie->supplies[1].supply = "vdd-pex";
2099		pcie->supplies[2].supply = "avdd-pex-pll";
2100		pcie->supplies[3].supply = "avdd-plle";
2101		pcie->supplies[4].supply = "vddio-pex-clk";
2102	}
2103
2104	if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
2105					pcie->num_supplies))
2106		return devm_regulator_bulk_get(dev, pcie->num_supplies,
2107					       pcie->supplies);
2108
2109	/*
2110	 * If not all regulators are available for this new scheme, assume
2111	 * that the device tree complies with an older version of the device
2112	 * tree binding.
2113	 */
2114	dev_info(dev, "using legacy DT binding for power supplies\n");
2115
2116	devm_kfree(dev, pcie->supplies);
2117	pcie->num_supplies = 0;
2118
2119	return tegra_pcie_get_legacy_regulators(pcie);
2120}
2121
2122static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2123{
2124	struct device *dev = pcie->dev;
2125	struct device_node *np = dev->of_node, *port;
2126	const struct tegra_pcie_soc *soc = pcie->soc;
2127	u32 lanes = 0, mask = 0;
2128	unsigned int lane = 0;
2129	int err;
2130
2131	/* parse root ports */
2132	for_each_child_of_node(np, port) {
2133		struct tegra_pcie_port *rp;
2134		unsigned int index;
2135		u32 value;
2136		char *label;
2137
2138		err = of_pci_get_devfn(port);
2139		if (err < 0) {
2140			dev_err(dev, "failed to parse address: %d\n", err);
2141			goto err_node_put;
2142		}
2143
2144		index = PCI_SLOT(err);
2145
2146		if (index < 1 || index > soc->num_ports) {
2147			dev_err(dev, "invalid port number: %d\n", index);
2148			err = -EINVAL;
2149			goto err_node_put;
2150		}
2151
2152		index--;
2153
2154		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
2155		if (err < 0) {
2156			dev_err(dev, "failed to parse # of lanes: %d\n",
2157				err);
2158			goto err_node_put;
2159		}
2160
2161		if (value > 16) {
2162			dev_err(dev, "invalid # of lanes: %u\n", value);
2163			err = -EINVAL;
2164			goto err_node_put;
2165		}
2166
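		/*
		 * Pack this port's lane count into the crossbar configuration
		 * (one byte per port) and record which physical lanes are in
		 * use so that the matching regulators can be requested.
		 */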
2167		lanes |= value << (index << 3);
2168
2169		if (!of_device_is_available(port)) {
2170			lane += value;
2171			continue;
2172		}
2173
2174		mask |= ((1 << value) - 1) << lane;
2175		lane += value;
2176
2177		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2178		if (!rp) {
2179			err = -ENOMEM;
2180			goto err_node_put;
2181		}
2182
2183		err = of_address_to_resource(port, 0, &rp->regs);
2184		if (err < 0) {
2185			dev_err(dev, "failed to parse address: %d\n", err);
2186			goto err_node_put;
2187		}
2188
2189		INIT_LIST_HEAD(&rp->list);
2190		rp->index = index;
2191		rp->lanes = value;
2192		rp->pcie = pcie;
2193		rp->np = port;
2194
2195		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
2196		if (IS_ERR(rp->base)) {
2197			err = PTR_ERR(rp->base);
2198			goto err_node_put;
2199		}
2200
2201		label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
2202		if (!label) {
2203			err = -ENOMEM;
2204			goto err_node_put;
2205		}
2206
2207		/*
2208		 * devm_gpiod_get_from_of_node() returns -ENOENT if the reset-gpios
2209		 * property is not populated; in that case fall back to using the
2210		 * AFI per-port register to toggle the PERST# SFIO line.
2211		 */
2212		rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port,
2213							     "reset-gpios", 0,
2214							     GPIOD_OUT_LOW,
2215							     label);
2216		if (IS_ERR(rp->reset_gpio)) {
2217			if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
2218				rp->reset_gpio = NULL;
2219			} else {
2220				dev_err(dev, "failed to get reset GPIO: %ld\n",
2221					PTR_ERR(rp->reset_gpio));
2222				err = PTR_ERR(rp->reset_gpio);
2223				goto err_node_put;
2224			}
2225		}
2226
2227		list_add_tail(&rp->list, &pcie->ports);
2228	}
2229
2230	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
2231	if (err < 0) {
2232		dev_err(dev, "invalid lane configuration\n");
2233		return err;
2234	}
2235
2236	err = tegra_pcie_get_regulators(pcie, mask);
2237	if (err < 0)
2238		return err;
2239
2240	return 0;
2241
2242err_node_put:
2243	of_node_put(port);
2244	return err;
2245}
2246
2247/*
2248 * FIXME: If there are no PCIe cards attached, then calling this function
2249 * can result in the increase of the bootup time as there are big timeout
2250 * loops.
2251 */
2252#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
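/*
 * Wait for the data link to come up on a port: poll first for DL_UP in
 * RP_VEND_XP and then for DL_LINK_ACTIVE in the link status register,
 * resetting the port and retrying up to three times before giving up.
 */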
2253static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
2254{
2255	struct device *dev = port->pcie->dev;
2256	unsigned int retries = 3;
2257	unsigned long value;
2258
2259	/* override presence detection */
2260	value = readl(port->base + RP_PRIV_MISC);
2261	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
2262	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
2263	writel(value, port->base + RP_PRIV_MISC);
2264
2265	do {
2266		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2267
2268		do {
2269			value = readl(port->base + RP_VEND_XP);
2270
2271			if (value & RP_VEND_XP_DL_UP)
2272				break;
2273
2274			usleep_range(1000, 2000);
2275		} while (--timeout);
2276
2277		if (!timeout) {
2278			dev_dbg(dev, "link %u down, retrying\n", port->index);
2279			goto retry;
2280		}
2281
2282		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2283
2284		do {
2285			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2286
2287			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2288				return true;
2289
2290			usleep_range(1000, 2000);
2291		} while (--timeout);
2292
2293retry:
2294		tegra_pcie_port_reset(port);
2295	} while (--retries);
2296
2297	return false;
2298}
2299
2300static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
2301{
2302	struct device *dev = pcie->dev;
2303	struct tegra_pcie_port *port;
2304	ktime_t deadline;
2305	u32 value;
2306
2307	list_for_each_entry(port, &pcie->ports, list) {
2308		/*
2309		 * "Supported Link Speeds Vector" in "Link Capabilities 2"
2310		 * is not supported by Tegra. tegra_pcie_change_link_speed()
2311		 * is called only for Tegra chips which support Gen2.
2312		 * So there is no harm if the supported link speed is not verified.
2313		 */
2314		value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
2315		value &= ~PCI_EXP_LNKSTA_CLS;
2316		value |= PCI_EXP_LNKSTA_CLS_5_0GB;
2317		writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
2318
2319		/*
2320		 * Poll until link comes back from recovery to avoid race
2321		 * condition.
2322		 */
2323		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2324
2325		while (ktime_before(ktime_get(), deadline)) {
2326			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2327			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2328				break;
2329
2330			usleep_range(2000, 3000);
2331		}
2332
2333		if (value & PCI_EXP_LNKSTA_LT)
2334			dev_warn(dev, "PCIe port %u link is in recovery\n",
2335				 port->index);
2336
2337		/* Retrain the link */
2338		value = readl(port->base + RP_LINK_CONTROL_STATUS);
2339		value |= PCI_EXP_LNKCTL_RL;
2340		writel(value, port->base + RP_LINK_CONTROL_STATUS);
2341
2342		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2343
2344		while (ktime_before(ktime_get(), deadline)) {
2345			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2346			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2347				break;
2348
2349			usleep_range(2000, 3000);
2350		}
2351
2352		if (value & PCI_EXP_LNKSTA_LT)
2353			dev_err(dev, "failed to retrain link of port %u\n",
2354				port->index);
2355	}
2356}
2357
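/*
 * Enable all ports, release the LTSSM from reset and drop every port
 * whose link does not come up so that it is not enumerated later.
 */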
2358static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
2359{
2360	struct device *dev = pcie->dev;
2361	struct tegra_pcie_port *port, *tmp;
2362
2363	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2364		dev_info(dev, "probing port %u, using %u lanes\n",
2365			 port->index, port->lanes);
2366
2367		tegra_pcie_port_enable(port);
2368	}
2369
2370	/* Start LTSSM from Tegra side */
2371	reset_control_deassert(pcie->pcie_xrst);
2372
2373	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2374		if (tegra_pcie_port_check_link(port))
2375			continue;
2376
2377		dev_info(dev, "link %u down, ignoring\n", port->index);
2378
2379		tegra_pcie_port_disable(port);
2380		tegra_pcie_port_free(port);
2381	}
2382
2383	if (pcie->soc->has_gen2)
2384		tegra_pcie_change_link_speed(pcie);
2385}
2386
2387static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
2388{
2389	struct tegra_pcie_port *port, *tmp;
2390
2391	reset_control_assert(pcie->pcie_xrst);
2392
2393	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2394		tegra_pcie_port_disable(port);
2395}
2396
2397static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
2398	{ .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
2399	{ .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
2400};
2401
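/*
 * Per-SoC configuration: the feature flags and register values that
 * differ between Tegra generations are collected in tegra_pcie_soc so
 * that the common code above can stay table-driven.
 */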
2402static const struct tegra_pcie_soc tegra20_pcie = {
2403	.num_ports = 2,
2404	.ports = tegra20_pcie_ports,
2405	.msi_base_shift = 0,
2406	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2407	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2408	.pads_refclk_cfg0 = 0xfa5cfa5c,
2409	.has_pex_clkreq_en = false,
2410	.has_pex_bias_ctrl = false,
2411	.has_intr_prsnt_sense = false,
2412	.has_cml_clk = false,
2413	.has_gen2 = false,
2414	.force_pca_enable = false,
2415	.program_uphy = true,
2416	.update_clamp_threshold = false,
2417	.program_deskew_time = false,
2418	.update_fc_timer = false,
2419	.has_cache_bars = true,
2420	.ectl.enable = false,
2421};
2422
2423static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
2424	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2425	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2426	{ .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
2427};
2428
2429static const struct tegra_pcie_soc tegra30_pcie = {
2430	.num_ports = 3,
2431	.ports = tegra30_pcie_ports,
2432	.msi_base_shift = 8,
2433	.afi_pex2_ctrl = 0x128,
2434	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2435	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2436	.pads_refclk_cfg0 = 0xfa5cfa5c,
2437	.pads_refclk_cfg1 = 0xfa5cfa5c,
2438	.has_pex_clkreq_en = true,
2439	.has_pex_bias_ctrl = true,
2440	.has_intr_prsnt_sense = true,
2441	.has_cml_clk = true,
2442	.has_gen2 = false,
2443	.force_pca_enable = false,
2444	.program_uphy = true,
2445	.update_clamp_threshold = false,
2446	.program_deskew_time = false,
2447	.update_fc_timer = false,
2448	.has_cache_bars = false,
2449	.ectl.enable = false,
2450};
2451
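/*
 * Tegra124 exposes two ports with the same PME turn-off/ack bit layout as
 * Tegra20, so the tegra20_pcie_ports table is reused here.
 */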
2452static const struct tegra_pcie_soc tegra124_pcie = {
2453	.num_ports = 2,
2454	.ports = tegra20_pcie_ports,
2455	.msi_base_shift = 8,
2456	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2457	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2458	.pads_refclk_cfg0 = 0x44ac44ac,
2459	.has_pex_clkreq_en = true,
2460	.has_pex_bias_ctrl = true,
2461	.has_intr_prsnt_sense = true,
2462	.has_cml_clk = true,
2463	.has_gen2 = true,
2464	.force_pca_enable = false,
2465	.program_uphy = true,
2466	.update_clamp_threshold = true,
2467	.program_deskew_time = false,
2468	.update_fc_timer = false,
2469	.has_cache_bars = false,
2470	.ectl.enable = false,
2471};
2472
2473static const struct tegra_pcie_soc tegra210_pcie = {
2474	.num_ports = 2,
2475	.ports = tegra20_pcie_ports,
2476	.msi_base_shift = 8,
2477	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2478	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2479	.pads_refclk_cfg0 = 0x90b890b8,
2480	/* FC threshold is bits [25:18] */
2481	.update_fc_threshold = 0x01800000,
2482	.has_pex_clkreq_en = true,
2483	.has_pex_bias_ctrl = true,
2484	.has_intr_prsnt_sense = true,
2485	.has_cml_clk = true,
2486	.has_gen2 = true,
2487	.force_pca_enable = true,
2488	.program_uphy = true,
2489	.update_clamp_threshold = true,
2490	.program_deskew_time = true,
2491	.update_fc_timer = true,
2492	.has_cache_bars = false,
2493	.ectl = {
2494		.regs = {
2495			.rp_ectl_2_r1 = 0x0000000f,
2496			.rp_ectl_4_r1 = 0x00000067,
2497			.rp_ectl_5_r1 = 0x55010000,
2498			.rp_ectl_6_r1 = 0x00000001,
2499			.rp_ectl_2_r2 = 0x0000008f,
2500			.rp_ectl_4_r2 = 0x000000c7,
2501			.rp_ectl_5_r2 = 0x55010000,
2502			.rp_ectl_6_r2 = 0x00000001,
2503		},
2504		.enable = true,
2505	},
2506};
2507
2508static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
2509	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2510	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2511	{ .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
2512};
2513
2514static const struct tegra_pcie_soc tegra186_pcie = {
2515	.num_ports = 3,
2516	.ports = tegra186_pcie_ports,
2517	.msi_base_shift = 8,
2518	.afi_pex2_ctrl = 0x19c,
2519	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2520	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2521	.pads_refclk_cfg0 = 0x80b880b8,
2522	.pads_refclk_cfg1 = 0x000480b8,
2523	.has_pex_clkreq_en = true,
2524	.has_pex_bias_ctrl = true,
2525	.has_intr_prsnt_sense = true,
2526	.has_cml_clk = false,
2527	.has_gen2 = true,
2528	.force_pca_enable = false,
2529	.program_uphy = false,
2530	.update_clamp_threshold = false,
2531	.program_deskew_time = false,
2532	.update_fc_timer = false,
2533	.has_cache_bars = false,
2534	.ectl.enable = false,
2535};
2536
2537static const struct of_device_id tegra_pcie_of_match[] = {
2538	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
2539	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
2540	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
2541	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
2542	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
2543	{ },
2544};
2545MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
2546
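/*
 * The "ports" debugfs file is a seq_file iterator over the remaining
 * ports; each entry prints the port index and whether the data link is
 * up and/or active.
 */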
2547static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2548{
2549	struct tegra_pcie *pcie = s->private;
2550
2551	if (list_empty(&pcie->ports))
2552		return NULL;
2553
2554	seq_printf(s, "Index  Status\n");
2555
2556	return seq_list_start(&pcie->ports, *pos);
2557}
2558
2559static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
2560{
2561	struct tegra_pcie *pcie = s->private;
2562
2563	return seq_list_next(v, &pcie->ports, pos);
2564}
2565
2566static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
2567{
2568}
2569
2570static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2571{
2572	bool up = false, active = false;
2573	struct tegra_pcie_port *port;
2574	unsigned int value;
2575
2576	port = list_entry(v, struct tegra_pcie_port, list);
2577
2578	value = readl(port->base + RP_VEND_XP);
2579
2580	if (value & RP_VEND_XP_DL_UP)
2581		up = true;
2582
2583	value = readl(port->base + RP_LINK_CONTROL_STATUS);
2584
2585	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2586		active = true;
2587
2588	seq_printf(s, "%2u     ", port->index);
2589
2590	if (up)
2591		seq_printf(s, "up");
2592
2593	if (active) {
2594		if (up)
2595			seq_printf(s, ", ");
2596
2597		seq_printf(s, "active");
2598	}
2599
2600	seq_printf(s, "\n");
2601	return 0;
2602}
2603
2604static const struct seq_operations tegra_pcie_ports_sops = {
2605	.start = tegra_pcie_ports_seq_start,
2606	.next = tegra_pcie_ports_seq_next,
2607	.stop = tegra_pcie_ports_seq_stop,
2608	.show = tegra_pcie_ports_seq_show,
2609};
2610
2611DEFINE_SEQ_ATTRIBUTE(tegra_pcie_ports);
2612
2613static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
2614{
2615	debugfs_remove_recursive(pcie->debugfs);
2616	pcie->debugfs = NULL;
2617}
2618
2619static void tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2620{
2621	pcie->debugfs = debugfs_create_dir("pcie", NULL);
2622
2623	debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, pcie,
2624			    &tegra_pcie_ports_fops);
2625}
2626
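/*
 * Probe: allocate the host bridge, parse the device tree (ports, lanes,
 * supplies), claim resources and set up MSI, power the controller up via
 * runtime PM and finally register the host bridge with the PCI core.
 */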
2627static int tegra_pcie_probe(struct platform_device *pdev)
2628{
2629	struct device *dev = &pdev->dev;
2630	struct pci_host_bridge *host;
2631	struct tegra_pcie *pcie;
 
2632	int err;
2633
2634	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
2635	if (!host)
2636		return -ENOMEM;
2637
2638	pcie = pci_host_bridge_priv(host);
2639	host->sysdata = pcie;
2640	platform_set_drvdata(pdev, pcie);
2641
2642	pcie->soc = of_device_get_match_data(dev);
2643	INIT_LIST_HEAD(&pcie->ports);
2644	pcie->dev = dev;
2645
2646	err = tegra_pcie_parse_dt(pcie);
2647	if (err < 0)
2648		return err;
2649
2650	err = tegra_pcie_get_resources(pcie);
2651	if (err < 0) {
2652		dev_err(dev, "failed to request resources: %d\n", err);
2653		return err;
2654	}
2655
2656	err = tegra_pcie_msi_setup(pcie);
2657	if (err < 0) {
2658		dev_err(dev, "failed to enable MSI support: %d\n", err);
2659		goto put_resources;
2660	}
2661
2662	pm_runtime_enable(pcie->dev);
2663	err = pm_runtime_get_sync(pcie->dev);
2664	if (err < 0) {
2665		dev_err(dev, "fail to enable pcie controller: %d\n", err);
 
 
 
 
 
2666		goto pm_runtime_put;
2667	}
2668
 
 
2669	host->ops = &tegra_pcie_ops;
2670	host->map_irq = tegra_pcie_map_irq;
 
2671
2672	err = pci_host_probe(host);
2673	if (err < 0) {
2674		dev_err(dev, "failed to register host: %d\n", err);
2675		goto pm_runtime_put;
2676	}
2677
2678	if (IS_ENABLED(CONFIG_DEBUG_FS))
2679		tegra_pcie_debugfs_init(pcie);
 
 
 
 
 
 
 
 
 
 
 
2680
2681	return 0;
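
	/*
	 * The error labels below unwind strictly in reverse order of
	 * acquisition: runtime PM is put and disabled first, then the
	 * MSI setup is torn down, and the controller resources are
	 * released last.
	 */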
pm_runtime_put:
	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);
	tegra_pcie_msi_teardown(pcie);
put_resources:
	tegra_pcie_put_resources(pcie);
	return err;
}

static int tegra_pcie_remove(struct platform_device *pdev)
{
	struct tegra_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct tegra_pcie_port *port, *tmp;

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_pcie_debugfs_exit(pcie);

	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_msi_teardown(pcie);

	tegra_pcie_put_resources(pcie);

	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
		tegra_pcie_port_free(port);

	return 0;
}

static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
{
	struct tegra_pcie *pcie = dev_get_drvdata(dev);
	struct tegra_pcie_port *port;
	int err;

	list_for_each_entry(port, &pcie->ports, list)
		tegra_pcie_pme_turnoff(port);

	tegra_pcie_disable_ports(pcie);

	/*
	 * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
	 * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
	 */
	tegra_pcie_disable_interrupts(pcie);

	if (pcie->soc->program_uphy) {
		err = tegra_pcie_phy_power_off(pcie);
		if (err < 0)
			dev_err(dev, "failed to power off PHY(s): %d\n", err);
	}

	reset_control_assert(pcie->pex_rst);
	clk_disable_unprepare(pcie->pex_clk);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);

	pinctrl_pm_select_idle_state(dev);
	tegra_pcie_power_off(pcie);

	return 0;
}
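
/*
 * Resume re-runs the bring-up sequence that suspend above tears down:
 * power and pinctrl state first, then the AFI controller, address
 * translations and MSI, then the PEX clock and reset, and finally the
 * PHYs, pad settings and root ports.
 */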

static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
{
	struct tegra_pcie *pcie = dev_get_drvdata(dev);
	int err;

	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(dev, "failed to power on PCIe controller: %d\n", err);
		return err;
	}

	err = pinctrl_pm_select_default_state(dev);
	if (err < 0) {
		dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
		goto poweroff;
	}

	tegra_pcie_enable_controller(pcie);
	tegra_pcie_setup_translations(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_enable_msi(pcie);

	err = clk_prepare_enable(pcie->pex_clk);
	if (err) {
		dev_err(dev, "failed to enable PEX clock: %d\n", err);
		goto pex_dpd_enable;
	}

	reset_control_deassert(pcie->pex_rst);

	if (pcie->soc->program_uphy) {
		err = tegra_pcie_phy_power_on(pcie);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY(s): %d\n", err);
			goto disable_pex_clk;
		}
	}

	tegra_pcie_apply_pad_settings(pcie);
	tegra_pcie_enable_ports(pcie);

	return 0;

disable_pex_clk:
	reset_control_assert(pcie->pex_rst);
	clk_disable_unprepare(pcie->pex_clk);
pex_dpd_enable:
	pinctrl_pm_select_idle_state(dev);
poweroff:
	tegra_pcie_power_off(pcie);

	return err;
}

static const struct dev_pm_ops tegra_pcie_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend,
				      tegra_pcie_pm_resume)
};
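
/*
 * The same suspend/resume pair serves both runtime PM and the noirq
 * phase of system sleep. The SET_*_PM_OPS macros compile out when the
 * corresponding Kconfig options are disabled, which is why the
 * callbacks are annotated __maybe_unused.
 */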

static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.of_match_table = tegra_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &tegra_pcie_pm_ops,
	},
	.probe = tegra_pcie_probe,
	.remove = tegra_pcie_remove,
};
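
/*
 * suppress_bind_attrs above keeps the driver core from creating the
 * sysfs bind/unbind attributes for this driver, so the controller
 * cannot be unbound manually from user space.
 */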
module_platform_driver(tegra_pcie_driver);
MODULE_LICENSE("GPL");