/*
 * (Web-scrape artifact — not part of the driver source:)
 * Linux Audio
 * Check our new training course
 * Loading...
 * Note: File does not exist in v4.17.
 */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Driver for the Aardvark PCIe controller, used on Marvell Armada
   4 * 3700.
   5 *
   6 * Copyright (C) 2016 Marvell
   7 *
   8 * Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>
   9 */
  10
  11#include <linux/bitfield.h>
  12#include <linux/delay.h>
  13#include <linux/gpio/consumer.h>
  14#include <linux/interrupt.h>
  15#include <linux/irq.h>
  16#include <linux/irqdomain.h>
  17#include <linux/kernel.h>
  18#include <linux/module.h>
  19#include <linux/pci.h>
  20#include <linux/pci-ecam.h>
  21#include <linux/init.h>
  22#include <linux/phy/phy.h>
  23#include <linux/platform_device.h>
  24#include <linux/msi.h>
  25#include <linux/of_address.h>
  26#include <linux/of_pci.h>
  27
  28#include "../pci.h"
  29#include "../pci-bridge-emul.h"
  30
  31/* PCIe core registers */
  32#define PCIE_CORE_DEV_ID_REG					0x0
  33#define PCIE_CORE_CMD_STATUS_REG				0x4
  34#define PCIE_CORE_DEV_REV_REG					0x8
  35#define PCIE_CORE_SSDEV_ID_REG					0x2c
  36#define PCIE_CORE_PCIEXP_CAP					0xc0
  37#define PCIE_CORE_PCIERR_CAP					0x100
  38#define PCIE_CORE_ERR_CAPCTL_REG				0x118
  39#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX			BIT(5)
  40#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN			BIT(6)
  41#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHCK			BIT(7)
  42#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV			BIT(8)
  43/* PIO registers base address and register offsets */
  44#define PIO_BASE_ADDR				0x4000
  45#define PIO_CTRL				(PIO_BASE_ADDR + 0x0)
  46#define   PIO_CTRL_TYPE_MASK			GENMASK(3, 0)
  47#define   PIO_CTRL_ADDR_WIN_DISABLE		BIT(24)
  48#define PIO_STAT				(PIO_BASE_ADDR + 0x4)
  49#define   PIO_COMPLETION_STATUS_SHIFT		7
  50#define   PIO_COMPLETION_STATUS_MASK		GENMASK(9, 7)
  51#define   PIO_COMPLETION_STATUS_OK		0
  52#define   PIO_COMPLETION_STATUS_UR		1
  53#define   PIO_COMPLETION_STATUS_RRS		2
  54#define   PIO_COMPLETION_STATUS_CA		4
  55#define   PIO_NON_POSTED_REQ			BIT(10)
  56#define   PIO_ERR_STATUS			BIT(11)
  57#define PIO_ADDR_LS				(PIO_BASE_ADDR + 0x8)
  58#define PIO_ADDR_MS				(PIO_BASE_ADDR + 0xc)
  59#define PIO_WR_DATA				(PIO_BASE_ADDR + 0x10)
  60#define PIO_WR_DATA_STRB			(PIO_BASE_ADDR + 0x14)
  61#define PIO_RD_DATA				(PIO_BASE_ADDR + 0x18)
  62#define PIO_START				(PIO_BASE_ADDR + 0x1c)
  63#define PIO_ISR					(PIO_BASE_ADDR + 0x20)
  64#define PIO_ISRM				(PIO_BASE_ADDR + 0x24)
  65
  66/* Aardvark Control registers */
  67#define CONTROL_BASE_ADDR			0x4800
  68#define PCIE_CORE_CTRL0_REG			(CONTROL_BASE_ADDR + 0x0)
  69#define     PCIE_GEN_SEL_MSK			0x3
  70#define     PCIE_GEN_SEL_SHIFT			0x0
  71#define     SPEED_GEN_1				0
  72#define     SPEED_GEN_2				1
  73#define     SPEED_GEN_3				2
  74#define     IS_RC_MSK				1
  75#define     IS_RC_SHIFT				2
  76#define     LANE_CNT_MSK			0x18
  77#define     LANE_CNT_SHIFT			0x3
  78#define     LANE_COUNT_1			(0 << LANE_CNT_SHIFT)
  79#define     LANE_COUNT_2			(1 << LANE_CNT_SHIFT)
  80#define     LANE_COUNT_4			(2 << LANE_CNT_SHIFT)
  81#define     LANE_COUNT_8			(3 << LANE_CNT_SHIFT)
  82#define     LINK_TRAINING_EN			BIT(6)
  83#define     LEGACY_INTA				BIT(28)
  84#define     LEGACY_INTB				BIT(29)
  85#define     LEGACY_INTC				BIT(30)
  86#define     LEGACY_INTD				BIT(31)
  87#define PCIE_CORE_CTRL1_REG			(CONTROL_BASE_ADDR + 0x4)
  88#define     HOT_RESET_GEN			BIT(0)
  89#define PCIE_CORE_CTRL2_REG			(CONTROL_BASE_ADDR + 0x8)
  90#define     PCIE_CORE_CTRL2_RESERVED		0x7
  91#define     PCIE_CORE_CTRL2_TD_ENABLE		BIT(4)
  92#define     PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE	BIT(5)
  93#define     PCIE_CORE_CTRL2_OB_WIN_ENABLE	BIT(6)
  94#define     PCIE_CORE_CTRL2_MSI_ENABLE		BIT(10)
  95#define PCIE_CORE_REF_CLK_REG			(CONTROL_BASE_ADDR + 0x14)
  96#define     PCIE_CORE_REF_CLK_TX_ENABLE		BIT(1)
  97#define     PCIE_CORE_REF_CLK_RX_ENABLE		BIT(2)
  98#define PCIE_MSG_LOG_REG			(CONTROL_BASE_ADDR + 0x30)
  99#define PCIE_ISR0_REG				(CONTROL_BASE_ADDR + 0x40)
 100#define PCIE_MSG_PM_PME_MASK			BIT(7)
 101#define PCIE_ISR0_MASK_REG			(CONTROL_BASE_ADDR + 0x44)
 102#define     PCIE_ISR0_MSI_INT_PENDING		BIT(24)
 103#define     PCIE_ISR0_CORR_ERR			BIT(11)
 104#define     PCIE_ISR0_NFAT_ERR			BIT(12)
 105#define     PCIE_ISR0_FAT_ERR			BIT(13)
 106#define     PCIE_ISR0_ERR_MASK			GENMASK(13, 11)
 107#define     PCIE_ISR0_INTX_ASSERT(val)		BIT(16 + (val))
 108#define     PCIE_ISR0_INTX_DEASSERT(val)	BIT(20 + (val))
 109#define     PCIE_ISR0_ALL_MASK			GENMASK(31, 0)
 110#define PCIE_ISR1_REG				(CONTROL_BASE_ADDR + 0x48)
 111#define PCIE_ISR1_MASK_REG			(CONTROL_BASE_ADDR + 0x4C)
 112#define     PCIE_ISR1_POWER_STATE_CHANGE	BIT(4)
 113#define     PCIE_ISR1_FLUSH			BIT(5)
 114#define     PCIE_ISR1_INTX_ASSERT(val)		BIT(8 + (val))
 115#define     PCIE_ISR1_ALL_MASK			GENMASK(31, 0)
 116#define PCIE_MSI_ADDR_LOW_REG			(CONTROL_BASE_ADDR + 0x50)
 117#define PCIE_MSI_ADDR_HIGH_REG			(CONTROL_BASE_ADDR + 0x54)
 118#define PCIE_MSI_STATUS_REG			(CONTROL_BASE_ADDR + 0x58)
 119#define PCIE_MSI_MASK_REG			(CONTROL_BASE_ADDR + 0x5C)
 120#define     PCIE_MSI_ALL_MASK			GENMASK(31, 0)
 121#define PCIE_MSI_PAYLOAD_REG			(CONTROL_BASE_ADDR + 0x9C)
 122#define     PCIE_MSI_DATA_MASK			GENMASK(15, 0)
 123
 124/* PCIe window configuration */
 125#define OB_WIN_BASE_ADDR			0x4c00
 126#define OB_WIN_BLOCK_SIZE			0x20
 127#define OB_WIN_COUNT				8
 128#define OB_WIN_REG_ADDR(win, offset)		(OB_WIN_BASE_ADDR + \
 129						 OB_WIN_BLOCK_SIZE * (win) + \
 130						 (offset))
 131#define OB_WIN_MATCH_LS(win)			OB_WIN_REG_ADDR(win, 0x00)
 132#define     OB_WIN_ENABLE			BIT(0)
 133#define OB_WIN_MATCH_MS(win)			OB_WIN_REG_ADDR(win, 0x04)
 134#define OB_WIN_REMAP_LS(win)			OB_WIN_REG_ADDR(win, 0x08)
 135#define OB_WIN_REMAP_MS(win)			OB_WIN_REG_ADDR(win, 0x0c)
 136#define OB_WIN_MASK_LS(win)			OB_WIN_REG_ADDR(win, 0x10)
 137#define OB_WIN_MASK_MS(win)			OB_WIN_REG_ADDR(win, 0x14)
 138#define OB_WIN_ACTIONS(win)			OB_WIN_REG_ADDR(win, 0x18)
 139#define OB_WIN_DEFAULT_ACTIONS			(OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
 140#define     OB_WIN_FUNC_NUM_MASK		GENMASK(31, 24)
 141#define     OB_WIN_FUNC_NUM_SHIFT		24
 142#define     OB_WIN_FUNC_NUM_ENABLE		BIT(23)
 143#define     OB_WIN_BUS_NUM_BITS_MASK		GENMASK(22, 20)
 144#define     OB_WIN_BUS_NUM_BITS_SHIFT		20
 145#define     OB_WIN_MSG_CODE_ENABLE		BIT(22)
 146#define     OB_WIN_MSG_CODE_MASK		GENMASK(21, 14)
 147#define     OB_WIN_MSG_CODE_SHIFT		14
 148#define     OB_WIN_MSG_PAYLOAD_LEN		BIT(12)
 149#define     OB_WIN_ATTR_ENABLE			BIT(11)
 150#define     OB_WIN_ATTR_TC_MASK			GENMASK(10, 8)
 151#define     OB_WIN_ATTR_TC_SHIFT		8
 152#define     OB_WIN_ATTR_RELAXED			BIT(7)
 153#define     OB_WIN_ATTR_NOSNOOP			BIT(6)
 154#define     OB_WIN_ATTR_POISON			BIT(5)
 155#define     OB_WIN_ATTR_IDO			BIT(4)
 156#define     OB_WIN_TYPE_MASK			GENMASK(3, 0)
 157#define     OB_WIN_TYPE_SHIFT			0
 158#define     OB_WIN_TYPE_MEM			0x0
 159#define     OB_WIN_TYPE_IO			0x4
 160#define     OB_WIN_TYPE_CONFIG_TYPE0		0x8
 161#define     OB_WIN_TYPE_CONFIG_TYPE1		0x9
 162#define     OB_WIN_TYPE_MSG			0xc
 163
 164/* LMI registers base address and register offsets */
 165#define LMI_BASE_ADDR				0x6000
 166#define CFG_REG					(LMI_BASE_ADDR + 0x0)
 167#define     LTSSM_SHIFT				24
 168#define     LTSSM_MASK				0x3f
 169#define     RC_BAR_CONFIG			0x300
 170
 171/* LTSSM values in CFG_REG */
 172enum {
 173	LTSSM_DETECT_QUIET			= 0x0,
 174	LTSSM_DETECT_ACTIVE			= 0x1,
 175	LTSSM_POLLING_ACTIVE			= 0x2,
 176	LTSSM_POLLING_COMPLIANCE		= 0x3,
 177	LTSSM_POLLING_CONFIGURATION		= 0x4,
 178	LTSSM_CONFIG_LINKWIDTH_START		= 0x5,
 179	LTSSM_CONFIG_LINKWIDTH_ACCEPT		= 0x6,
 180	LTSSM_CONFIG_LANENUM_ACCEPT		= 0x7,
 181	LTSSM_CONFIG_LANENUM_WAIT		= 0x8,
 182	LTSSM_CONFIG_COMPLETE			= 0x9,
 183	LTSSM_CONFIG_IDLE			= 0xa,
 184	LTSSM_RECOVERY_RCVR_LOCK		= 0xb,
 185	LTSSM_RECOVERY_SPEED			= 0xc,
 186	LTSSM_RECOVERY_RCVR_CFG			= 0xd,
 187	LTSSM_RECOVERY_IDLE			= 0xe,
 188	LTSSM_L0				= 0x10,
 189	LTSSM_RX_L0S_ENTRY			= 0x11,
 190	LTSSM_RX_L0S_IDLE			= 0x12,
 191	LTSSM_RX_L0S_FTS			= 0x13,
 192	LTSSM_TX_L0S_ENTRY			= 0x14,
 193	LTSSM_TX_L0S_IDLE			= 0x15,
 194	LTSSM_TX_L0S_FTS			= 0x16,
 195	LTSSM_L1_ENTRY				= 0x17,
 196	LTSSM_L1_IDLE				= 0x18,
 197	LTSSM_L2_IDLE				= 0x19,
 198	LTSSM_L2_TRANSMIT_WAKE			= 0x1a,
 199	LTSSM_DISABLED				= 0x20,
 200	LTSSM_LOOPBACK_ENTRY_MASTER		= 0x21,
 201	LTSSM_LOOPBACK_ACTIVE_MASTER		= 0x22,
 202	LTSSM_LOOPBACK_EXIT_MASTER		= 0x23,
 203	LTSSM_LOOPBACK_ENTRY_SLAVE		= 0x24,
 204	LTSSM_LOOPBACK_ACTIVE_SLAVE		= 0x25,
 205	LTSSM_LOOPBACK_EXIT_SLAVE		= 0x26,
 206	LTSSM_HOT_RESET				= 0x27,
 207	LTSSM_RECOVERY_EQUALIZATION_PHASE0	= 0x28,
 208	LTSSM_RECOVERY_EQUALIZATION_PHASE1	= 0x29,
 209	LTSSM_RECOVERY_EQUALIZATION_PHASE2	= 0x2a,
 210	LTSSM_RECOVERY_EQUALIZATION_PHASE3	= 0x2b,
 211};
 212
 213#define VENDOR_ID_REG				(LMI_BASE_ADDR + 0x44)
 214
 215/* PCIe core controller registers */
 216#define CTRL_CORE_BASE_ADDR			0x18000
 217#define CTRL_CONFIG_REG				(CTRL_CORE_BASE_ADDR + 0x0)
 218#define     CTRL_MODE_SHIFT			0x0
 219#define     CTRL_MODE_MASK			0x1
 220#define     PCIE_CORE_MODE_DIRECT		0x0
 221#define     PCIE_CORE_MODE_COMMAND		0x1
 222
 223/* PCIe Central Interrupts Registers */
 224#define CENTRAL_INT_BASE_ADDR			0x1b000
 225#define HOST_CTRL_INT_STATUS_REG		(CENTRAL_INT_BASE_ADDR + 0x0)
 226#define HOST_CTRL_INT_MASK_REG			(CENTRAL_INT_BASE_ADDR + 0x4)
 227#define     PCIE_IRQ_CMDQ_INT			BIT(0)
 228#define     PCIE_IRQ_MSI_STATUS_INT		BIT(1)
 229#define     PCIE_IRQ_CMD_SENT_DONE		BIT(3)
 230#define     PCIE_IRQ_DMA_INT			BIT(4)
 231#define     PCIE_IRQ_IB_DXFERDONE		BIT(5)
 232#define     PCIE_IRQ_OB_DXFERDONE		BIT(6)
 233#define     PCIE_IRQ_OB_RXFERDONE		BIT(7)
 234#define     PCIE_IRQ_COMPQ_INT			BIT(12)
 235#define     PCIE_IRQ_DIR_RD_DDR_DET		BIT(13)
 236#define     PCIE_IRQ_DIR_WR_DDR_DET		BIT(14)
 237#define     PCIE_IRQ_CORE_INT			BIT(16)
 238#define     PCIE_IRQ_CORE_INT_PIO		BIT(17)
 239#define     PCIE_IRQ_DPMU_INT			BIT(18)
 240#define     PCIE_IRQ_PCIE_MIS_INT		BIT(19)
 241#define     PCIE_IRQ_MSI_INT1_DET		BIT(20)
 242#define     PCIE_IRQ_MSI_INT2_DET		BIT(21)
 243#define     PCIE_IRQ_RC_DBELL_DET		BIT(22)
 244#define     PCIE_IRQ_EP_STATUS			BIT(23)
 245#define     PCIE_IRQ_ALL_MASK			GENMASK(31, 0)
 246#define     PCIE_IRQ_ENABLE_INTS_MASK		PCIE_IRQ_CORE_INT
 247
 248/* Transaction types */
 249#define PCIE_CONFIG_RD_TYPE0			0x8
 250#define PCIE_CONFIG_RD_TYPE1			0x9
 251#define PCIE_CONFIG_WR_TYPE0			0xa
 252#define PCIE_CONFIG_WR_TYPE1			0xb
 253
 254#define PIO_RETRY_CNT			750000 /* 1.5 s */
 255#define PIO_RETRY_DELAY			2 /* 2 us*/
 256
 257#define LINK_WAIT_MAX_RETRIES		10
 258#define LINK_WAIT_USLEEP_MIN		90000
 259#define LINK_WAIT_USLEEP_MAX		100000
 260#define RETRAIN_WAIT_MAX_RETRIES	10
 261#define RETRAIN_WAIT_USLEEP_US		2000
 262
 263#define MSI_IRQ_NUM			32
 264
 265#define CFG_RD_RRS_VAL			0xffff0001
 266
 267struct advk_pcie {
 268	struct platform_device *pdev;
 269	void __iomem *base;
 270	struct {
 271		phys_addr_t match;
 272		phys_addr_t remap;
 273		phys_addr_t mask;
 274		u32 actions;
 275	} wins[OB_WIN_COUNT];
 276	u8 wins_count;
 277	struct irq_domain *rp_irq_domain;
 278	struct irq_domain *irq_domain;
 279	struct irq_chip irq_chip;
 280	raw_spinlock_t irq_lock;
 281	struct irq_domain *msi_domain;
 282	struct irq_domain *msi_inner_domain;
 283	raw_spinlock_t msi_irq_lock;
 284	DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
 285	struct mutex msi_used_lock;
 286	int link_gen;
 287	struct pci_bridge_emul bridge;
 288	struct gpio_desc *reset_gpio;
 289	struct phy *phy;
 290};
 291
 292static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
 293{
 294	writel(val, pcie->base + reg);
 295}
 296
 297static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
 298{
 299	return readl(pcie->base + reg);
 300}
 301
 302static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie)
 303{
 304	u32 val;
 305	u8 ltssm_state;
 306
 307	val = advk_readl(pcie, CFG_REG);
 308	ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
 309	return ltssm_state;
 310}
 311
 312static inline bool advk_pcie_link_up(struct advk_pcie *pcie)
 313{
 314	/* check if LTSSM is in normal operation - some L* state */
 315	u8 ltssm_state = advk_pcie_ltssm_state(pcie);
 316	return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED;
 317}
 318
 319static inline bool advk_pcie_link_active(struct advk_pcie *pcie)
 320{
 321	/*
 322	 * According to PCIe Base specification 3.0, Table 4-14: Link
 323	 * Status Mapped to the LTSSM, and 4.2.6.3.6 Configuration.Idle
 324	 * is Link Up mapped to LTSSM Configuration.Idle, Recovery, L0,
 325	 * L0s, L1 and L2 states. And according to 3.2.1. Data Link
 326	 * Control and Management State Machine Rules is DL Up status
 327	 * reported in DL Active state.
 328	 */
 329	u8 ltssm_state = advk_pcie_ltssm_state(pcie);
 330	return ltssm_state >= LTSSM_CONFIG_IDLE && ltssm_state < LTSSM_DISABLED;
 331}
 332
 333static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
 334{
 335	/*
 336	 * According to PCIe Base specification 3.0, Table 4-14: Link
 337	 * Status Mapped to the LTSSM is Link Training mapped to LTSSM
 338	 * Configuration and Recovery states.
 339	 */
 340	u8 ltssm_state = advk_pcie_ltssm_state(pcie);
 341	return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START &&
 342		 ltssm_state < LTSSM_L0) ||
 343		(ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
 344		 ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3));
 345}
 346
 347static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
 348{
 349	int retries;
 350
 351	/* check if the link is up or not */
 352	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
 353		if (advk_pcie_link_up(pcie))
 354			return 0;
 355
 356		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
 357	}
 358
 359	return -ETIMEDOUT;
 360}
 361
 362static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
 363{
 364	size_t retries;
 365
 366	for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
 367		if (advk_pcie_link_training(pcie))
 368			break;
 369		udelay(RETRAIN_WAIT_USLEEP_US);
 370	}
 371}
 372
 373static void advk_pcie_issue_perst(struct advk_pcie *pcie)
 374{
 375	if (!pcie->reset_gpio)
 376		return;
 377
 378	/* 10ms delay is needed for some cards */
 379	dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
 380	gpiod_set_value_cansleep(pcie->reset_gpio, 1);
 381	usleep_range(10000, 11000);
 382	gpiod_set_value_cansleep(pcie->reset_gpio, 0);
 383}
 384
 385static void advk_pcie_train_link(struct advk_pcie *pcie)
 386{
 387	struct device *dev = &pcie->pdev->dev;
 388	u32 reg;
 389	int ret;
 390
 391	/*
 392	 * Setup PCIe rev / gen compliance based on device tree property
 393	 * 'max-link-speed' which also forces maximal link speed.
 394	 */
 395	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
 396	reg &= ~PCIE_GEN_SEL_MSK;
 397	if (pcie->link_gen == 3)
 398		reg |= SPEED_GEN_3;
 399	else if (pcie->link_gen == 2)
 400		reg |= SPEED_GEN_2;
 401	else
 402		reg |= SPEED_GEN_1;
 403	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
 404
 405	/*
 406	 * Set maximal link speed value also into PCIe Link Control 2 register.
 407	 * Armada 3700 Functional Specification says that default value is based
 408	 * on SPEED_GEN but tests showed that default value is always 8.0 GT/s.
 409	 */
 410	reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
 411	reg &= ~PCI_EXP_LNKCTL2_TLS;
 412	if (pcie->link_gen == 3)
 413		reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
 414	else if (pcie->link_gen == 2)
 415		reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
 416	else
 417		reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
 418	advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
 419
 420	/* Enable link training after selecting PCIe generation */
 421	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
 422	reg |= LINK_TRAINING_EN;
 423	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
 424
 425	/*
 426	 * Reset PCIe card via PERST# signal. Some cards are not detected
 427	 * during link training when they are in some non-initial state.
 428	 */
 429	advk_pcie_issue_perst(pcie);
 430
 431	/*
 432	 * PERST# signal could have been asserted by pinctrl subsystem before
 433	 * probe() callback has been called or issued explicitly by reset gpio
 434	 * function advk_pcie_issue_perst(), making the endpoint going into
 435	 * fundamental reset. As required by PCI Express spec (PCI Express
 436	 * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1
 437	 * Conventional Reset) a delay for at least 100ms after such a reset
 438	 * before sending a Configuration Request to the device is needed.
 439	 * So wait until PCIe link is up. Function advk_pcie_wait_for_link()
 440	 * waits for link at least 900ms.
 441	 */
 442	ret = advk_pcie_wait_for_link(pcie);
 443	if (ret < 0)
 444		dev_err(dev, "link never came up\n");
 445	else
 446		dev_info(dev, "link up\n");
 447}
 448
 449/*
 450 * Set PCIe address window register which could be used for memory
 451 * mapping.
 452 */
 453static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
 454				 phys_addr_t match, phys_addr_t remap,
 455				 phys_addr_t mask, u32 actions)
 456{
 457	advk_writel(pcie, OB_WIN_ENABLE |
 458			  lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
 459	advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
 460	advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
 461	advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
 462	advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
 463	advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
 464	advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
 465}
 466
 467static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
 468{
 469	advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
 470	advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
 471	advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
 472	advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
 473	advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
 474	advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
 475	advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
 476}
 477
/*
 * Bring the Aardvark controller into a known Root Complex configuration:
 * reference clock direction, Direct/RC mode, vendor/class ID fixups, error
 * capabilities, MSI setup, interrupt masking, outbound windows, and finally
 * link training.  The register writes below form an order-sensitive hardware
 * initialization sequence and must not be reordered.
 */
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
	phys_addr_t msi_addr;
	u32 reg;
	int i;

	/*
	 * Configure PCIe Reference clock. Direction is from the PCIe
	 * controller to the endpoint card, so enable transmitting of
	 * Reference clock differential signal off-chip and disable
	 * receiving off-chip differential signal.
	 */
	reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
	reg |= PCIE_CORE_REF_CLK_TX_ENABLE;
	reg &= ~PCIE_CORE_REF_CLK_RX_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_REF_CLK_REG);

	/* Set to Direct mode */
	reg = advk_readl(pcie, CTRL_CONFIG_REG);
	reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
	reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT);
	advk_writel(pcie, reg, CTRL_CONFIG_REG);

	/* Set PCI global control register to RC mode */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg |= (IS_RC_MSK << IS_RC_SHIFT);
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/*
	 * Replace incorrect PCI vendor id value 0x1b4b by correct value 0x11ab.
	 * VENDOR_ID_REG contains vendor id in low 16 bits and subsystem vendor
	 * id in high 16 bits. Updating this register changes readback value of
	 * read-only vendor id bits in PCIE_CORE_DEV_ID_REG register. Workaround
	 * for erratum 4.1: "The value of device and vendor ID is incorrect".
	 */
	reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
	advk_writel(pcie, reg, VENDOR_ID_REG);

	/*
	 * Change Class Code of PCI Bridge device to PCI Bridge (0x600400),
	 * because the default value is Mass storage controller (0x010400).
	 *
	 * Note that this Aardvark PCI Bridge does not have compliant Type 1
	 * Configuration Space and it even cannot be accessed via Aardvark's
	 * PCI config space access method. Something like config space is
	 * available in internal Aardvark registers starting at offset 0x0
	 * and is reported as Type 0. In range 0x10 - 0x34 it has totally
	 * different registers.
	 *
	 * Therefore driver uses emulation of PCI Bridge which emulates
	 * access to configuration space via internal Aardvark registers or
	 * emulated configuration buffer.
	 */
	reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG);
	reg &= ~0xffffff00;
	reg |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
	advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG);

	/* Disable Root Bridge I/O space, memory space and bus mastering */
	reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
	reg &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);

	/* Set Advanced Error Capabilities and Control PF0 register */
	reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
		PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
		PCIE_CORE_ERR_CAPCTL_ECRC_CHCK |
		PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
	advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);

	/* Set PCIe Device Control register */
	reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
	reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
	reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
	reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
	reg &= ~PCI_EXP_DEVCTL_READRQ;
	reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
	reg |= PCI_EXP_DEVCTL_READRQ_512B;
	advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);

	/* Program PCIe Control 2 to disable strict ordering */
	reg = PCIE_CORE_CTRL2_RESERVED |
		PCIE_CORE_CTRL2_TD_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

	/* Set lane X1 */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg &= ~LANE_CNT_MSK;
	reg |= LANE_COUNT_1;
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/* Set MSI address: doorbell writes target this driver's private data */
	msi_addr = virt_to_phys(pcie);
	advk_writel(pcie, lower_32_bits(msi_addr), PCIE_MSI_ADDR_LOW_REG);
	advk_writel(pcie, upper_32_bits(msi_addr), PCIE_MSI_ADDR_HIGH_REG);

	/* Enable MSI */
	reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
	reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

	/* Clear all interrupts */
	advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);

	/* Disable All ISR0/1 and MSI Sources */
	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
	advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);

	/* Unmask summary MSI interrupt */
	reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
	reg &= ~PCIE_ISR0_MSI_INT_PENDING;
	advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);

	/* Unmask PME interrupt for processing of PME requester */
	reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
	reg &= ~PCIE_MSG_PM_PME_MASK;
	advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);

	/* Enable summary interrupt for GIC SPI source */
	reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
	advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);

	/*
	 * Enable AXI address window location generation:
	 * When it is enabled, the default outbound window
	 * configurations (Default User Field: 0xD0074CFC)
	 * are used to transparent address translation for
	 * the outbound transactions. Thus, PCIe address
	 * windows are not required for transparent memory
	 * access when default outbound window configuration
	 * is set for memory access.
	 */
	reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
	reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

	/*
	 * Set memory access in Default User Field so it
	 * is not required to configure PCIe address for
	 * transparent memory access.
	 */
	advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);

	/*
	 * Bypass the address window mapping for PIO:
	 * Since PIO access already contains all required
	 * info over AXI interface by PIO registers, the
	 * address window is not required.
	 */
	reg = advk_readl(pcie, PIO_CTRL);
	reg |= PIO_CTRL_ADDR_WIN_DISABLE;
	advk_writel(pcie, reg, PIO_CTRL);

	/*
	 * Configure PCIe address windows for non-memory or
	 * non-transparent access as by default PCIe uses
	 * transparent memory access.
	 */
	for (i = 0; i < pcie->wins_count; i++)
		advk_pcie_set_ob_win(pcie, i,
				     pcie->wins[i].match, pcie->wins[i].remap,
				     pcie->wins[i].mask, pcie->wins[i].actions);

	/* Disable remaining PCIe outbound windows */
	for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
		advk_pcie_disable_ob_win(pcie, i);

	advk_pcie_train_link(pcie);
}
 651
/*
 * Decode the completion status of the last PIO config transfer.
 *
 * @allow_rrs: when true, a Configuration Request Retry Status completion is
 *             reported as success with *val set to CFG_RD_RRS_VAL (RRS
 *             Software Visibility semantics); when false, RRS yields -EAGAIN
 *             so the caller re-issues the request.
 * @val:       output for the read data; may be NULL for write transfers.
 *             Read only on a successful completion (PIO_RD_DATA must not be
 *             read on error paths).
 *
 * Returns 0 on success, -EAGAIN when the request should be retried, or
 * another negative errno describing the completion error.
 */
static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_rrs, u32 *val)
{
	struct device *dev = &pcie->pdev->dev;
	u32 reg;
	unsigned int status;
	char *strcomp_status, *str_posted;
	int ret;

	reg = advk_readl(pcie, PIO_STAT);
	status = (reg & PIO_COMPLETION_STATUS_MASK) >>
		PIO_COMPLETION_STATUS_SHIFT;

	/*
	 * According to HW spec, the PIO status check sequence as below:
	 * 1) even if COMPLETION_STATUS(bit9:7) indicates successful,
	 *    it still needs to check Error Status(bit11), only when this bit
	 *    indicates no error happen, the operation is successful.
	 * 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only
	 *    means a PIO write error, and for PIO read it is successful with
	 *    a read value of 0xFFFFFFFF.
	 * 3) value Config Request Retry Status(RRS) of COMPLETION_STATUS(bit9:7)
	 *    only means a PIO write error, and for PIO read it is successful
	 *    with a read value of 0xFFFF0001.
	 * 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means
	 *    error for both PIO read and PIO write operation.
	 * 5) other errors are indicated as 'unknown'.
	 */
	switch (status) {
	case PIO_COMPLETION_STATUS_OK:
		if (reg & PIO_ERR_STATUS) {
			strcomp_status = "COMP_ERR";
			ret = -EFAULT;
			break;
		}
		/* Get the read result */
		if (val)
			*val = advk_readl(pcie, PIO_RD_DATA);
		/* No error */
		strcomp_status = NULL;
		ret = 0;
		break;
	case PIO_COMPLETION_STATUS_UR:
		strcomp_status = "UR";
		ret = -EOPNOTSUPP;
		break;
	case PIO_COMPLETION_STATUS_RRS:
		if (allow_rrs && val) {
			/* PCIe r6.0, sec 2.3.2, says:
			 * If Configuration RRS Software Visibility is enabled:
			 * For a Configuration Read Request that includes both
			 * bytes of the Vendor ID field of a device Function's
			 * Configuration Space Header, the Root Complex must
			 * complete the Request to the host by returning a
			 * read-data value of 0001h for the Vendor ID field and
			 * all '1's for any additional bytes included in the
			 * request.
			 *
			 * So RRS in this case is not an error status.
			 */
			*val = CFG_RD_RRS_VAL;
			strcomp_status = NULL;
			ret = 0;
			break;
		}
		/* PCIe r6.0, sec 2.3.2, says:
		 * If RRS Software Visibility is not enabled, the Root Complex
		 * must re-issue the Configuration Request as a new Request.
		 * If RRS Software Visibility is enabled: For a Configuration
		 * Write Request or for any other Configuration Read Request,
		 * the Root Complex must re-issue the Configuration Request as
		 * a new Request.
		 * A Root Complex implementation may choose to limit the number
		 * of Configuration Request/RRS Completion Status loops before
		 * determining that something is wrong with the target of the
		 * Request and taking appropriate action, e.g., complete the
		 * Request to the host as a failed transaction.
		 *
		 * So return -EAGAIN and caller (pci-aardvark.c driver) will
		 * re-issue request again up to the PIO_RETRY_CNT retries.
		 */
		strcomp_status = "RRS";
		ret = -EAGAIN;
		break;
	case PIO_COMPLETION_STATUS_CA:
		strcomp_status = "CA";
		ret = -ECANCELED;
		break;
	default:
		strcomp_status = "Unknown";
		ret = -EINVAL;
		break;
	}

	/* NULL strcomp_status means success — nothing to log */
	if (!strcomp_status)
		return ret;

	if (reg & PIO_NON_POSTED_REQ)
		str_posted = "Non-posted";
	else
		str_posted = "Posted";

	dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
		str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));

	return ret;
}
 758
 759static int advk_pcie_wait_pio(struct advk_pcie *pcie)
 760{
 761	struct device *dev = &pcie->pdev->dev;
 762	int i;
 763
 764	for (i = 1; i <= PIO_RETRY_CNT; i++) {
 765		u32 start, isr;
 766
 767		start = advk_readl(pcie, PIO_START);
 768		isr = advk_readl(pcie, PIO_ISR);
 769		if (!start && isr)
 770			return i;
 771		udelay(PIO_RETRY_DELAY);
 772	}
 773
 774	dev_err(dev, "PIO read/write transfer time out\n");
 775	return -ETIMEDOUT;
 776}
 777
 778static pci_bridge_emul_read_status_t
 779advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
 780				    int reg, u32 *value)
 781{
 782	struct advk_pcie *pcie = bridge->data;
 783
 784	switch (reg) {
 785	case PCI_COMMAND:
 786		*value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
 787		return PCI_BRIDGE_EMUL_HANDLED;
 788
 789	case PCI_INTERRUPT_LINE: {
 790		/*
 791		 * From the whole 32bit register we support reading from HW only
 792		 * two bits: PCI_BRIDGE_CTL_BUS_RESET and PCI_BRIDGE_CTL_SERR.
 793		 * Other bits are retrieved only from emulated config buffer.
 794		 */
 795		__le32 *cfgspace = (__le32 *)&bridge->conf;
 796		u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
 797		if (advk_readl(pcie, PCIE_ISR0_MASK_REG) & PCIE_ISR0_ERR_MASK)
 798			val &= ~(PCI_BRIDGE_CTL_SERR << 16);
 799		else
 800			val |= PCI_BRIDGE_CTL_SERR << 16;
 801		if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
 802			val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
 803		else
 804			val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
 805		*value = val;
 806		return PCI_BRIDGE_EMUL_HANDLED;
 807	}
 808
 809	default:
 810		return PCI_BRIDGE_EMUL_NOT_HANDLED;
 811	}
 812}
 813
/*
 * Emulated root bridge: write hook for the base (type 1) config header.
 * Registers that are backed by real Aardvark hardware state are forwarded
 * to the corresponding HW registers; everything else is left to the
 * emulated config buffer maintained by pci-bridge-emul.
 */
static void
advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
				     int reg, u32 old, u32 new, u32 mask)
{
	struct advk_pcie *pcie = bridge->data;

	switch (reg) {
	case PCI_COMMAND:
		advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
		break;

	case PCI_INTERRUPT_LINE:
		/*
		 * According to Figure 6-3: Pseudo Logic Diagram for Error
		 * Message Controls in PCIe base specification, SERR# Enable bit
		 * in Bridge Control register enable receiving of ERR_* messages
		 */
		if (mask & (PCI_BRIDGE_CTL_SERR << 16)) {
			u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
			/* SERR# enabled => unmask ERR interrupts, and vice versa */
			if (new & (PCI_BRIDGE_CTL_SERR << 16))
				val &= ~PCIE_ISR0_ERR_MASK;
			else
				val |= PCIE_ISR0_ERR_MASK;
			advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
		}
		if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
			u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
			/* Bridge Control Bus Reset bit drives HW Hot Reset generation */
			if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
				val |= HOT_RESET_GEN;
			else
				val &= ~HOT_RESET_GEN;
			advk_writel(pcie, val, PCIE_CORE_CTRL1_REG);
		}
		break;

	default:
		break;
	}
}
 853
/*
 * Emulated root bridge: read hook for the PCIe capability registers.
 * Most registers are read straight from the HW PCIe capability block at
 * PCIE_CORE_PCIEXP_CAP; link-related bits that the HW does not implement
 * correctly are fixed up from the driver's own link-state helpers.
 */
static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
				    int reg, u32 *value)
{
	struct advk_pcie *pcie = bridge->data;


	switch (reg) {
	/*
	 * PCI_EXP_SLTCAP, PCI_EXP_SLTCTL, PCI_EXP_RTCTL and PCI_EXP_RTSTA are
	 * also supported, but do not need to be handled here, because their
	 * values are stored in emulated config space buffer, and we read them
	 * from there when needed.
	 */

	case PCI_EXP_LNKCAP: {
		u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
		/*
		 * PCI_EXP_LNKCAP_DLLLARC bit is hardwired in aardvark HW to 0.
		 * But support for PCI_EXP_LNKSTA_DLLLA is emulated via ltssm
		 * state so explicitly enable PCI_EXP_LNKCAP_DLLLARC flag.
		 */
		val |= PCI_EXP_LNKCAP_DLLLARC;
		*value = val;
		return PCI_BRIDGE_EMUL_HANDLED;
	}

	case PCI_EXP_LNKCTL: {
		/* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
		u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
			~(PCI_EXP_LNKSTA_LT << 16);
		/* Emulate Link Training and Data Link Layer Link Active bits */
		if (advk_pcie_link_training(pcie))
			val |= (PCI_EXP_LNKSTA_LT << 16);
		if (advk_pcie_link_active(pcie))
			val |= (PCI_EXP_LNKSTA_DLLLA << 16);
		*value = val;
		return PCI_BRIDGE_EMUL_HANDLED;
	}

	/* These registers are passed through from HW unchanged */
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
		*value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
		return PCI_BRIDGE_EMUL_HANDLED;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

}
 907
/*
 * Emulated root bridge: write hook for the PCIe capability registers.
 * Writable control registers are forwarded to the HW capability block;
 * PCI_EXP_RTCTL is kept purely in the emulated buffer, restricted to the
 * bits the driver actually emulates.
 */
static void
advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
				     int reg, u32 old, u32 new, u32 mask)
{
	struct advk_pcie *pcie = bridge->data;

	switch (reg) {
	case PCI_EXP_LNKCTL:
		advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
		/* A Retrain Link request must be followed by waiting for completion */
		if (new & PCI_EXP_LNKCTL_RL)
			advk_pcie_wait_for_retrain(pcie);
		break;

	case PCI_EXP_RTCTL: {
		u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
		/* Only emulation of PMEIE and RRS_SVE bits is provided */
		rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_RRS_SVE;
		bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
		break;
	}

	/*
	 * PCI_EXP_RTSTA is also supported, but does not need to be handled
	 * here, because its value is stored in emulated config space buffer,
	 * and we write it there when needed.
	 */

	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCTL2:
		advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
		break;

	default:
		break;
	}
}
 945
/*
 * Emulated root bridge: read hook for the extended config space (AER
 * capability). All handled registers are read from the HW AER block at
 * PCIE_CORE_PCIERR_CAP; the capability header is patched so AER appears
 * to be the last extended capability.
 */
static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
				   int reg, u32 *value)
{
	struct advk_pcie *pcie = bridge->data;

	switch (reg) {
	case 0:
		*value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);

		/*
		 * PCI_EXT_CAP_NEXT bits are set to offset 0x150, but Armada
		 * 3700 Functional Specification does not document registers
		 * at those addresses.
		 *
		 * Thus we clear PCI_EXT_CAP_NEXT bits to make Advanced Error
		 * Reporting Capability header the last Extended Capability.
		 * If we obtain documentation for those registers in the
		 * future, this can be changed.
		 */
		*value &= 0x000fffff;
		return PCI_BRIDGE_EMUL_HANDLED;

	case PCI_ERR_UNCOR_STATUS:
	case PCI_ERR_UNCOR_MASK:
	case PCI_ERR_UNCOR_SEVER:
	case PCI_ERR_COR_STATUS:
	case PCI_ERR_COR_MASK:
	case PCI_ERR_CAP:
	case PCI_ERR_HEADER_LOG + 0:
	case PCI_ERR_HEADER_LOG + 4:
	case PCI_ERR_HEADER_LOG + 8:
	case PCI_ERR_HEADER_LOG + 12:
	case PCI_ERR_ROOT_COMMAND:
	case PCI_ERR_ROOT_STATUS:
	case PCI_ERR_ROOT_ERR_SRC:
		*value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);
		return PCI_BRIDGE_EMUL_HANDLED;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}
}
 989
/*
 * Emulated root bridge: write hook for the extended config space (AER
 * capability). Status registers are write-1-to-clear: masking the written
 * value down to the changed bits prevents clearing bits the caller did
 * not touch; all other handled registers are written through verbatim.
 */
static void
advk_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
				    int reg, u32 old, u32 new, u32 mask)
{
	struct advk_pcie *pcie = bridge->data;

	switch (reg) {
	/* These are W1C registers, so clear other bits */
	case PCI_ERR_UNCOR_STATUS:
	case PCI_ERR_COR_STATUS:
	case PCI_ERR_ROOT_STATUS:
		new &= mask;
		fallthrough;

	case PCI_ERR_UNCOR_MASK:
	case PCI_ERR_UNCOR_SEVER:
	case PCI_ERR_COR_MASK:
	case PCI_ERR_CAP:
	case PCI_ERR_HEADER_LOG + 0:
	case PCI_ERR_HEADER_LOG + 4:
	case PCI_ERR_HEADER_LOG + 8:
	case PCI_ERR_HEADER_LOG + 12:
	case PCI_ERR_ROOT_COMMAND:
	case PCI_ERR_ROOT_ERR_SRC:
		advk_writel(pcie, new, PCIE_CORE_PCIERR_CAP + reg);
		break;

	default:
		break;
	}
}
1021
/* Config-space accessors for the emulated root bridge (pci-bridge-emul). */
static const struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
	.read_base = advk_pci_bridge_emul_base_conf_read,
	.write_base = advk_pci_bridge_emul_base_conf_write,
	.read_pcie = advk_pci_bridge_emul_pcie_conf_read,
	.write_pcie = advk_pci_bridge_emul_pcie_conf_write,
	.read_ext = advk_pci_bridge_emul_ext_conf_read,
	.write_ext = advk_pci_bridge_emul_ext_conf_write,
};
1030
1031/*
1032 * Initialize the configuration space of the PCI-to-PCI bridge
1033 * associated with the given PCIe interface.
1034 */
static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
	struct pci_bridge_emul *bridge = &pcie->bridge;

	/* Mirror vendor/device/class IDs from the HW registers */
	bridge->conf.vendor =
		cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
	bridge->conf.device =
		cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
	bridge->conf.class_revision =
		cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);

	/* Support 32 bits I/O addressing */
	bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
	bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;

	/* Support 64 bits memory pref */
	bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
	bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);

	/* Support interrupt A for MSI feature */
	bridge->conf.intpin = PCI_INTERRUPT_INTA;

	/*
	 * Aardvark HW provides PCIe Capability structure in version 2 and
	 * indicate slot support, which is emulated.
	 */
	bridge->pcie_conf.cap = cpu_to_le16(2 | PCI_EXP_FLAGS_SLOT);

	/*
	 * Set Presence Detect State bit permanently since there is no support
	 * for unplugging the card nor detecting whether it is plugged. (If a
	 * platform exists in the future that supports it, via a GPIO for
	 * example, it should be implemented via this bit.)
	 *
	 * Set physical slot number to 1 since there is only one port and zero
	 * value is reserved for ports within the same silicon as Root Port
	 * which is not our case.
	 */
	bridge->pcie_conf.slotcap = cpu_to_le32(FIELD_PREP(PCI_EXP_SLTCAP_PSN,
							   1));
	bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);

	/* Indicates supports for Completion Retry Status */
	bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_RRS_SV);

	bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff;
	bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16;
	bridge->has_pcie = true;
	bridge->pcie_start = PCIE_CORE_PCIEXP_CAP;
	bridge->data = pcie;
	bridge->ops = &advk_pci_bridge_emul_ops;

	/* Returns 0 on success or a negative errno from pci-bridge-emul */
	return pci_bridge_emul_init(bridge, 0);
}
1089
1090static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
1091				  int devfn)
1092{
1093	if (pci_is_root_bus(bus) && PCI_SLOT(devfn) != 0)
1094		return false;
1095
1096	/*
1097	 * If the link goes down after we check for link-up, we have a problem:
1098	 * if a PIO request is executed while link-down, the whole controller
1099	 * gets stuck in a non-functional state, and even after link comes up
1100	 * again, PIO requests won't work anymore, and a reset of the whole PCIe
1101	 * controller is needed. Therefore we need to prevent sending PIO
1102	 * requests while the link is down.
1103	 */
1104	if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie))
1105		return false;
1106
1107	return true;
1108}
1109
/*
 * Check whether a previously started PIO transfer is still in flight.
 * Returns true (and logs an error) if PIO_START is still set, in which
 * case no new transfer may be started.
 */
static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;

	/*
	 * Trying to start a new PIO transfer when previous has not completed
	 * cause External Abort on CPU which results in kernel panic:
	 *
	 *     SError Interrupt on CPU0, code 0xbf000002 -- SError
	 *     Kernel panic - not syncing: Asynchronous SError Interrupt
	 *
	 * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
	 * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
	 * concurrent calls at the same time. But because PIO transfer may take
	 * about 1.5s when link is down or card is disconnected, it means that
	 * advk_pcie_wait_pio() does not always have to wait for completion.
	 *
	 * Some versions of ARM Trusted Firmware handles this External Abort at
	 * EL3 level and mask it to prevent kernel panic. Relevant TF-A commit:
	 * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
	 */
	if (advk_readl(pcie, PIO_START)) {
		dev_err(dev, "Previous PIO read/write transfer is still running\n");
		return true;
	}

	return false;
}
1138
/*
 * pci_ops.read: read config space. Root-bus accesses go to the emulated
 * bridge; downstream accesses are issued as PIO transfers, retried on
 * RRS completions up to PIO_RETRY_CNT, and may return the synthetic
 * CFG_RD_RRS_VAL when RRS visibility is enabled on the Root Port.
 */
static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
			     int where, int size, u32 *val)
{
	struct advk_pcie *pcie = bus->sysdata;
	int retry_count;
	bool allow_rrs;
	u32 reg;
	int ret;

	if (!advk_pcie_valid_device(pcie, bus, devfn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Root bus is fully emulated in software */
	if (pci_is_root_bus(bus))
		return pci_bridge_emul_conf_read(&pcie->bridge, where,
						 size, val);

	/*
	 * Configuration Request Retry Status (RRS) is possible to return
	 * only when reading both bytes from PCI_VENDOR_ID at once and
	 * RRS_SVE flag on Root Port is enabled.
	 */
	allow_rrs = (where == PCI_VENDOR_ID) && (size >= 2) &&
		    (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
		     PCI_EXP_RTCTL_RRS_SVE);

	if (advk_pcie_pio_is_running(pcie))
		goto try_rrs;

	/* Program the control register */
	reg = advk_readl(pcie, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	/* Type 0 for devices on the directly attached bus, type 1 beyond */
	if (pci_is_root_bus(bus->parent))
		reg |= PCIE_CONFIG_RD_TYPE0;
	else
		reg |= PCIE_CONFIG_RD_TYPE1;
	advk_writel(pcie, reg, PIO_CTRL);

	/* Program the address registers */
	reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
	advk_writel(pcie, reg, PIO_ADDR_LS);
	advk_writel(pcie, 0, PIO_ADDR_MS);

	/* Program the data strobe */
	advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);

	retry_count = 0;
	do {
		/* Clear PIO DONE ISR and start the transfer */
		advk_writel(pcie, 1, PIO_ISR);
		advk_writel(pcie, 1, PIO_START);

		ret = advk_pcie_wait_pio(pcie);
		if (ret < 0)
			goto try_rrs;

		retry_count += ret;

		/* Check PIO status and get the read result */
		ret = advk_pcie_check_pio_status(pcie, allow_rrs, val);
	} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);

	if (ret < 0)
		goto fail;

	/* Extract the requested byte/word from the 32-bit read result */
	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;

try_rrs:
	/*
	 * If it is possible, return Configuration Request Retry Status so
	 * that caller tries to issue the request again instead of failing.
	 */
	if (allow_rrs) {
		*val = CFG_RD_RRS_VAL;
		return PCIBIOS_SUCCESSFUL;
	}

fail:
	*val = 0xffffffff;
	return PCIBIOS_SET_FAILED;
}
1224
/*
 * pci_ops.write: write config space. Root-bus writes go to the emulated
 * bridge; downstream writes are issued as PIO transfers with a byte
 * strobe derived from @where/@size, retried on RRS completions up to
 * PIO_RETRY_CNT.
 */
static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
				int where, int size, u32 val)
{
	struct advk_pcie *pcie = bus->sysdata;
	u32 reg;
	u32 data_strobe = 0x0;
	int retry_count;
	int offset;
	int ret;

	if (!advk_pcie_valid_device(pcie, bus, devfn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (pci_is_root_bus(bus))
		return pci_bridge_emul_conf_write(&pcie->bridge, where,
						  size, val);

	/* Unaligned writes are not supported by the PIO engine setup below */
	if (where % size)
		return PCIBIOS_SET_FAILED;

	if (advk_pcie_pio_is_running(pcie))
		return PCIBIOS_SET_FAILED;

	/* Program the control register */
	reg = advk_readl(pcie, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	/* Type 0 for devices on the directly attached bus, type 1 beyond */
	if (pci_is_root_bus(bus->parent))
		reg |= PCIE_CONFIG_WR_TYPE0;
	else
		reg |= PCIE_CONFIG_WR_TYPE1;
	advk_writel(pcie, reg, PIO_CTRL);

	/* Program the address registers */
	reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
	advk_writel(pcie, reg, PIO_ADDR_LS);
	advk_writel(pcie, 0, PIO_ADDR_MS);

	/* Calculate the write strobe */
	offset      = where & 0x3;
	reg         = val << (8 * offset);
	data_strobe = GENMASK(size - 1, 0) << offset;

	/* Program the data register */
	advk_writel(pcie, reg, PIO_WR_DATA);

	/* Program the data strobe */
	advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);

	retry_count = 0;
	do {
		/* Clear PIO DONE ISR and start the transfer */
		advk_writel(pcie, 1, PIO_ISR);
		advk_writel(pcie, 1, PIO_START);

		ret = advk_pcie_wait_pio(pcie);
		if (ret < 0)
			return PCIBIOS_SET_FAILED;

		retry_count += ret;

		ret = advk_pcie_check_pio_status(pcie, false, NULL);
	} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);

	return ret < 0 ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
}
1290
/* Config accessors installed on the host bridge */
static struct pci_ops advk_pcie_ops = {
	.read = advk_pcie_rd_conf,
	.write = advk_pcie_wr_conf,
};
1295
1296static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
1297					 struct msi_msg *msg)
1298{
1299	struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
1300	phys_addr_t msi_addr = virt_to_phys(pcie);
1301
1302	msg->address_lo = lower_32_bits(msi_addr);
1303	msg->address_hi = upper_32_bits(msi_addr);
1304	msg->data = data->hwirq;
1305}
1306
1307static void advk_msi_irq_mask(struct irq_data *d)
1308{
1309	struct advk_pcie *pcie = d->domain->host_data;
1310	irq_hw_number_t hwirq = irqd_to_hwirq(d);
1311	unsigned long flags;
1312	u32 mask;
1313
1314	raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
1315	mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
1316	mask |= BIT(hwirq);
1317	advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
1318	raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
1319}
1320
1321static void advk_msi_irq_unmask(struct irq_data *d)
1322{
1323	struct advk_pcie *pcie = d->domain->host_data;
1324	irq_hw_number_t hwirq = irqd_to_hwirq(d);
1325	unsigned long flags;
1326	u32 mask;
1327
1328	raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
1329	mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
1330	mask &= ~BIT(hwirq);
1331	advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
1332	raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
1333}
1334
/* Top-level MSI mask: mask in the PCI MSI layer, then in the parent domain. */
static void advk_msi_top_irq_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}
1340
/* Top-level MSI unmask: unmask in the PCI MSI layer, then in the parent domain. */
static void advk_msi_top_irq_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
1346
/* irq_chip for the inner (HW-facing) MSI domain */
static struct irq_chip advk_msi_bottom_irq_chip = {
	.name			= "MSI",
	.irq_compose_msi_msg	= advk_msi_irq_compose_msi_msg,
	.irq_mask		= advk_msi_irq_mask,
	.irq_unmask		= advk_msi_irq_unmask,
};
1353
/*
 * Allocate @nr_irqs contiguous MSI hwirqs from the bitmap (rounded up to a
 * power of two as required for multi-MSI) and bind them to @virq onwards.
 * Returns 0 on success or -ENOSPC when no suitable region is free.
 */
static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs, void *args)
{
	struct advk_pcie *pcie = domain->host_data;
	int hwirq, i;

	mutex_lock(&pcie->msi_used_lock);
	hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM,
					order_base_2(nr_irqs));
	mutex_unlock(&pcie->msi_used_lock);
	if (hwirq < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &advk_msi_bottom_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);

	return 0;
}
1376
1377static void advk_msi_irq_domain_free(struct irq_domain *domain,
1378				     unsigned int virq, unsigned int nr_irqs)
1379{
1380	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
1381	struct advk_pcie *pcie = domain->host_data;
1382
1383	mutex_lock(&pcie->msi_used_lock);
1384	bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs));
1385	mutex_unlock(&pcie->msi_used_lock);
1386}
1387
/* Domain ops for the inner MSI domain */
static const struct irq_domain_ops advk_msi_domain_ops = {
	.alloc = advk_msi_irq_domain_alloc,
	.free = advk_msi_irq_domain_free,
};
1392
1393static void advk_pcie_irq_mask(struct irq_data *d)
1394{
1395	struct advk_pcie *pcie = d->domain->host_data;
1396	irq_hw_number_t hwirq = irqd_to_hwirq(d);
1397	unsigned long flags;
1398	u32 mask;
1399
1400	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
1401	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
1402	mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
1403	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
1404	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
1405}
1406
1407static void advk_pcie_irq_unmask(struct irq_data *d)
1408{
1409	struct advk_pcie *pcie = d->domain->host_data;
1410	irq_hw_number_t hwirq = irqd_to_hwirq(d);
1411	unsigned long flags;
1412	u32 mask;
1413
1414	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
1415	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
1416	mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
1417	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
1418	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
1419}
1420
/* irq_domain map callback: wire an INTx virq to the driver's irq_chip. */
static int advk_pcie_irq_map(struct irq_domain *h,
			     unsigned int virq, irq_hw_number_t hwirq)
{
	struct advk_pcie *pcie = h->host_data;

	/* INTx lines are level-triggered */
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &pcie->irq_chip,
				 handle_level_irq);
	irq_set_chip_data(virq, pcie);

	return 0;
}
1433
/* Domain ops for the legacy INTx domain */
static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
	.map = advk_pcie_irq_map,
	.xlate = irq_domain_xlate_onecell,
};
1438
/* irq_chip for the top-level (PCI-facing) MSI domain */
static struct irq_chip advk_msi_irq_chip = {
	.name		= "advk-MSI",
	.irq_mask	= advk_msi_top_irq_mask,
	.irq_unmask	= advk_msi_top_irq_unmask,
};
1444
/* Capabilities advertised for the PCI MSI domain (multi-MSI and MSI-X) */
static struct msi_domain_info advk_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI |
		  MSI_FLAG_PCI_MSIX,
	.chip	= &advk_msi_irq_chip,
};
1451
/*
 * Create the two-level MSI domain hierarchy: a linear inner domain that
 * manages the MSI_IRQ_NUM hardware vectors, and a PCI MSI domain on top.
 * Returns 0 on success or -ENOMEM, undoing the inner domain on failure.
 */
static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;

	raw_spin_lock_init(&pcie->msi_irq_lock);
	mutex_init(&pcie->msi_used_lock);

	pcie->msi_inner_domain =
		irq_domain_add_linear(NULL, MSI_IRQ_NUM,
				      &advk_msi_domain_ops, pcie);
	if (!pcie->msi_inner_domain)
		return -ENOMEM;

	pcie->msi_domain =
		pci_msi_create_irq_domain(dev_fwnode(dev),
					  &advk_msi_domain_info,
					  pcie->msi_inner_domain);
	if (!pcie->msi_domain) {
		irq_domain_remove(pcie->msi_inner_domain);
		return -ENOMEM;
	}

	return 0;
}
1476
/* Tear down the MSI domain hierarchy, outermost first. */
static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
{
	irq_domain_remove(pcie->msi_domain);
	irq_domain_remove(pcie->msi_inner_domain);
}
1482
/*
 * Create the legacy INTx irq domain from the first child node of the
 * controller's DT node (the interrupt-controller subnode). Returns 0 on
 * success, -ENODEV if the subnode is missing, or -ENOMEM.
 */
static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;
	struct irq_chip *irq_chip;
	int ret = 0;

	raw_spin_lock_init(&pcie->irq_lock);

	pcie_intc_node =  of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	irq_chip = &pcie->irq_chip;

	/* Per-device chip name, e.g. "<dev>-irq" */
	irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
					dev_name(dev));
	if (!irq_chip->name) {
		ret = -ENOMEM;
		goto out_put_node;
	}

	irq_chip->irq_mask = advk_pcie_irq_mask;
	irq_chip->irq_unmask = advk_pcie_irq_unmask;

	pcie->irq_domain =
		irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
				      &advk_pcie_irq_domain_ops, pcie);
	if (!pcie->irq_domain) {
		dev_err(dev, "Failed to get a INTx IRQ domain\n");
		ret = -ENOMEM;
		goto out_put_node;
	}

out_put_node:
	/* The node reference is only needed for domain creation */
	of_node_put(pcie_intc_node);
	return ret;
}
1524
/* Tear down the legacy INTx irq domain. */
static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
{
	irq_domain_remove(pcie->irq_domain);
}
1529
/* Minimal irq_chip for interrupts raised by the Root Port itself (PME/AER) */
static struct irq_chip advk_rp_irq_chip = {
	.name = "advk-RP",
};
1533
/* irq_domain map callback for the Root Port's own interrupt. */
static int advk_pcie_rp_irq_map(struct irq_domain *h,
				unsigned int virq, irq_hw_number_t hwirq)
{
	struct advk_pcie *pcie = h->host_data;

	irq_set_chip_and_handler(virq, &advk_rp_irq_chip, handle_simple_irq);
	irq_set_chip_data(virq, pcie);

	return 0;
}
1544
/* Domain ops for the single-interrupt Root Port domain */
static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = {
	.map = advk_pcie_rp_irq_map,
	.xlate = irq_domain_xlate_onecell,
};
1549
/*
 * Create the one-entry irq domain used for interrupts generated by the
 * Root Port itself (PME and AER). Returns 0 on success or -ENOMEM.
 */
static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie)
{
	pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1,
						    &advk_pcie_rp_irq_domain_ops,
						    pcie);
	if (!pcie->rp_irq_domain) {
		dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n");
		return -ENOMEM;
	}

	return 0;
}
1562
/* Tear down the Root Port irq domain. */
static void advk_pcie_remove_rp_irq_domain(struct advk_pcie *pcie)
{
	irq_domain_remove(pcie->rp_irq_domain);
}
1567
/*
 * Handle an inbound PM_PME message: latch the requester ID into the
 * emulated Root Status register and, if enabled, forward the event as
 * the Root Port interrupt.
 */
static void advk_pcie_handle_pme(struct advk_pcie *pcie)
{
	/* Requester ID lives in the upper 16 bits of the message log */
	u32 requester = advk_readl(pcie, PCIE_MSG_LOG_REG) >> 16;

	/* Acknowledge the PME bit in ISR0 */
	advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG);

	/*
	 * PCIE_MSG_LOG_REG contains the last inbound message, so store
	 * the requester ID only when PME was not asserted yet.
	 * Also do not trigger PME interrupt when PME is still asserted.
	 */
	if (!(le32_to_cpu(pcie->bridge.pcie_conf.rootsta) & PCI_EXP_RTSTA_PME)) {
		pcie->bridge.pcie_conf.rootsta = cpu_to_le32(requester | PCI_EXP_RTSTA_PME);

		/*
		 * Trigger PME interrupt only if PMEIE bit in Root Control is set.
		 * Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, so use PCIe interrupt 0.
		 */
		if (!(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_PMEIE))
			return;

		if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
			dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n");
	}
}
1593
/*
 * Dispatch all pending, unmasked MSI vectors: ack each one in the HW
 * status register and hand it to the inner MSI domain, then clear the
 * summary MSI bit in ISR0.
 */
static void advk_pcie_handle_msi(struct advk_pcie *pcie)
{
	u32 msi_val, msi_mask, msi_status, msi_idx;

	msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
	msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
	/* Only consider vectors that are both pending and unmasked */
	msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK);

	for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
		if (!(BIT(msi_idx) & msi_status))
			continue;

		/* Ack the vector before dispatching it */
		advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
		if (generic_handle_domain_irq(pcie->msi_inner_domain, msi_idx) == -EINVAL)
			dev_err_ratelimited(&pcie->pdev->dev, "unexpected MSI 0x%02x\n", msi_idx);
	}

	advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
		    PCIE_ISR0_REG);
}
1614
/*
 * Main interrupt demultiplexer: read ISR0/ISR1, filter by the mask
 * registers, and dispatch PME, ERR, MSI and legacy INTx events in that
 * order.
 */
static void advk_pcie_handle_int(struct advk_pcie *pcie)
{
	u32 isr0_val, isr0_mask, isr0_status;
	u32 isr1_val, isr1_mask, isr1_status;
	int i;

	isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
	isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
	isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);

	isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
	isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);

	/* Process PME interrupt as the first one to do not miss PME requester id */
	if (isr0_status & PCIE_MSG_PM_PME_MASK)
		advk_pcie_handle_pme(pcie);

	/* Process ERR interrupt */
	if (isr0_status & PCIE_ISR0_ERR_MASK) {
		advk_writel(pcie, PCIE_ISR0_ERR_MASK, PCIE_ISR0_REG);

		/*
		 * Aardvark HW returns zero for PCI_ERR_ROOT_AER_IRQ, so use
		 * PCIe interrupt 0
		 */
		if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
			dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n");
	}

	/* Process MSI interrupts */
	if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
		advk_pcie_handle_msi(pcie);

	/* Process legacy interrupts */
	for (i = 0; i < PCI_NUM_INTX; i++) {
		if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
			continue;

		/* Ack the INTx assertion before dispatching it */
		advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
			    PCIE_ISR1_REG);

		if (generic_handle_domain_irq(pcie->irq_domain, i) == -EINVAL)
			dev_err_ratelimited(&pcie->pdev->dev, "unexpected INT%c IRQ\n",
					    (char)i + 'A');
	}
}
1662
/*
 * Top-level (shared) interrupt handler: check that the controller
 * actually raised the interrupt, demultiplex it, then clear the
 * summary bit.
 */
static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
{
	struct advk_pcie *pcie = arg;
	u32 status;

	status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
	/* The IRQ line is shared; pass on interrupts that are not ours */
	if (!(status & PCIE_IRQ_CORE_INT))
		return IRQ_NONE;

	advk_pcie_handle_int(pcie);

	/* Clear interrupt */
	advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);

	return IRQ_HANDLED;
}
1679
/* pci_host_bridge.map_irq: resolve the INTx interrupt for @dev/@pin. */
static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct advk_pcie *pcie = dev->bus->sysdata;

	/*
	 * Emulated root bridge has its own emulated irq chip and irq domain.
	 * Argument pin is the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) and
	 * hwirq for irq_create_mapping() is indexed from zero.
	 */
	if (pci_is_root_bus(dev->bus))
		return irq_create_mapping(pcie->rp_irq_domain, pin - 1);
	else
		return of_irq_parse_and_map_pci(dev, slot, pin);
}
1694
/* Power off and de-initialize the PHY (both helpers tolerate a NULL phy). */
static void advk_pcie_disable_phy(struct advk_pcie *pcie)
{
	phy_power_off(pcie->phy);
	phy_exit(pcie->phy);
}
1700
1701static int advk_pcie_enable_phy(struct advk_pcie *pcie)
1702{
1703	int ret;
1704
1705	if (!pcie->phy)
1706		return 0;
1707
1708	ret = phy_init(pcie->phy);
1709	if (ret)
1710		return ret;
1711
1712	ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
1713	if (ret) {
1714		phy_exit(pcie->phy);
1715		return ret;
1716	}
1717
1718	ret = phy_power_on(pcie->phy);
1719	if (ret) {
1720		phy_exit(pcie->phy);
1721		return ret;
1722	}
1723
1724	return 0;
1725}
1726
/*
 * Look up the optional PHY from DT and enable it. A missing PHY is
 * tolerated (old bindings); -EPROBE_DEFER is propagated so probing can
 * be retried once the PHY driver is available.
 */
static int advk_pcie_setup_phy(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	int ret = 0;

	pcie->phy = devm_of_phy_get(dev, node, NULL);
	if (IS_ERR(pcie->phy) && (PTR_ERR(pcie->phy) == -EPROBE_DEFER))
		return PTR_ERR(pcie->phy);

	/* Old bindings miss the PHY handle */
	if (IS_ERR(pcie->phy)) {
		dev_warn(dev, "PHY unavailable (%ld)\n", PTR_ERR(pcie->phy));
		pcie->phy = NULL;
		return 0;
	}

	ret = advk_pcie_enable_phy(pcie);
	if (ret)
		dev_err(dev, "Failed to initialize PHY (%d)\n", ret);

	return ret;
}
1750
/*
 * Platform probe: compute outbound windows from the host bridge resource
 * list, map registers, request the IRQ, acquire reset GPIO and PHY,
 * program the HW, set up the emulated bridge and the three irq domains,
 * and finally register the PCI host bridge. Returns 0 or a negative errno,
 * unwinding the irq domains on late failure (earlier resources are devm-
 * managed).
 */
static int advk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct advk_pcie *pcie;
	struct pci_host_bridge *bridge;
	struct resource_entry *entry;
	int ret, irq;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	resource_list_for_each_entry(entry, &bridge->windows) {
		resource_size_t start = entry->res->start;
		resource_size_t size = resource_size(entry->res);
		unsigned long type = resource_type(entry->res);
		u64 win_size;

		/*
		 * Aardvark hardware allows to configure also PCIe window
		 * for config type 0 and type 1 mapping, but driver uses
		 * only PIO for issuing configuration transfers which does
		 * not use PCIe window configuration.
		 */
		if (type != IORESOURCE_MEM && type != IORESOURCE_IO)
			continue;

		/*
		 * Skip transparent memory resources. Default outbound access
		 * configuration is set to transparent memory access so it
		 * does not need window configuration.
		 */
		if (type == IORESOURCE_MEM && entry->offset == 0)
			continue;

		/*
		 * The n-th PCIe window is configured by tuple (match, remap, mask)
		 * and an access to address A uses this window if A matches the
		 * match with given mask.
		 * So every PCIe window size must be a power of two and every start
		 * address must be aligned to window size. Minimal size is 64 KiB
		 * because lower 16 bits of mask must be zero. Remapped address
		 * may have set only bits from the mask.
		 */
		while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
			/* Calculate the largest aligned window size */
			win_size = (1ULL << (fls64(size)-1)) |
				   (start ? (1ULL << __ffs64(start)) : 0);
			win_size = 1ULL << __ffs64(win_size);
			if (win_size < 0x10000)
				break;

			dev_dbg(dev,
				"Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
				pcie->wins_count, (unsigned long long)start,
				(unsigned long long)start + win_size, type);

			if (type == IORESOURCE_IO) {
				pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
				pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
			} else {
				pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
				pcie->wins[pcie->wins_count].match = start;
			}
			pcie->wins[pcie->wins_count].remap = start - entry->offset;
			pcie->wins[pcie->wins_count].mask = ~(win_size - 1);

			/* Remap address must only use bits covered by the mask */
			if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
				break;

			start += win_size;
			size -= win_size;
			pcie->wins_count++;
		}

		/* Leftover size means the region could not be fully covered */
		if (size > 0) {
			dev_err(&pcie->pdev->dev,
				"Invalid PCIe region [0x%llx-0x%llx]\n",
				(unsigned long long)entry->res->start,
				(unsigned long long)entry->res->end + 1);
			return -EINVAL;
		}
	}

	pcie->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
			       IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
			       pcie);
	if (ret) {
		dev_err(dev, "Failed to register interrupt\n");
		return ret;
	}

	pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get reset-gpio: %i\n", ret);
		return ret;
	}

	ret = gpiod_set_consumer_name(pcie->reset_gpio, "pcie1-reset");
	if (ret) {
		dev_err(dev, "Failed to set reset gpio name: %d\n", ret);
		return ret;
	}

	/* Clamp the DT-provided max link speed to the supported gen 1-3 range */
	ret = of_pci_get_max_link_speed(dev->of_node);
	if (ret <= 0 || ret > 3)
		pcie->link_gen = 3;
	else
		pcie->link_gen = ret;

	ret = advk_pcie_setup_phy(pcie);
	if (ret)
		return ret;

	advk_pcie_setup_hw(pcie);

	ret = advk_sw_pci_bridge_init(pcie);
	if (ret) {
		dev_err(dev, "Failed to register emulated root PCI bridge\n");
		return ret;
	}

	ret = advk_pcie_init_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize irq\n");
		return ret;
	}

	ret = advk_pcie_init_msi_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize irq\n");
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	ret = advk_pcie_init_rp_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize irq\n");
		advk_pcie_remove_msi_irq_domain(pcie);
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	bridge->sysdata = pcie;
	bridge->ops = &advk_pcie_ops;
	bridge->map_irq = advk_pcie_map_irq;

	ret = pci_host_probe(bridge);
	if (ret < 0) {
		/* Unwind the irq domains in reverse order of creation */
		advk_pcie_remove_rp_irq_domain(pcie);
		advk_pcie_remove_msi_irq_domain(pcie);
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	return 0;
}
1922
/*
 * advk_pcie_remove() - Tear down the controller on driver unbind.
 *
 * Ordering matters throughout: the root bus (with every downstream
 * device) is removed first so that no config-space or MMIO accesses can
 * race with the register writes below; interrupt sources are masked
 * before their status bits are cleared; and the IRQ domains are removed
 * only after all interrupt delivery has been quiesced.
 */
static void advk_pcie_remove(struct platform_device *pdev)
{
	struct advk_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	u32 val;
	int i;

	/* Remove PCI bus with all devices */
	pci_lock_rescan_remove();
	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	pci_unlock_rescan_remove();

	/* Disable Root Bridge I/O space, memory space and bus mastering */
	val = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
	val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	advk_writel(pcie, val, PCIE_CORE_CMD_STATUS_REG);

	/* Disable MSI */
	val = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
	val &= ~PCIE_CORE_CTRL2_MSI_ENABLE;
	advk_writel(pcie, val, PCIE_CORE_CTRL2_REG);

	/* Clear MSI address */
	advk_writel(pcie, 0, PCIE_MSI_ADDR_LOW_REG);
	advk_writel(pcie, 0, PCIE_MSI_ADDR_HIGH_REG);

	/* Mask all interrupts */
	advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_MASK_REG);

	/* Clear all interrupts (write-1-to-clear status registers) */
	advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);

	/* Remove IRQ domains (reverse order of creation in probe) */
	advk_pcie_remove_rp_irq_domain(pcie);
	advk_pcie_remove_msi_irq_domain(pcie);
	advk_pcie_remove_irq_domain(pcie);

	/* Free config space for emulated root bridge */
	pci_bridge_emul_cleanup(&pcie->bridge);

	/* Assert PERST# signal which prepares PCIe card for power down */
	if (pcie->reset_gpio)
		gpiod_set_value_cansleep(pcie->reset_gpio, 1);

	/* Disable link training */
	val = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	val &= ~LINK_TRAINING_EN;
	advk_writel(pcie, val, PCIE_CORE_CTRL0_REG);

	/* Disable outbound address windows mapping */
	for (i = 0; i < OB_WIN_COUNT; i++)
		advk_pcie_disable_ob_win(pcie, i);

	/* Disable phy */
	advk_pcie_disable_phy(pcie);
}
1986
/*
 * Devicetree match table: bind this driver to the Armada 3700 PCIe
 * controller node.  MODULE_DEVICE_TABLE() exports the table so module
 * autoloading works from the DT compatible string.
 */
static const struct of_device_id advk_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-3700-pcie", },
	{},
};
MODULE_DEVICE_TABLE(of, advk_pcie_of_match_table);
1992
/*
 * Platform driver registration.  module_platform_driver() generates the
 * module init/exit boilerplate that registers/unregisters the driver.
 */
static struct platform_driver advk_pcie_driver = {
	.driver = {
		.name = "advk-pcie",
		.of_match_table = advk_pcie_of_match_table,
	},
	.probe = advk_pcie_probe,
	.remove = advk_pcie_remove,
};
module_platform_driver(advk_pcie_driver);

MODULE_DESCRIPTION("Aardvark PCIe controller");
MODULE_LICENSE("GPL v2");