v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCIe host controller driver for Freescale i.MX6 SoCs
   4 *
   5 * Copyright (C) 2013 Kosagi
   6 *		https://www.kosagi.com
   7 *
   8 * Author: Sean Cross <xobs@kosagi.com>
   9 */
  10
  11#include <linux/bitfield.h>
  12#include <linux/clk.h>
  13#include <linux/delay.h>
  14#include <linux/gpio.h>
  15#include <linux/kernel.h>
  16#include <linux/mfd/syscon.h>
  17#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
  18#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
  19#include <linux/module.h>
  20#include <linux/of_gpio.h>
  21#include <linux/of_device.h>
  22#include <linux/of_address.h>
  23#include <linux/pci.h>
  24#include <linux/platform_device.h>
  25#include <linux/regmap.h>
  26#include <linux/regulator/consumer.h>
  27#include <linux/resource.h>
  28#include <linux/signal.h>
  29#include <linux/types.h>
  30#include <linux/interrupt.h>
  31#include <linux/reset.h>
  32#include <linux/pm_domain.h>
  33#include <linux/pm_runtime.h>
  34
  35#include "pcie-designware.h"
  36
  37#define IMX8MQ_GPR_PCIE_REF_USE_PAD		BIT(9)
  38#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN	BIT(10)
  39#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE	BIT(11)
  40#define IMX8MQ_GPR_PCIE_VREG_BYPASS		BIT(12)
  41#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE	GENMASK(11, 8)
  42#define IMX8MQ_PCIE2_BASE_ADDR			0x33c00000
  43
  44#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)
  45
  46enum imx6_pcie_variants {
  47	IMX6Q,
  48	IMX6SX,
  49	IMX6QP,
  50	IMX7D,
  51	IMX8MQ,
  52};
  53
  54#define IMX6_PCIE_FLAG_IMX6_PHY			BIT(0)
  55#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE	BIT(1)
  56#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND		BIT(2)
  57
  58struct imx6_pcie_drvdata {
  59	enum imx6_pcie_variants variant;
  60	u32 flags;
  61	int dbi_length;
  62};
  63
  64struct imx6_pcie {
  65	struct dw_pcie		*pci;
  66	int			reset_gpio;
  67	bool			gpio_active_high;
  68	struct clk		*pcie_bus;
  69	struct clk		*pcie_phy;
  70	struct clk		*pcie_inbound_axi;
  71	struct clk		*pcie;
  72	struct clk		*pcie_aux;
  73	struct regmap		*iomuxc_gpr;
  74	u32			controller_id;
  75	struct reset_control	*pciephy_reset;
  76	struct reset_control	*apps_reset;
  77	struct reset_control	*turnoff_reset;
  78	u32			tx_deemph_gen1;
  79	u32			tx_deemph_gen2_3p5db;
  80	u32			tx_deemph_gen2_6db;
  81	u32			tx_swing_full;
  82	u32			tx_swing_low;
  83	struct regulator	*vpcie;
  84	struct regulator	*vph;
  85	void __iomem		*phy_base;
  86
  87	/* power domain for pcie */
  88	struct device		*pd_pcie;
  89	/* power domain for pcie phy */
  90	struct device		*pd_pcie_phy;
  91	const struct imx6_pcie_drvdata *drvdata;
  92};
  93
  94/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
  95#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200
  96#define PHY_PLL_LOCK_WAIT_TIMEOUT	(2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)
  97
  98/* PCIe Port Logic registers (memory-mapped) */
  99#define PL_OFFSET 0x700
 100
 101#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
 102#define PCIE_PHY_CTRL_DATA(x)		FIELD_PREP(GENMASK(15, 0), (x))
 103#define PCIE_PHY_CTRL_CAP_ADR		BIT(16)
 104#define PCIE_PHY_CTRL_CAP_DAT		BIT(17)
 105#define PCIE_PHY_CTRL_WR		BIT(18)
 106#define PCIE_PHY_CTRL_RD		BIT(19)
 107
 108#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
 109#define PCIE_PHY_STAT_ACK		BIT(16)
 110
 111/* PHY registers (not memory-mapped) */
 112#define PCIE_PHY_ATEOVRD			0x10
 113#define  PCIE_PHY_ATEOVRD_EN			BIT(2)
 114#define  PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT	0
 115#define  PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK	0x1
 116
 117#define PCIE_PHY_MPLL_OVRD_IN_LO		0x11
 118#define  PCIE_PHY_MPLL_MULTIPLIER_SHIFT		2
 119#define  PCIE_PHY_MPLL_MULTIPLIER_MASK		0x7f
 120#define  PCIE_PHY_MPLL_MULTIPLIER_OVRD		BIT(9)
 121
 122#define PCIE_PHY_RX_ASIC_OUT 0x100D
 123#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)
 124
 125/* iMX7 PCIe PHY registers */
 126#define PCIE_PHY_CMN_REG4		0x14
 127/* These are probably the bits that *aren't* DCC_FB_EN */
 128#define PCIE_PHY_CMN_REG4_DCC_FB_EN	0x29
 129
 130#define PCIE_PHY_CMN_REG15	        0x54
 131#define PCIE_PHY_CMN_REG15_DLY_4	BIT(2)
 132#define PCIE_PHY_CMN_REG15_PLL_PD	BIT(5)
 133#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD	BIT(7)
 134
 135#define PCIE_PHY_CMN_REG24		0x90
 136#define PCIE_PHY_CMN_REG24_RX_EQ	BIT(6)
 137#define PCIE_PHY_CMN_REG24_RX_EQ_SEL	BIT(3)
 138
 139#define PCIE_PHY_CMN_REG26		0x98
 140#define PCIE_PHY_CMN_REG26_ATT_MODE	0xBC
 141
 142#define PHY_RX_OVRD_IN_LO 0x1005
 143#define PHY_RX_OVRD_IN_LO_RX_DATA_EN		BIT(5)
 144#define PHY_RX_OVRD_IN_LO_RX_PLL_EN		BIT(3)
 145
 146static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
 147{
 148	struct dw_pcie *pci = imx6_pcie->pci;
 149	bool val;
 150	u32 max_iterations = 10;
 151	u32 wait_counter = 0;
 152
 153	do {
 154		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
 155			PCIE_PHY_STAT_ACK;
 156		wait_counter++;
 157
 158		if (val == exp_val)
 159			return 0;
 160
 161		udelay(1);
 162	} while (wait_counter < max_iterations);
 163
 164	return -ETIMEDOUT;
 165}
 166
 167static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
 168{
 169	struct dw_pcie *pci = imx6_pcie->pci;
 170	u32 val;
 171	int ret;
 172
 173	val = PCIE_PHY_CTRL_DATA(addr);
 174	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
 175
 176	val |= PCIE_PHY_CTRL_CAP_ADR;
 177	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
 178
 179	ret = pcie_phy_poll_ack(imx6_pcie, true);
 180	if (ret)
 181		return ret;
 182
 183	val = PCIE_PHY_CTRL_DATA(addr);
 184	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
 185
 186	return pcie_phy_poll_ack(imx6_pcie, false);
 187}
 188
 189/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
 190static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
 191{
 192	struct dw_pcie *pci = imx6_pcie->pci;
 193	u32 phy_ctl;
 194	int ret;
 195
 196	ret = pcie_phy_wait_ack(imx6_pcie, addr);
 197	if (ret)
 198		return ret;
 199
 200	/* assert Read signal */
 201	phy_ctl = PCIE_PHY_CTRL_RD;
 202	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
 203
 204	ret = pcie_phy_poll_ack(imx6_pcie, true);
 205	if (ret)
 206		return ret;
 207
 208	*data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
 209
 210	/* deassert Read signal */
 211	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
 212
 213	return pcie_phy_poll_ack(imx6_pcie, false);
 214}
 215
 216static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
 217{
 218	struct dw_pcie *pci = imx6_pcie->pci;
 219	u32 var;
 220	int ret;
 221
 222	/* write addr */
 223	/* cap addr */
 224	ret = pcie_phy_wait_ack(imx6_pcie, addr);
 225	if (ret)
 226		return ret;
 227
 228	var = PCIE_PHY_CTRL_DATA(data);
 229	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
 230
 231	/* capture data */
 232	var |= PCIE_PHY_CTRL_CAP_DAT;
 233	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
 234
 235	ret = pcie_phy_poll_ack(imx6_pcie, true);
 236	if (ret)
 237		return ret;
 238
 239	/* deassert cap data */
 240	var = PCIE_PHY_CTRL_DATA(data);
 241	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
 242
 243	/* wait for ack de-assertion */
 244	ret = pcie_phy_poll_ack(imx6_pcie, false);
 245	if (ret)
 246		return ret;
 247
 248	/* assert wr signal */
 249	var = PCIE_PHY_CTRL_WR;
 250	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
 251
 252	/* wait for ack */
 253	ret = pcie_phy_poll_ack(imx6_pcie, true);
 254	if (ret)
 255		return ret;
 256
 257	/* deassert wr signal */
 258	var = PCIE_PHY_CTRL_DATA(data);
 259	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
 260
 261	/* wait for ack de-assertion */
 262	ret = pcie_phy_poll_ack(imx6_pcie, false);
 263	if (ret)
 264		return ret;
 265
 266	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);
 267
 268	return 0;
 269}
 270
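/*
 * imx6_pcie_reset_phy() only applies to variants with the classic i.MX6
 * PHY: it sets the RX_PLL_EN/RX_DATA_EN override bits in PHY_RX_OVRD_IN_LO
 * and clears them again ~2ms later, which effectively puts the receive
 * path through a reset.
 */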
 271static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
 272{
 273	u16 tmp;
 274
 275	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
 276		return;
 277
 278	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
 279	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
 280		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
 281	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
 282
 283	usleep_range(2000, 3000);
 284
 285	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
 286	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
 287		  PHY_RX_OVRD_IN_LO_RX_PLL_EN);
 288	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
 289}
 290
 291#ifdef CONFIG_ARM
 292/*  Added for PCI abort handling */
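/*
 * On i.MX6, a PCIe access that gets no response (e.g. a config read to an
 * absent device) is signalled back to the CPU as an ARM external abort.
 * The handler below decodes the faulting load instruction, fakes an
 * all-ones result in its destination register, skips the instruction and
 * resumes, matching the usual PCI convention for aborted reads.
 */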
 293static int imx6q_pcie_abort_handler(unsigned long addr,
 294		unsigned int fsr, struct pt_regs *regs)
 295{
 296	unsigned long pc = instruction_pointer(regs);
 297	unsigned long instr = *(unsigned long *)pc;
 298	int reg = (instr >> 12) & 15;
 299
 300	/*
 301	 * If the instruction being executed was a read,
 302	 * make it look like it read all-ones.
 303	 */
 304	if ((instr & 0x0c100000) == 0x04100000) {
 305		unsigned long val;
 306
 307		if (instr & 0x00400000)
 308			val = 255;
 309		else
 310			val = -1;
 311
 312		regs->uregs[reg] = val;
 313		regs->ARM_pc += 4;
 314		return 0;
 315	}
 316
 317	if ((instr & 0x0e100090) == 0x00100090) {
 318		regs->uregs[reg] = -1;
 319		regs->ARM_pc += 4;
 320		return 0;
 321	}
 322
 323	return 1;
 324}
 325#endif
 326
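/*
 * On SoCs with separate "pcie" and "pcie_phy" power domains, attach to
 * both by name and add runtime-PM device links so the domains stay
 * powered whenever this controller is active.  With a single power
 * domain the core has already attached it and nothing is done here.
 */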
 327static int imx6_pcie_attach_pd(struct device *dev)
 328{
 329	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
 330	struct device_link *link;
 331
 332	/* Do nothing when in a single power domain */
 333	if (dev->pm_domain)
 334		return 0;
 335
 336	imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
 337	if (IS_ERR(imx6_pcie->pd_pcie))
 338		return PTR_ERR(imx6_pcie->pd_pcie);
 339	/* Do nothing when power domain missing */
 340	if (!imx6_pcie->pd_pcie)
 341		return 0;
 342	link = device_link_add(dev, imx6_pcie->pd_pcie,
 343			DL_FLAG_STATELESS |
 344			DL_FLAG_PM_RUNTIME |
 345			DL_FLAG_RPM_ACTIVE);
 346	if (!link) {
 347		dev_err(dev, "Failed to add device_link to pcie pd.\n");
 348		return -EINVAL;
 349	}
 350
 351	imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
 352	if (IS_ERR(imx6_pcie->pd_pcie_phy))
 353		return PTR_ERR(imx6_pcie->pd_pcie_phy);
 354
 355	link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
 356			DL_FLAG_STATELESS |
 357			DL_FLAG_PM_RUNTIME |
 358			DL_FLAG_RPM_ACTIVE);
 359	if (!link) {
 360		dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
 361		return -EINVAL;
 362	}
 363
 364	return 0;
 365}
 366
 367static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
 368{
 369	struct device *dev = imx6_pcie->pci->dev;
 370
 371	switch (imx6_pcie->drvdata->variant) {
 372	case IMX7D:
 373	case IMX8MQ:
 374		reset_control_assert(imx6_pcie->pciephy_reset);
 375		reset_control_assert(imx6_pcie->apps_reset);
 376		break;
 377	case IMX6SX:
 378		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 379				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
 380				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
 381		/* Force PCIe PHY reset */
 382		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
 383				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
 384				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
 385		break;
 386	case IMX6QP:
 387		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 388				   IMX6Q_GPR1_PCIE_SW_RST,
 389				   IMX6Q_GPR1_PCIE_SW_RST);
 390		break;
 391	case IMX6Q:
 392		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 393				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
 394		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 395				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
 396		break;
 397	}
 398
 399	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
 400		int ret = regulator_disable(imx6_pcie->vpcie);
 401
 402		if (ret)
 403			dev_err(dev, "failed to disable vpcie regulator: %d\n",
 404				ret);
 405	}
 406}
 407
 408static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
 409{
 410	WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ);
 411	return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
 412}
 413
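/*
 * Enabling the reference clock is SoC specific: i.MX6Q/QP gate it through
 * IOMUXC GPR1 (with a short delay so the async reset can synchronize),
 * i.MX6SX additionally needs the inbound AXI clock and clears the PHY test
 * power-down, i.MX8MQ enables the pcie_aux clock and uses the per-controller
 * CLK_REQ override bits, and i.MX7D needs no extra handling here.
 */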
 414static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
 415{
 416	struct dw_pcie *pci = imx6_pcie->pci;
 417	struct device *dev = pci->dev;
 418	unsigned int offset;
 419	int ret = 0;
 420
 421	switch (imx6_pcie->drvdata->variant) {
 422	case IMX6SX:
 423		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
 424		if (ret) {
 425			dev_err(dev, "unable to enable pcie_axi clock\n");
 426			break;
 427		}
 428
 429		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 430				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
 431		break;
 432	case IMX6QP:
 433	case IMX6Q:
 434		/* power up core phy and enable ref clock */
 435		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 436				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
 437		/*
  438		 * The async reset input needs the ref clock to synchronize
  439		 * internally. If the ref clock only comes up after reset, the
  440		 * internally synced reset time is too short to meet the
  441		 * requirement, so add a ~10us delay here.
 442		 */
 443		usleep_range(10, 100);
 444		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 445				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
 446		break;
 447	case IMX7D:
 448		break;
 449	case IMX8MQ:
 450		ret = clk_prepare_enable(imx6_pcie->pcie_aux);
 451		if (ret) {
 452			dev_err(dev, "unable to enable pcie_aux clock\n");
 453			break;
 454		}
 455
 456		offset = imx6_pcie_grp_offset(imx6_pcie);
 457		/*
  458		 * Set the override value low and enable the override so
  459		 * that REF_CLK is guaranteed to be turned on.
 460		 */
 461		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
 462				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
 463				   0);
 464		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
 465				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
 466				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
 467		break;
 468	}
 469
 470	return ret;
 471}
 472
 473static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
 474{
 475	u32 val;
 476	struct device *dev = imx6_pcie->pci->dev;
 477
 478	if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
 479				     IOMUXC_GPR22, val,
 480				     val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
 481				     PHY_PLL_LOCK_WAIT_USLEEP_MAX,
 482				     PHY_PLL_LOCK_WAIT_TIMEOUT))
 483		dev_err(dev, "PCIe PLL lock timeout\n");
 484}
 485
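/*
 * Bring-up order below: re-enable the vpcie regulator if needed, turn on
 * the PHY, bus, core and reference clocks, let them stabilize, pulse the
 * optional reset GPIO, and finally release the variant-specific PHY/core
 * resets (including the i.MX7D ERR010728 workaround and PLL-lock wait).
 */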
 486static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
 487{
 488	struct dw_pcie *pci = imx6_pcie->pci;
 489	struct device *dev = pci->dev;
 490	int ret;
 491
 492	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
 493		ret = regulator_enable(imx6_pcie->vpcie);
 494		if (ret) {
 495			dev_err(dev, "failed to enable vpcie regulator: %d\n",
 496				ret);
 497			return;
 498		}
 499	}
 500
 501	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
 502	if (ret) {
 503		dev_err(dev, "unable to enable pcie_phy clock\n");
 504		goto err_pcie_phy;
 505	}
 506
 507	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
 508	if (ret) {
 509		dev_err(dev, "unable to enable pcie_bus clock\n");
 510		goto err_pcie_bus;
 511	}
 512
 513	ret = clk_prepare_enable(imx6_pcie->pcie);
 514	if (ret) {
 515		dev_err(dev, "unable to enable pcie clock\n");
 516		goto err_pcie;
 517	}
 518
 519	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
 520	if (ret) {
 521		dev_err(dev, "unable to enable pcie ref clock\n");
 522		goto err_ref_clk;
 523	}
 524
 525	/* allow the clocks to stabilize */
 526	usleep_range(200, 500);
 527
 528	/* Some boards don't have PCIe reset GPIO. */
 529	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
 530		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
 531					imx6_pcie->gpio_active_high);
 532		msleep(100);
 533		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
 534					!imx6_pcie->gpio_active_high);
 535	}
 536
 537	switch (imx6_pcie->drvdata->variant) {
 538	case IMX8MQ:
 539		reset_control_deassert(imx6_pcie->pciephy_reset);
 540		break;
 541	case IMX7D:
 542		reset_control_deassert(imx6_pcie->pciephy_reset);
 543
 544		/* Workaround for ERR010728, failure of PCI-e PLL VCO to
 545		 * oscillate, especially when cold.  This turns off "Duty-cycle
 546		 * Corrector" and other mysterious undocumented things.
 547		 */
 548		if (likely(imx6_pcie->phy_base)) {
 549			/* De-assert DCC_FB_EN */
 550			writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
 551			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
 552			/* Assert RX_EQS and RX_EQS_SEL */
 553			writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
 554				| PCIE_PHY_CMN_REG24_RX_EQ,
 555			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
 556			/* Assert ATT_MODE */
 557			writel(PCIE_PHY_CMN_REG26_ATT_MODE,
 558			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
 559		} else {
 560			dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
 561		}
 562
 563		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
 564		break;
 565	case IMX6SX:
 566		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
 567				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
 568		break;
 569	case IMX6QP:
 570		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 571				   IMX6Q_GPR1_PCIE_SW_RST, 0);
 572
 573		usleep_range(200, 500);
 574		break;
 575	case IMX6Q:		/* Nothing to do */
 576		break;
 577	}
 578
 579	return;
 580
 581err_ref_clk:
 582	clk_disable_unprepare(imx6_pcie->pcie);
 583err_pcie:
 584	clk_disable_unprepare(imx6_pcie->pcie_bus);
 585err_pcie_bus:
 586	clk_disable_unprepare(imx6_pcie->pcie_phy);
 587err_pcie_phy:
 588	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
 589		ret = regulator_disable(imx6_pcie->vpcie);
 590		if (ret)
 591			dev_err(dev, "failed to disable vpcie regulator: %d\n",
 592				ret);
 593	}
 594}
 595
 596static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
 597{
 598	unsigned int mask, val;
 599
 600	if (imx6_pcie->drvdata->variant == IMX8MQ &&
 601	    imx6_pcie->controller_id == 1) {
 602		mask   = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
 603		val    = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
 604				    PCI_EXP_TYPE_ROOT_PORT);
 605	} else {
 606		mask = IMX6Q_GPR12_DEVICE_TYPE;
 607		val  = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
 608				  PCI_EXP_TYPE_ROOT_PORT);
 609	}
 610
 611	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
 612}
 613
 614static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
 615{
 616	switch (imx6_pcie->drvdata->variant) {
 617	case IMX8MQ:
 618		/*
 619		 * TODO: Currently this code assumes external
 620		 * oscillator is being used
 621		 */
 622		regmap_update_bits(imx6_pcie->iomuxc_gpr,
 623				   imx6_pcie_grp_offset(imx6_pcie),
 624				   IMX8MQ_GPR_PCIE_REF_USE_PAD,
 625				   IMX8MQ_GPR_PCIE_REF_USE_PAD);
 626		/*
  627		 * According to the datasheet, PCIE_VPH is suggested to be
  628		 * 1.8V. If PCIE_VPH is supplied with 3.3V, VREG_BYPASS
  629		 * should be cleared to zero.
 630		 */
 631		if (imx6_pcie->vph &&
 632		    regulator_get_voltage(imx6_pcie->vph) > 3000000)
 633			regmap_update_bits(imx6_pcie->iomuxc_gpr,
 634					   imx6_pcie_grp_offset(imx6_pcie),
 635					   IMX8MQ_GPR_PCIE_VREG_BYPASS,
 636					   0);
 637		break;
 638	case IMX7D:
 639		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 640				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
 641		break;
 642	case IMX6SX:
 643		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 644				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
 645				   IMX6SX_GPR12_PCIE_RX_EQ_2);
 646		fallthrough;
 647	default:
 648		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 649				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
 650
 651		/* configure constant input signal to the pcie ctrl and phy */
 652		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 653				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
 654
 655		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
 656				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
 657				   imx6_pcie->tx_deemph_gen1 << 0);
 658		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
 659				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
 660				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
 661		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
 662				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
 663				   imx6_pcie->tx_deemph_gen2_6db << 12);
 664		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
 665				   IMX6Q_GPR8_TX_SWING_FULL,
 666				   imx6_pcie->tx_swing_full << 18);
 667		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
 668				   IMX6Q_GPR8_TX_SWING_LOW,
 669				   imx6_pcie->tx_swing_low << 25);
 670		break;
 671	}
 672
 673	imx6_pcie_configure_type(imx6_pcie);
 674}
 675
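/*
 * The i.MX6 PHY MPLL defaults assume a 125MHz reference clock.  For a
 * 100MHz or 200MHz reference the multiplier (and, for 200MHz, the input
 * divider) is overridden below so the PHY still produces the internal
 * clock it expects.
 */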
 676static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
 677{
 678	unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
 679	int mult, div;
 680	u16 val;
 681
 682	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
 683		return 0;
 684
 685	switch (phy_rate) {
 686	case 125000000:
 687		/*
 688		 * The default settings of the MPLL are for a 125MHz input
 689		 * clock, so no need to reconfigure anything in that case.
 690		 */
 691		return 0;
 692	case 100000000:
 693		mult = 25;
 694		div = 0;
 695		break;
 696	case 200000000:
 697		mult = 25;
 698		div = 1;
 699		break;
 700	default:
 701		dev_err(imx6_pcie->pci->dev,
 702			"Unsupported PHY reference clock rate %lu\n", phy_rate);
 703		return -EINVAL;
 704	}
 705
 706	pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
 707	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
 708		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
 709	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
 710	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
 711	pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
 712
 713	pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
 714	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
 715		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
 716	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
 717	val |= PCIE_PHY_ATEOVRD_EN;
 718	pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
 719
 720	return 0;
 721}
 722
 723static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
 724{
 725	struct dw_pcie *pci = imx6_pcie->pci;
 726	struct device *dev = pci->dev;
 727	u32 tmp;
 728	unsigned int retries;
 729
 730	for (retries = 0; retries < 200; retries++) {
 731		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
 732		/* Test if the speed change finished. */
 733		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
 734			return 0;
 735		usleep_range(100, 1000);
 736	}
 737
 738	dev_err(dev, "Speed change timeout\n");
 739	return -ETIMEDOUT;
 740}
 741
 742static void imx6_pcie_ltssm_enable(struct device *dev)
 743{
 744	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
 745
 746	switch (imx6_pcie->drvdata->variant) {
 747	case IMX6Q:
 748	case IMX6SX:
 749	case IMX6QP:
 750		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 751				   IMX6Q_GPR12_PCIE_CTL_2,
 752				   IMX6Q_GPR12_PCIE_CTL_2);
 753		break;
 754	case IMX7D:
 755	case IMX8MQ:
 756		reset_control_deassert(imx6_pcie->apps_reset);
 757		break;
 758	}
 759}
 760
 761static int imx6_pcie_start_link(struct dw_pcie *pci)
 762{
 763	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
 764	struct device *dev = pci->dev;
 765	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
 766	u32 tmp;
 767	int ret;
 768
 769	/*
 770	 * Force Gen1 operation when starting the link.  In case the link is
 771	 * started in Gen2 mode, there is a possibility the devices on the
 772	 * bus will not be detected at all.  This happens with PCIe switches.
 773	 */
 774	tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
 775	tmp &= ~PCI_EXP_LNKCAP_SLS;
 776	tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
 777	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
 778
 779	/* Start LTSSM. */
 780	imx6_pcie_ltssm_enable(dev);
 781
 782	ret = dw_pcie_wait_for_link(pci);
 783	if (ret)
 784		goto err_reset_phy;
 785
 786	if (pci->link_gen == 2) {
 787		/* Allow Gen2 mode after the link is up. */
 788		tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
 789		tmp &= ~PCI_EXP_LNKCAP_SLS;
 790		tmp |= PCI_EXP_LNKCAP_SLS_5_0GB;
 791		dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
 792
 793		/*
 794		 * Start Directed Speed Change so the best possible
 795		 * speed both link partners support can be negotiated.
 796		 */
 797		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
 798		tmp |= PORT_LOGIC_SPEED_CHANGE;
 799		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
 800
 801		if (imx6_pcie->drvdata->flags &
 802		    IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
 803			/*
  804			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
  805			 * from the i.MX6 family when no link speed transition
  806			 * occurs and the link simply stays at Gen1: in that
  807			 * case the bit is not cleared by hardware, which would
  808			 * cause the speed-change wait below to report a false
  809			 * failure.
 810			 */
 811
 812			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
 813			if (ret) {
 814				dev_err(dev, "Failed to bring link up!\n");
 815				goto err_reset_phy;
 816			}
 817		}
 818
 819		/* Make sure link training is finished as well! */
 820		ret = dw_pcie_wait_for_link(pci);
 821		if (ret) {
 822			dev_err(dev, "Failed to bring link up!\n");
 823			goto err_reset_phy;
 824		}
 825	} else {
 826		dev_info(dev, "Link: Gen2 disabled\n");
 827	}
 828
 829	tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
 830	dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
 831	return 0;
 832
 833err_reset_phy:
 834	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
 835		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
 836		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
 837	imx6_pcie_reset_phy(imx6_pcie);
 838	return ret;
 839}
 840
 841static int imx6_pcie_host_init(struct pcie_port *pp)
 842{
 843	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 844	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
 845
 846	imx6_pcie_assert_core_reset(imx6_pcie);
 847	imx6_pcie_init_phy(imx6_pcie);
 848	imx6_pcie_deassert_core_reset(imx6_pcie);
 849	imx6_setup_phy_mpll(imx6_pcie);
 850
 851	return 0;
 852}
 853
 854static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
 855	.host_init = imx6_pcie_host_init,
 856};
 857
 858static const struct dw_pcie_ops dw_pcie_ops = {
 859	.start_link = imx6_pcie_start_link,
 860};
 861
 862#ifdef CONFIG_PM_SLEEP
 863static void imx6_pcie_ltssm_disable(struct device *dev)
 864{
 865	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
 866
 867	switch (imx6_pcie->drvdata->variant) {
 868	case IMX6SX:
 869	case IMX6QP:
 870		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 871				   IMX6Q_GPR12_PCIE_CTL_2, 0);
 872		break;
 873	case IMX7D:
 874		reset_control_assert(imx6_pcie->apps_reset);
 875		break;
 876	default:
 877		dev_err(dev, "ltssm_disable not supported\n");
 878	}
 879}
 880
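/*
 * PME_Turn_Off is signalled either through the dedicated "turnoff" reset
 * control, when the DT provides one, or on i.MX6SX by pulsing the
 * PM_TURN_OFF bit in GPR12.  The sleep at the end stands in for the
 * PME_TO_Ack that this driver has no way to observe.
 */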
 881static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
 882{
 883	struct device *dev = imx6_pcie->pci->dev;
 884
 885	/* Some variants have a turnoff reset in DT */
 886	if (imx6_pcie->turnoff_reset) {
 887		reset_control_assert(imx6_pcie->turnoff_reset);
 888		reset_control_deassert(imx6_pcie->turnoff_reset);
 889		goto pm_turnoff_sleep;
 890	}
 891
 892	/* Others poke directly at IOMUXC registers */
 893	switch (imx6_pcie->drvdata->variant) {
 894	case IMX6SX:
 895		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 896				IMX6SX_GPR12_PCIE_PM_TURN_OFF,
 897				IMX6SX_GPR12_PCIE_PM_TURN_OFF);
 898		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 899				IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
 900		break;
 901	default:
 902		dev_err(dev, "PME_Turn_Off not implemented\n");
 903		return;
 904	}
 905
 906	/*
 907	 * Components with an upstream port must respond to
 908	 * PME_Turn_Off with PME_TO_Ack but we can't check.
 909	 *
 910	 * The standard recommends a 1-10ms timeout after which to
 911	 * proceed anyway as if acks were received.
 912	 */
 913pm_turnoff_sleep:
 914	usleep_range(1000, 10000);
 915}
 916
 917static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
 918{
 919	clk_disable_unprepare(imx6_pcie->pcie);
 920	clk_disable_unprepare(imx6_pcie->pcie_phy);
 921	clk_disable_unprepare(imx6_pcie->pcie_bus);
 922
 923	switch (imx6_pcie->drvdata->variant) {
 924	case IMX6SX:
 925		clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
 926		break;
 927	case IMX7D:
 928		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 929				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
 930				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
 931		break;
 932	case IMX8MQ:
 933		clk_disable_unprepare(imx6_pcie->pcie_aux);
 934		break;
 935	default:
 936		break;
 937	}
 938}
 939
 940static int imx6_pcie_suspend_noirq(struct device *dev)
 941{
 942	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
 943
 944	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
 945		return 0;
 946
 947	imx6_pcie_pm_turnoff(imx6_pcie);
 948	imx6_pcie_clk_disable(imx6_pcie);
 949	imx6_pcie_ltssm_disable(dev);
 950
 951	return 0;
 952}
 953
 954static int imx6_pcie_resume_noirq(struct device *dev)
 955{
 956	int ret;
 957	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
 958	struct pcie_port *pp = &imx6_pcie->pci->pp;
 959
 960	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
 961		return 0;
 962
 963	imx6_pcie_assert_core_reset(imx6_pcie);
 964	imx6_pcie_init_phy(imx6_pcie);
 965	imx6_pcie_deassert_core_reset(imx6_pcie);
 966	dw_pcie_setup_rc(pp);
 967
 968	ret = imx6_pcie_start_link(imx6_pcie->pci);
 969	if (ret < 0)
 970		dev_info(dev, "pcie link is down after resume.\n");
 971
 972	return 0;
 973}
 974#endif
 975
 976static const struct dev_pm_ops imx6_pcie_pm_ops = {
 977	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
 978				      imx6_pcie_resume_noirq)
 979};
 980
 981static int imx6_pcie_probe(struct platform_device *pdev)
 982{
 983	struct device *dev = &pdev->dev;
 984	struct dw_pcie *pci;
 985	struct imx6_pcie *imx6_pcie;
 986	struct device_node *np;
 987	struct resource *dbi_base;
 988	struct device_node *node = dev->of_node;
 989	int ret;
 990	u16 val;
 991
 992	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
 993	if (!imx6_pcie)
 994		return -ENOMEM;
 995
 996	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
 997	if (!pci)
 998		return -ENOMEM;
 999
1000	pci->dev = dev;
1001	pci->ops = &dw_pcie_ops;
1002	pci->pp.ops = &imx6_pcie_host_ops;
1003
1004	imx6_pcie->pci = pci;
1005	imx6_pcie->drvdata = of_device_get_match_data(dev);
1006
1007	/* Find the PHY if one is defined, only imx7d uses it */
1008	np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
1009	if (np) {
1010		struct resource res;
1011
1012		ret = of_address_to_resource(np, 0, &res);
1013		if (ret) {
1014			dev_err(dev, "Unable to map PCIe PHY\n");
1015			return ret;
1016		}
1017		imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
1018		if (IS_ERR(imx6_pcie->phy_base))
1019			return PTR_ERR(imx6_pcie->phy_base);
1020	}
1021
1022	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1023	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
1024	if (IS_ERR(pci->dbi_base))
1025		return PTR_ERR(pci->dbi_base);
1026
1027	/* Fetch GPIOs */
1028	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
1029	imx6_pcie->gpio_active_high = of_property_read_bool(node,
1030						"reset-gpio-active-high");
1031	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
1032		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
1033				imx6_pcie->gpio_active_high ?
1034					GPIOF_OUT_INIT_HIGH :
1035					GPIOF_OUT_INIT_LOW,
1036				"PCIe reset");
1037		if (ret) {
1038			dev_err(dev, "unable to get reset gpio\n");
1039			return ret;
1040		}
1041	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
1042		return imx6_pcie->reset_gpio;
1043	}
1044
1045	/* Fetch clocks */
1046	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
1047	if (IS_ERR(imx6_pcie->pcie_phy))
1048		return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
1049				     "pcie_phy clock source missing or invalid\n");
1050
1051	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
1052	if (IS_ERR(imx6_pcie->pcie_bus))
1053		return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus),
1054				     "pcie_bus clock source missing or invalid\n");
1055
1056	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
1057	if (IS_ERR(imx6_pcie->pcie))
1058		return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie),
1059				     "pcie clock source missing or invalid\n");
1060
1061	switch (imx6_pcie->drvdata->variant) {
1062	case IMX6SX:
1063		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
1064							   "pcie_inbound_axi");
1065		if (IS_ERR(imx6_pcie->pcie_inbound_axi))
1066			return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi),
1067					     "pcie_inbound_axi clock missing or invalid\n");
1068		break;
1069	case IMX8MQ:
1070		imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
1071		if (IS_ERR(imx6_pcie->pcie_aux))
1072			return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
1073					     "pcie_aux clock source missing or invalid\n");
1074		fallthrough;
1075	case IMX7D:
1076		if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
1077			imx6_pcie->controller_id = 1;
1078
1079		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
1080									    "pciephy");
1081		if (IS_ERR(imx6_pcie->pciephy_reset)) {
1082			dev_err(dev, "Failed to get PCIEPHY reset control\n");
1083			return PTR_ERR(imx6_pcie->pciephy_reset);
1084		}
1085
1086		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
1087									 "apps");
1088		if (IS_ERR(imx6_pcie->apps_reset)) {
1089			dev_err(dev, "Failed to get PCIE APPS reset control\n");
1090			return PTR_ERR(imx6_pcie->apps_reset);
1091		}
1092		break;
1093	default:
1094		break;
1095	}
1096
1097	/* Grab turnoff reset */
1098	imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
1099	if (IS_ERR(imx6_pcie->turnoff_reset)) {
1100		dev_err(dev, "Failed to get TURNOFF reset control\n");
1101		return PTR_ERR(imx6_pcie->turnoff_reset);
1102	}
1103
1104	/* Grab GPR config register range */
1105	imx6_pcie->iomuxc_gpr =
1106		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
1107	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
1108		dev_err(dev, "unable to find iomuxc registers\n");
1109		return PTR_ERR(imx6_pcie->iomuxc_gpr);
1110	}
1111
1112	/* Grab PCIe PHY Tx Settings */
1113	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
1114				 &imx6_pcie->tx_deemph_gen1))
1115		imx6_pcie->tx_deemph_gen1 = 0;
1116
1117	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
1118				 &imx6_pcie->tx_deemph_gen2_3p5db))
1119		imx6_pcie->tx_deemph_gen2_3p5db = 0;
1120
1121	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
1122				 &imx6_pcie->tx_deemph_gen2_6db))
1123		imx6_pcie->tx_deemph_gen2_6db = 20;
1124
1125	if (of_property_read_u32(node, "fsl,tx-swing-full",
1126				 &imx6_pcie->tx_swing_full))
1127		imx6_pcie->tx_swing_full = 127;
1128
1129	if (of_property_read_u32(node, "fsl,tx-swing-low",
1130				 &imx6_pcie->tx_swing_low))
1131		imx6_pcie->tx_swing_low = 127;
1132
1133	/* Limit link speed */
1134	pci->link_gen = 1;
1135	ret = of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);
1136
1137	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
1138	if (IS_ERR(imx6_pcie->vpcie)) {
1139		if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
1140			return PTR_ERR(imx6_pcie->vpcie);
1141		imx6_pcie->vpcie = NULL;
1142	}
1143
1144	imx6_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
1145	if (IS_ERR(imx6_pcie->vph)) {
1146		if (PTR_ERR(imx6_pcie->vph) != -ENODEV)
1147			return PTR_ERR(imx6_pcie->vph);
1148		imx6_pcie->vph = NULL;
1149	}
1150
1151	platform_set_drvdata(pdev, imx6_pcie);
1152
1153	ret = imx6_pcie_attach_pd(dev);
1154	if (ret)
1155		return ret;
1156
1157	ret = dw_pcie_host_init(&pci->pp);
1158	if (ret < 0)
1159		return ret;
1160
1161	if (pci_msi_enabled()) {
1162		u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
1163		val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
1164		val |= PCI_MSI_FLAGS_ENABLE;
1165		dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
1166	}
1167
1168	return 0;
1169}
1170
1171static void imx6_pcie_shutdown(struct platform_device *pdev)
1172{
1173	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
1174
1175	/* bring down link, so bootloader gets clean state in case of reboot */
1176	imx6_pcie_assert_core_reset(imx6_pcie);
1177}
1178
1179static const struct imx6_pcie_drvdata drvdata[] = {
1180	[IMX6Q] = {
1181		.variant = IMX6Q,
1182		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
1183			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
1184		.dbi_length = 0x200,
1185	},
1186	[IMX6SX] = {
1187		.variant = IMX6SX,
1188		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
1189			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
1190			 IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
1191	},
1192	[IMX6QP] = {
1193		.variant = IMX6QP,
1194		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
1195			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
1196		.dbi_length = 0x200,
1197	},
1198	[IMX7D] = {
1199		.variant = IMX7D,
1200		.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
1201	},
1202	[IMX8MQ] = {
1203		.variant = IMX8MQ,
1204	},
1205};
1206
1207static const struct of_device_id imx6_pcie_of_match[] = {
1208	{ .compatible = "fsl,imx6q-pcie",  .data = &drvdata[IMX6Q],  },
1209	{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
1210	{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
1211	{ .compatible = "fsl,imx7d-pcie",  .data = &drvdata[IMX7D],  },
1212	{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], } ,
1213	{},
1214};
1215
1216static struct platform_driver imx6_pcie_driver = {
1217	.driver = {
1218		.name	= "imx6q-pcie",
1219		.of_match_table = imx6_pcie_of_match,
1220		.suppress_bind_attrs = true,
1221		.pm = &imx6_pcie_pm_ops,
1222		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1223	},
1224	.probe    = imx6_pcie_probe,
1225	.shutdown = imx6_pcie_shutdown,
1226};
1227
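/*
 * This fixup matches the Synopsys reference Root Port ID (device 0xabcd)
 * and, after checking that the bus really belongs to this driver, clamps
 * the config space size to the variant's dbi_length so config reads
 * cannot run past the truncated register block on i.MX6Q/6QP.
 */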
1228static void imx6_pcie_quirk(struct pci_dev *dev)
1229{
1230	struct pci_bus *bus = dev->bus;
1231	struct pcie_port *pp = bus->sysdata;
1232
1233	/* Bus parent is the PCI bridge, its parent is this platform driver */
1234	if (!bus->dev.parent || !bus->dev.parent->parent)
1235		return;
1236
1237	/* Make sure we only quirk devices associated with this driver */
1238	if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
1239		return;
1240
1241	if (pci_is_root_bus(bus)) {
1242		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1243		struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
1244
1245		/*
1246		 * Limit config length to avoid the kernel reading beyond
1247		 * the register set and causing an abort on i.MX 6Quad
1248		 */
1249		if (imx6_pcie->drvdata->dbi_length) {
1250			dev->cfg_size = imx6_pcie->drvdata->dbi_length;
1251			dev_info(&dev->dev, "Limiting cfg_size to %d\n",
1252					dev->cfg_size);
1253		}
1254	}
1255}
1256DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
1257			PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);
1258
1259static int __init imx6_pcie_init(void)
1260{
1261#ifdef CONFIG_ARM
1262	/*
 1263	 * Since probe() can be deferred, we need to make sure that
 1264	 * hook_fault_code is not called after __init memory has been
 1265	 * freed by the kernel. Since imx6q_pcie_abort_handler() touches
 1266	 * no driver state, we can install the handler here without
 1267	 * risking it accessing uninitialized data.
1268	 */
1269	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
1270			"external abort on non-linefetch");
1271#endif
1272
1273	return platform_driver_register(&imx6_pcie_driver);
1274}
1275device_initcall(imx6_pcie_init);
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCIe host controller driver for Freescale i.MX6 SoCs
   4 *
   5 * Copyright (C) 2013 Kosagi
   6 *		https://www.kosagi.com
   7 *
   8 * Author: Sean Cross <xobs@kosagi.com>
   9 */
  10
  11#include <linux/bitfield.h>
  12#include <linux/clk.h>
  13#include <linux/delay.h>
  14#include <linux/gpio.h>
  15#include <linux/kernel.h>
  16#include <linux/mfd/syscon.h>
  17#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
  18#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
  19#include <linux/module.h>
  20#include <linux/of.h>
  21#include <linux/of_gpio.h>
  22#include <linux/of_address.h>
  23#include <linux/pci.h>
  24#include <linux/platform_device.h>
  25#include <linux/regmap.h>
  26#include <linux/regulator/consumer.h>
  27#include <linux/resource.h>
  28#include <linux/signal.h>
  29#include <linux/types.h>
  30#include <linux/interrupt.h>
  31#include <linux/reset.h>
  32#include <linux/phy/phy.h>
  33#include <linux/pm_domain.h>
  34#include <linux/pm_runtime.h>
  35
  36#include "pcie-designware.h"
  37
  38#define IMX8MQ_GPR_PCIE_REF_USE_PAD		BIT(9)
  39#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN	BIT(10)
  40#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE	BIT(11)
  41#define IMX8MQ_GPR_PCIE_VREG_BYPASS		BIT(12)
  42#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE	GENMASK(11, 8)
  43#define IMX8MQ_PCIE2_BASE_ADDR			0x33c00000
  44
  45#define IMX95_PCIE_PHY_GEN_CTRL			0x0
  46#define IMX95_PCIE_REF_USE_PAD			BIT(17)
  47
  48#define IMX95_PCIE_SS_RW_REG_0			0xf0
  49#define IMX95_PCIE_REF_CLKEN			BIT(23)
  50#define IMX95_PCIE_PHY_CR_PARA_SEL		BIT(9)
  51
  52#define IMX95_PE0_GEN_CTRL_1			0x1050
  53#define IMX95_PCIE_DEVICE_TYPE			GENMASK(3, 0)
  54
  55#define IMX95_PE0_GEN_CTRL_3			0x1058
  56#define IMX95_PCIE_LTSSM_EN			BIT(0)
  57
  58#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)
  59
  60enum imx6_pcie_variants {
  61	IMX6Q,
  62	IMX6SX,
  63	IMX6QP,
  64	IMX7D,
  65	IMX8MQ,
  66	IMX8MM,
  67	IMX8MP,
  68	IMX95,
  69	IMX8MQ_EP,
  70	IMX8MM_EP,
  71	IMX8MP_EP,
  72	IMX95_EP,
  73};
  74
  75#define IMX6_PCIE_FLAG_IMX6_PHY			BIT(0)
  76#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE	BIT(1)
  77#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND		BIT(2)
  78#define IMX6_PCIE_FLAG_HAS_PHYDRV			BIT(3)
  79#define IMX6_PCIE_FLAG_HAS_APP_RESET		BIT(4)
  80#define IMX6_PCIE_FLAG_HAS_PHY_RESET		BIT(5)
  81#define IMX6_PCIE_FLAG_HAS_SERDES		BIT(6)
  82#define IMX6_PCIE_FLAG_SUPPORT_64BIT		BIT(7)
  83
  84#define imx6_check_flag(pci, val)     (pci->drvdata->flags & val)
  85
  86#define IMX6_PCIE_MAX_CLKS       6
  87
  88#define IMX6_PCIE_MAX_INSTANCES			2
  89
  90struct imx6_pcie;
  91
  92struct imx6_pcie_drvdata {
  93	enum imx6_pcie_variants variant;
  94	enum dw_pcie_device_mode mode;
  95	u32 flags;
  96	int dbi_length;
  97	const char *gpr;
  98	const char * const *clk_names;
  99	const u32 clks_cnt;
 100	const u32 ltssm_off;
 101	const u32 ltssm_mask;
 102	const u32 mode_off[IMX6_PCIE_MAX_INSTANCES];
 103	const u32 mode_mask[IMX6_PCIE_MAX_INSTANCES];
 104	const struct pci_epc_features *epc_features;
 105	int (*init_phy)(struct imx6_pcie *pcie);
 106};
 107
 108struct imx6_pcie {
 109	struct dw_pcie		*pci;
 110	int			reset_gpio;
 111	bool			gpio_active_high;
 112	bool			link_is_up;
 113	struct clk_bulk_data	clks[IMX6_PCIE_MAX_CLKS];
 114	struct regmap		*iomuxc_gpr;
 115	u16			msi_ctrl;
 116	u32			controller_id;
 117	struct reset_control	*pciephy_reset;
 118	struct reset_control	*apps_reset;
 119	struct reset_control	*turnoff_reset;
 120	u32			tx_deemph_gen1;
 121	u32			tx_deemph_gen2_3p5db;
 122	u32			tx_deemph_gen2_6db;
 123	u32			tx_swing_full;
 124	u32			tx_swing_low;
 125	struct regulator	*vpcie;
 126	struct regulator	*vph;
 127	void __iomem		*phy_base;
 128
 129	/* power domain for pcie */
 130	struct device		*pd_pcie;
 131	/* power domain for pcie phy */
 132	struct device		*pd_pcie_phy;
 133	struct phy		*phy;
 134	const struct imx6_pcie_drvdata *drvdata;
 135};
 136
 137/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
 138#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200
 139#define PHY_PLL_LOCK_WAIT_TIMEOUT	(2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)
 140
 141/* PCIe Port Logic registers (memory-mapped) */
 142#define PL_OFFSET 0x700
 143
 144#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
 145#define PCIE_PHY_CTRL_DATA(x)		FIELD_PREP(GENMASK(15, 0), (x))
 146#define PCIE_PHY_CTRL_CAP_ADR		BIT(16)
 147#define PCIE_PHY_CTRL_CAP_DAT		BIT(17)
 148#define PCIE_PHY_CTRL_WR		BIT(18)
 149#define PCIE_PHY_CTRL_RD		BIT(19)
 150
 151#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
 152#define PCIE_PHY_STAT_ACK		BIT(16)
 153
 154/* PHY registers (not memory-mapped) */
 155#define PCIE_PHY_ATEOVRD			0x10
 156#define  PCIE_PHY_ATEOVRD_EN			BIT(2)
 157#define  PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT	0
 158#define  PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK	0x1
 159
 160#define PCIE_PHY_MPLL_OVRD_IN_LO		0x11
 161#define  PCIE_PHY_MPLL_MULTIPLIER_SHIFT		2
 162#define  PCIE_PHY_MPLL_MULTIPLIER_MASK		0x7f
 163#define  PCIE_PHY_MPLL_MULTIPLIER_OVRD		BIT(9)
 164
 165#define PCIE_PHY_RX_ASIC_OUT 0x100D
 166#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)
 167
 168/* iMX7 PCIe PHY registers */
 169#define PCIE_PHY_CMN_REG4		0x14
 170/* These are probably the bits that *aren't* DCC_FB_EN */
 171#define PCIE_PHY_CMN_REG4_DCC_FB_EN	0x29
 172
 173#define PCIE_PHY_CMN_REG15	        0x54
 174#define PCIE_PHY_CMN_REG15_DLY_4	BIT(2)
 175#define PCIE_PHY_CMN_REG15_PLL_PD	BIT(5)
 176#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD	BIT(7)
 177
 178#define PCIE_PHY_CMN_REG24		0x90
 179#define PCIE_PHY_CMN_REG24_RX_EQ	BIT(6)
 180#define PCIE_PHY_CMN_REG24_RX_EQ_SEL	BIT(3)
 181
 182#define PCIE_PHY_CMN_REG26		0x98
 183#define PCIE_PHY_CMN_REG26_ATT_MODE	0xBC
 184
 185#define PHY_RX_OVRD_IN_LO 0x1005
 186#define PHY_RX_OVRD_IN_LO_RX_DATA_EN		BIT(5)
 187#define PHY_RX_OVRD_IN_LO_RX_PLL_EN		BIT(3)
 188
 189static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
 190{
 191	WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
 192		imx6_pcie->drvdata->variant != IMX8MQ_EP &&
 193		imx6_pcie->drvdata->variant != IMX8MM &&
 194		imx6_pcie->drvdata->variant != IMX8MM_EP &&
 195		imx6_pcie->drvdata->variant != IMX8MP &&
 196		imx6_pcie->drvdata->variant != IMX8MP_EP);
 197	return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
 198}
 199
 200static int imx95_pcie_init_phy(struct imx6_pcie *imx6_pcie)
 201{
 202	regmap_update_bits(imx6_pcie->iomuxc_gpr,
 203			IMX95_PCIE_SS_RW_REG_0,
 204			IMX95_PCIE_PHY_CR_PARA_SEL,
 205			IMX95_PCIE_PHY_CR_PARA_SEL);
 206
 207	regmap_update_bits(imx6_pcie->iomuxc_gpr,
 208			   IMX95_PCIE_PHY_GEN_CTRL,
 209			   IMX95_PCIE_REF_USE_PAD, 0);
 210	regmap_update_bits(imx6_pcie->iomuxc_gpr,
 211			   IMX95_PCIE_SS_RW_REG_0,
 212			   IMX95_PCIE_REF_CLKEN,
 213			   IMX95_PCIE_REF_CLKEN);
 214
 215	return 0;
 216}
 217
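/*
 * imx6_pcie_configure_type() covers both Root Complex and Endpoint
 * variants: the location of the device-type field comes from the drvdata
 * mode_off/mode_mask tables, and a second controller falls back to
 * entry 0 when it owns its own GPR block.
 */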
 218static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
 219{
 220	const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
 221	unsigned int mask, val, mode, id;
 222
 223	if (drvdata->mode == DW_PCIE_EP_TYPE)
 224		mode = PCI_EXP_TYPE_ENDPOINT;
 225	else
 226		mode = PCI_EXP_TYPE_ROOT_PORT;
 227
 228	id = imx6_pcie->controller_id;
 229
  230	/* A zero mode_mask[id] means each controller has its own GPR space, so use entry 0 */
 231	if (!drvdata->mode_mask[id])
 232		id = 0;
 233
 234	mask = drvdata->mode_mask[id];
 235	val = mode << (ffs(mask) - 1);
 236
 237	regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
 238}
 239
 240static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
 241{
 242	struct dw_pcie *pci = imx6_pcie->pci;
 243	bool val;
 244	u32 max_iterations = 10;
 245	u32 wait_counter = 0;
 246
 247	do {
 248		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
 249			PCIE_PHY_STAT_ACK;
 250		wait_counter++;
 251
 252		if (val == exp_val)
 253			return 0;
 254
 255		udelay(1);
 256	} while (wait_counter < max_iterations);
 257
 258	return -ETIMEDOUT;
 259}
 260
 261static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
 262{
 263	struct dw_pcie *pci = imx6_pcie->pci;
 264	u32 val;
 265	int ret;
 266
 267	val = PCIE_PHY_CTRL_DATA(addr);
 268	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
 269
 270	val |= PCIE_PHY_CTRL_CAP_ADR;
 271	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
 272
 273	ret = pcie_phy_poll_ack(imx6_pcie, true);
 274	if (ret)
 275		return ret;
 276
 277	val = PCIE_PHY_CTRL_DATA(addr);
 278	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
 279
 280	return pcie_phy_poll_ack(imx6_pcie, false);
 281}
 282
 283/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
 284static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
 285{
 286	struct dw_pcie *pci = imx6_pcie->pci;
 287	u32 phy_ctl;
 288	int ret;
 289
 290	ret = pcie_phy_wait_ack(imx6_pcie, addr);
 291	if (ret)
 292		return ret;
 293
 294	/* assert Read signal */
 295	phy_ctl = PCIE_PHY_CTRL_RD;
 296	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
 297
 298	ret = pcie_phy_poll_ack(imx6_pcie, true);
 299	if (ret)
 300		return ret;
 301
 302	*data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
 303
 304	/* deassert Read signal */
 305	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
 306
 307	return pcie_phy_poll_ack(imx6_pcie, false);
 308}
 309
 310static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
 311{
 312	struct dw_pcie *pci = imx6_pcie->pci;
 313	u32 var;
 314	int ret;
 315
 316	/* write addr */
 317	/* cap addr */
 318	ret = pcie_phy_wait_ack(imx6_pcie, addr);
 319	if (ret)
 320		return ret;
 321
 322	var = PCIE_PHY_CTRL_DATA(data);
 323	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
 324
 325	/* capture data */
 326	var |= PCIE_PHY_CTRL_CAP_DAT;
 327	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
 328
 329	ret = pcie_phy_poll_ack(imx6_pcie, true);
 330	if (ret)
 331		return ret;
 332
 333	/* deassert cap data */
 334	var = PCIE_PHY_CTRL_DATA(data);
 335	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
 336
 337	/* wait for ack de-assertion */
 338	ret = pcie_phy_poll_ack(imx6_pcie, false);
 339	if (ret)
 340		return ret;
 341
 342	/* assert wr signal */
 343	var = PCIE_PHY_CTRL_WR;
 344	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
 345
 346	/* wait for ack */
 347	ret = pcie_phy_poll_ack(imx6_pcie, true);
 348	if (ret)
 349		return ret;
 350
 351	/* deassert wr signal */
 352	var = PCIE_PHY_CTRL_DATA(data);
 353	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
 354
 355	/* wait for ack de-assertion */
 356	ret = pcie_phy_poll_ack(imx6_pcie, false);
 357	if (ret)
 358		return ret;
 359
 360	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);
 361
 362	return 0;
 363}
 364
 365static int imx8mq_pcie_init_phy(struct imx6_pcie *imx6_pcie)
 366{
 367	/* TODO: Currently this code assumes external oscillator is being used */
 368	regmap_update_bits(imx6_pcie->iomuxc_gpr,
 369			   imx6_pcie_grp_offset(imx6_pcie),
 370			   IMX8MQ_GPR_PCIE_REF_USE_PAD,
 371			   IMX8MQ_GPR_PCIE_REF_USE_PAD);
 372	/*
  373	 * According to the datasheet, PCIE_VPH is suggested to be 1.8V. If PCIE_VPH is
  374	 * supplied with 3.3V, VREG_BYPASS should be cleared to zero.
 375	 */
 376	if (imx6_pcie->vph && regulator_get_voltage(imx6_pcie->vph) > 3000000)
 377		regmap_update_bits(imx6_pcie->iomuxc_gpr,
 378				   imx6_pcie_grp_offset(imx6_pcie),
 379				   IMX8MQ_GPR_PCIE_VREG_BYPASS,
 380				   0);
 381
 382	return 0;
 383}
 384
 385static int imx7d_pcie_init_phy(struct imx6_pcie *imx6_pcie)
 386{
 387	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
 388
 389	return 0;
 390}
 391
 392static int imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
 393{
 394	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 395				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
 396
 397	/* configure constant input signal to the pcie ctrl and phy */
 398	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 399			   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
 400
 401	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
 402			   IMX6Q_GPR8_TX_DEEMPH_GEN1,
 403			   imx6_pcie->tx_deemph_gen1 << 0);
 404	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
 405			   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
 406			   imx6_pcie->tx_deemph_gen2_3p5db << 6);
 407	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
 408			   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
 409			   imx6_pcie->tx_deemph_gen2_6db << 12);
 410	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
 411			   IMX6Q_GPR8_TX_SWING_FULL,
 412			   imx6_pcie->tx_swing_full << 18);
 413	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
 414			   IMX6Q_GPR8_TX_SWING_LOW,
 415			   imx6_pcie->tx_swing_low << 25);
 416	return 0;
 417}
 418
 419static int imx6sx_pcie_init_phy(struct imx6_pcie *imx6_pcie)
 420{
 421	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 422			   IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2);
 423
 424	return imx6_pcie_init_phy(imx6_pcie);
 425}
 426
 427static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
 428{
 429	u32 val;
 430	struct device *dev = imx6_pcie->pci->dev;
 431
 432	if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
 433				     IOMUXC_GPR22, val,
 434				     val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
 435				     PHY_PLL_LOCK_WAIT_USLEEP_MAX,
 436				     PHY_PLL_LOCK_WAIT_TIMEOUT))
 437		dev_err(dev, "PCIe PLL lock timeout\n");
 438}
 439
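/*
 * With the clk_bulk conversion the PHY reference clock no longer has a
 * dedicated pointer; imx6_setup_phy_mpll() scans the clks[] array for the
 * "pcie_phy" entry before applying the usual MPLL multiplier/divider
 * overrides.
 */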
 440static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
 441{
 442	unsigned long phy_rate = 0;
 443	int mult, div;
 444	u16 val;
 445	int i;
 446
 447	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
 448		return 0;
 449
 450	for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
 451		if (strncmp(imx6_pcie->clks[i].id, "pcie_phy", 8) == 0)
 452			phy_rate = clk_get_rate(imx6_pcie->clks[i].clk);
 453
 454	switch (phy_rate) {
 455	case 125000000:
 456		/*
 457		 * The default settings of the MPLL are for a 125MHz input
 458		 * clock, so no need to reconfigure anything in that case.
 459		 */
 460		return 0;
 461	case 100000000:
 462		mult = 25;
 463		div = 0;
 464		break;
 465	case 200000000:
 466		mult = 25;
 467		div = 1;
 468		break;
 469	default:
 470		dev_err(imx6_pcie->pci->dev,
 471			"Unsupported PHY reference clock rate %lu\n", phy_rate);
 472		return -EINVAL;
 473	}
 474
 475	pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
 476	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
 477		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
 478	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
 479	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
 480	pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
 481
 482	pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
 483	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
 484		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
 485	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
 486	val |= PCIE_PHY_ATEOVRD_EN;
 487	pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
 488
 489	return 0;
 490}
 491
 492static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
 493{
 494	u16 tmp;
 495
 496	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
 497		return;
 498
 499	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
 500	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
 501		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
 502	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
 503
 504	usleep_range(2000, 3000);
 505
 506	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
 507	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
 508		  PHY_RX_OVRD_IN_LO_RX_PLL_EN);
 509	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
 510}
 511
 512#ifdef CONFIG_ARM
 513/*  Added for PCI abort handling */
 514static int imx6q_pcie_abort_handler(unsigned long addr,
 515		unsigned int fsr, struct pt_regs *regs)
 516{
 517	unsigned long pc = instruction_pointer(regs);
 518	unsigned long instr = *(unsigned long *)pc;
 519	int reg = (instr >> 12) & 15;
 520
 521	/*
 522	 * If the instruction being executed was a read,
 523	 * make it look like it read all-ones.
 524	 */
 525	if ((instr & 0x0c100000) == 0x04100000) {
 526		unsigned long val;
 527
 528		if (instr & 0x00400000)
 529			val = 255;
 530		else
 531			val = -1;
 532
 533		regs->uregs[reg] = val;
 534		regs->ARM_pc += 4;
 535		return 0;
 536	}
 537
 538	if ((instr & 0x0e100090) == 0x00100090) {
 539		regs->uregs[reg] = -1;
 540		regs->ARM_pc += 4;
 541		return 0;
 542	}
 543
 544	return 1;
 545}
 546#endif
 547
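/*
 * On SoCs where the PCIe controller and its PHY sit in separate power
 * domains, the PM core does not attach them automatically (dev->pm_domain
 * stays NULL), so attach the "pcie" and "pcie_phy" domains by name and add
 * device links that keep them powered while the controller is active.
 */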
 548static int imx6_pcie_attach_pd(struct device *dev)
 549{
 550	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
 551	struct device_link *link;
 552
 553	/* Do nothing when in a single power domain */
 554	if (dev->pm_domain)
 555		return 0;
 556
 557	imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
 558	if (IS_ERR(imx6_pcie->pd_pcie))
 559		return PTR_ERR(imx6_pcie->pd_pcie);
 560	/* Do nothing when power domain missing */
 561	if (!imx6_pcie->pd_pcie)
 562		return 0;
 563	link = device_link_add(dev, imx6_pcie->pd_pcie,
 564			DL_FLAG_STATELESS |
 565			DL_FLAG_PM_RUNTIME |
 566			DL_FLAG_RPM_ACTIVE);
 567	if (!link) {
 568		dev_err(dev, "Failed to add device_link to pcie pd.\n");
 569		return -EINVAL;
 570	}
 571
 572	imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
 573	if (IS_ERR(imx6_pcie->pd_pcie_phy))
 574		return PTR_ERR(imx6_pcie->pd_pcie_phy);
 575
 576	link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
 577			DL_FLAG_STATELESS |
 578			DL_FLAG_PM_RUNTIME |
 579			DL_FLAG_RPM_ACTIVE);
 580	if (!link) {
 581		dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
 582		return -EINVAL;
 583	}
 584
 585	return 0;
 586}
 587
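/*
 * Each SoC gates the PCIe reference clock differently: i.MX6Q/QP power up
 * the core PHY and set the REF_CLK enable in GPR1, i.MX6SX clears the PHY
 * test powerdown bit, the i.MX8M variants force the CLK_REQ override
 * (presumably so REF_CLK stays on regardless of CLKREQ#), and i.MX7D and
 * i.MX95 need nothing extra here.
 */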
 588static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
 589{
 590	unsigned int offset;
 591	int ret = 0;
 592
 593	switch (imx6_pcie->drvdata->variant) {
 594	case IMX6SX:
 595		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 596				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
 597		break;
 598	case IMX6QP:
 599	case IMX6Q:
 600		/* power up core phy and enable ref clock */
 601		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 602				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
 603		/*
 604		 * The async reset input needs the ref clock to synchronize
 605		 * internally. When the ref clock only comes up after reset,
 606		 * the internally synced reset time is too short to meet the
 607		 * requirement, so add a ~10us delay here.
 608		 */
 609		usleep_range(10, 100);
 610		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 611				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
 612		break;
 613	case IMX7D:
 614	case IMX95:
 615	case IMX95_EP:
 616		break;
 617	case IMX8MM:
 618	case IMX8MM_EP:
 619	case IMX8MQ:
 620	case IMX8MQ_EP:
 621	case IMX8MP:
 622	case IMX8MP_EP:
 623		offset = imx6_pcie_grp_offset(imx6_pcie);
 624		/*
 625		 * Drive the CLK_REQ override value low and enable the
 626		 * override so that REF_CLK is kept turned on.
 627		 */
 628		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
 629				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
 630				   0);
 631		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
 632				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
 633				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
 634		break;
 635	}
 636
 637	return ret;
 638}
 639
 640static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
 641{
 642	switch (imx6_pcie->drvdata->variant) {
 643	case IMX6QP:
 644	case IMX6Q:
 645		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 646				IMX6Q_GPR1_PCIE_REF_CLK_EN, 0);
 647		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 648				IMX6Q_GPR1_PCIE_TEST_PD,
 649				IMX6Q_GPR1_PCIE_TEST_PD);
 650		break;
 651	case IMX7D:
 652		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 653				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
 654				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
 655		break;
 656	default:
 657		break;
 658	}
 659}
 660
 661static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
 662{
 663	struct dw_pcie *pci = imx6_pcie->pci;
 664	struct device *dev = pci->dev;
 665	int ret;
 666
 667	ret = clk_bulk_prepare_enable(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
 668	if (ret)
 669		return ret;
 670
 671	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
 672	if (ret) {
 673		dev_err(dev, "unable to enable pcie ref clock\n");
 674		goto err_ref_clk;
 675	}
 676
 677	/* allow the clocks to stabilize */
 678	usleep_range(200, 500);
 679	return 0;
 680
 681err_ref_clk:
 682	clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
 683
 684	return ret;
 685}
 686
 687static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
 688{
 689	imx6_pcie_disable_ref_clk(imx6_pcie);
 690	clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
 691}
 692
 693static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
 694{
 695	reset_control_assert(imx6_pcie->pciephy_reset);
 696	reset_control_assert(imx6_pcie->apps_reset);
 697
 698	switch (imx6_pcie->drvdata->variant) {
 699	case IMX6SX:
 700		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 701				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
 702				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
 703		/* Force PCIe PHY reset */
 704		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
 705				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
 706				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
 707		break;
 708	case IMX6QP:
 709		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 710				   IMX6Q_GPR1_PCIE_SW_RST,
 711				   IMX6Q_GPR1_PCIE_SW_RST);
 712		break;
 713	case IMX6Q:
 714		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 715				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
 716		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 717				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
 718		break;
 719	default:
 720		break;
 721	}
 722
 723	/* Some boards don't have PCIe reset GPIO. */
 724	if (gpio_is_valid(imx6_pcie->reset_gpio))
 725		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
 726					imx6_pcie->gpio_active_high);
 727}
 728
 729static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
 730{
 731	struct dw_pcie *pci = imx6_pcie->pci;
 732	struct device *dev = pci->dev;
 733
 734	reset_control_deassert(imx6_pcie->pciephy_reset);
 735
 736	switch (imx6_pcie->drvdata->variant) {
 737	case IMX7D:
 738		/* Workaround for ERR010728, failure of PCI-e PLL VCO to
 739		 * oscillate, especially when cold.  This turns off "Duty-cycle
 740		 * Corrector" and other mysterious undocumented things.
 741		 */
 742		if (likely(imx6_pcie->phy_base)) {
 743			/* De-assert DCC_FB_EN */
 744			writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
 745			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
 746			/* Assert RX_EQS and RX_EQS_SEL */
 747			writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
 748				| PCIE_PHY_CMN_REG24_RX_EQ,
 749			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
 750			/* Assert ATT_MODE */
 751			writel(PCIE_PHY_CMN_REG26_ATT_MODE,
 752			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
 753		} else {
 754			dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle?\n");
 755		}
 756
 757		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
 758		break;
 759	case IMX6SX:
 760		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
 761				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
 762		break;
 763	case IMX6QP:
 764		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 765				   IMX6Q_GPR1_PCIE_SW_RST, 0);
 766
 767		usleep_range(200, 500);
 768		break;
 769	default:
 770		break;
 771	}
 772
 773	/* Some boards don't have PCIe reset GPIO. */
 774	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
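		/*
		 * Keep PERST# asserted for another 100 ms before releasing it,
		 * presumably so that power and the reference clock have been
		 * stable for the 100 ms the spec requires ahead of PERST#
		 * deassertion.
		 */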
 775		msleep(100);
 776		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
 777					!imx6_pcie->gpio_active_high);
 778		/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
 779		msleep(100);
 780	}
 781
 782	return 0;
 783}
 784
 785static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
 786{
 787	struct dw_pcie *pci = imx6_pcie->pci;
 788	struct device *dev = pci->dev;
 789	u32 tmp;
 790	unsigned int retries;
 791
 792	for (retries = 0; retries < 200; retries++) {
 793		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
 794		/* Test if the speed change finished. */
 795		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
 796			return 0;
 797		usleep_range(100, 1000);
 798	}
 799
 800	dev_err(dev, "Speed change timeout\n");
 801	return -ETIMEDOUT;
 802}
 803
 804static void imx6_pcie_ltssm_enable(struct device *dev)
 805{
 806	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
 807	const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
 808
 809	if (drvdata->ltssm_mask)
 810		regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
 811				   drvdata->ltssm_mask);
 812
 813	reset_control_deassert(imx6_pcie->apps_reset);
 814}
 815
 816static void imx6_pcie_ltssm_disable(struct device *dev)
 817{
 818	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
 819	const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
 820
 821	if (drvdata->ltssm_mask)
 822		regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off,
 823				   drvdata->ltssm_mask, 0);
 824
 825	reset_control_assert(imx6_pcie->apps_reset);
 826}
 827
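/*
 * Link bring-up strategy: advertise Gen1 only, start the LTSSM and wait for
 * the link, then, if a higher speed is allowed, restore the real supported
 * speed in LNKCAP and trigger a directed speed change before waiting for the
 * link to retrain.  A failed bring-up is not fatal: the PHY is reset and 0
 * is returned.
 */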
 828static int imx6_pcie_start_link(struct dw_pcie *pci)
 829{
 830	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
 831	struct device *dev = pci->dev;
 832	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
 833	u32 tmp;
 834	int ret;
 835
 836	/*
 837	 * Force Gen1 operation when starting the link.  In case the link is
 838	 * started in Gen2 mode, there is a possibility the devices on the
 839	 * bus will not be detected at all.  This happens with PCIe switches.
 840	 */
 841	dw_pcie_dbi_ro_wr_en(pci);
 842	tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
 843	tmp &= ~PCI_EXP_LNKCAP_SLS;
 844	tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
 845	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
 846	dw_pcie_dbi_ro_wr_dis(pci);
 847
 848	/* Start LTSSM. */
 849	imx6_pcie_ltssm_enable(dev);
 850
 851	ret = dw_pcie_wait_for_link(pci);
 852	if (ret)
 853		goto err_reset_phy;
 854
 855	if (pci->link_gen > 1) {
 856		/* Allow faster modes after the link is up */
 857		dw_pcie_dbi_ro_wr_en(pci);
 858		tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
 859		tmp &= ~PCI_EXP_LNKCAP_SLS;
 860		tmp |= pci->link_gen;
 861		dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
 862
 863		/*
 864		 * Start Directed Speed Change so the best possible
 865		 * speed both link partners support can be negotiated.
 866		 */
 867		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
 868		tmp |= PORT_LOGIC_SPEED_CHANGE;
 869		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
 870		dw_pcie_dbi_ro_wr_dis(pci);
 871
 872		if (imx6_pcie->drvdata->flags &
 873		    IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
 874			/*
 875			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
 876			 * from the i.MX6 family when no link speed transition
 877			 * occurs and the link stays at Gen1. In that case the
 878			 * bit is not cleared by hardware, which would make the
 879			 * speed-change wait below report a false failure, so
 880			 * only the i.MX6 variants perform this check.
 881			 */
 882
 883			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
 884			if (ret) {
 885				dev_err(dev, "Failed to bring link up!\n");
 886				goto err_reset_phy;
 887			}
 888		}
 889
 890		/* Make sure link training is finished as well! */
 891		ret = dw_pcie_wait_for_link(pci);
 892		if (ret)
 893			goto err_reset_phy;
 894	} else {
 895		dev_info(dev, "Link: Only Gen1 is enabled\n");
 896	}
 897
 898	imx6_pcie->link_is_up = true;
 899	tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
 900	dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
 901	return 0;
 902
 903err_reset_phy:
 904	imx6_pcie->link_is_up = false;
 905	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
 906		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
 907		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
 908	imx6_pcie_reset_phy(imx6_pcie);
 909	return 0;
 910}
 911
 912static void imx6_pcie_stop_link(struct dw_pcie *pci)
 913{
 914	struct device *dev = pci->dev;
 915
 916	/* Turn off PCIe LTSSM */
 917	imx6_pcie_ltssm_disable(dev);
 918}
 919
 920static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
 921{
 922	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 923	struct device *dev = pci->dev;
 924	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
 925	int ret;
 926
 927	if (imx6_pcie->vpcie) {
 928		ret = regulator_enable(imx6_pcie->vpcie);
 929		if (ret) {
 930			dev_err(dev, "failed to enable vpcie regulator: %d\n",
 931				ret);
 932			return ret;
 933		}
 934	}
 935
 936	imx6_pcie_assert_core_reset(imx6_pcie);
 937
 938	if (imx6_pcie->drvdata->init_phy)
 939		imx6_pcie->drvdata->init_phy(imx6_pcie);
 940
 941	imx6_pcie_configure_type(imx6_pcie);
 942
 943	ret = imx6_pcie_clk_enable(imx6_pcie);
 944	if (ret) {
 945		dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
 946		goto err_reg_disable;
 947	}
 948
 949	if (imx6_pcie->phy) {
 950		ret = phy_init(imx6_pcie->phy);
 951		if (ret) {
 952			dev_err(dev, "pcie PHY power up failed\n");
 953			goto err_clk_disable;
 954		}
 955	}
 956
 957	if (imx6_pcie->phy) {
 958		ret = phy_power_on(imx6_pcie->phy);
 959		if (ret) {
 960			dev_err(dev, "waiting for PHY ready timeout!\n");
 961			goto err_phy_off;
 962		}
 963	}
 964
 965	ret = imx6_pcie_deassert_core_reset(imx6_pcie);
 966	if (ret < 0) {
 967		dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
 968		goto err_phy_off;
 969	}
 970
 971	imx6_setup_phy_mpll(imx6_pcie);
 972
 973	return 0;
 974
 975err_phy_off:
 976	if (imx6_pcie->phy)
 977		phy_exit(imx6_pcie->phy);
 978err_clk_disable:
 979	imx6_pcie_clk_disable(imx6_pcie);
 980err_reg_disable:
 981	if (imx6_pcie->vpcie)
 982		regulator_disable(imx6_pcie->vpcie);
 983	return ret;
 984}
 985
 986static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
 987{
 988	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 989	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
 990
 991	if (imx6_pcie->phy) {
 992		if (phy_power_off(imx6_pcie->phy))
 993			dev_err(pci->dev, "unable to power off PHY\n");
 994		phy_exit(imx6_pcie->phy);
 995	}
 996	imx6_pcie_clk_disable(imx6_pcie);
 997
 998	if (imx6_pcie->vpcie)
 999		regulator_disable(imx6_pcie->vpcie);
1000}
1001
1002static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
1003	.init = imx6_pcie_host_init,
1004	.deinit = imx6_pcie_host_exit,
1005};
1006
1007static const struct dw_pcie_ops dw_pcie_ops = {
1008	.start_link = imx6_pcie_start_link,
1009	.stop_link = imx6_pcie_stop_link,
1010};
1011
1012static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
1013{
1014	enum pci_barno bar;
1015	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1016
1017	for (bar = BAR_0; bar <= BAR_5; bar++)
1018		dw_pcie_ep_reset_bar(pci, bar);
1019}
1020
1021static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
1022				  unsigned int type, u16 interrupt_num)
1023{
1024	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1025
1026	switch (type) {
1027	case PCI_IRQ_INTX:
1028		return dw_pcie_ep_raise_intx_irq(ep, func_no);
1029	case PCI_IRQ_MSI:
1030		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
1031	case PCI_IRQ_MSIX:
1032		return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
1033	default:
1034		dev_err(pci->dev, "UNKNOWN IRQ type\n");
1035		return -EINVAL;
1036	}
1037
1038	return 0;
1039}
1040
1041static const struct pci_epc_features imx8m_pcie_epc_features = {
1042	.linkup_notifier = false,
1043	.msi_capable = true,
1044	.msix_capable = false,
1045	.bar[BAR_1] = { .type = BAR_RESERVED, },
1046	.bar[BAR_3] = { .type = BAR_RESERVED, },
1047	.align = SZ_64K,
1048};
1049
1050/*
1051 * BAR#	| Default BAR enable	| Default BAR Type	| Default BAR Size	| BAR Sizing Scheme
1052 * ================================================================================================
1053 * BAR0	| Enable		| 64-bit		| 1 MB			| Programmable Size
1054 * BAR1	| Disable		| 32-bit		| 64 KB			| Fixed Size
1055 *        BAR1 should be disabled if BAR0 is 64-bit.
1056 * BAR2	| Enable		| 32-bit		| 1 MB			| Programmable Size
1057 * BAR3	| Enable		| 32-bit		| 64 KB			| Programmable Size
1058 * BAR4	| Enable		| 32-bit		| 1 MB			| Programmable Size
1059 * BAR5	| Enable		| 32-bit		| 64 KB			| Programmable Size
1060 */
1061static const struct pci_epc_features imx95_pcie_epc_features = {
1062	.msi_capable = true,
1063	.bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
1064	.align = SZ_4K,
1065};
1066
1067static const struct pci_epc_features*
1068imx6_pcie_ep_get_features(struct dw_pcie_ep *ep)
1069{
1070	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1071	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
1072
1073	return imx6_pcie->drvdata->epc_features;
1074}
1075
1076static const struct dw_pcie_ep_ops pcie_ep_ops = {
1077	.init = imx6_pcie_ep_init,
1078	.raise_irq = imx6_pcie_ep_raise_irq,
1079	.get_features = imx6_pcie_ep_get_features,
1080};
1081
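/*
 * Endpoint-mode setup.  dbi_base2 points at the DBI2 ("shadow") register
 * space the DWC core uses to program BAR masks; its offset from dbi is SoC
 * specific (1 MB on the i.MX8M endpoint variants, 4 KB otherwise), unless
 * the DT provides a dedicated "dbi2" region as on i.MX95.
 */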
1082static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
1083			   struct platform_device *pdev)
1084{
1085	int ret;
1086	unsigned int pcie_dbi2_offset;
1087	struct dw_pcie_ep *ep;
1088	struct dw_pcie *pci = imx6_pcie->pci;
1089	struct dw_pcie_rp *pp = &pci->pp;
1090	struct device *dev = pci->dev;
1091
1092	imx6_pcie_host_init(pp);
1093	ep = &pci->ep;
1094	ep->ops = &pcie_ep_ops;
1095
1096	switch (imx6_pcie->drvdata->variant) {
1097	case IMX8MQ_EP:
1098	case IMX8MM_EP:
1099	case IMX8MP_EP:
1100		pcie_dbi2_offset = SZ_1M;
1101		break;
1102	default:
1103		pcie_dbi2_offset = SZ_4K;
1104		break;
1105	}
1106
1107	pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset;
1108
1109	/*
1110	 * FIXME: Ideally, the dbi2 base address should come from DT. But since only IMX95 defines
1111	 * "dbi2" in DT, "dbi_base2" is set to NULL here for that platform alone so that the DWC
1112	 * core code can fetch it from DT. Once all platform DTs are fixed, this and the above
1113	 * "dbi_base2" setting should be removed.
1114	 */
1115	if (device_property_match_string(dev, "reg-names", "dbi2") >= 0)
1116		pci->dbi_base2 = NULL;
1117
1118	if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_SUPPORT_64BIT))
1119		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
1120
1121	ret = dw_pcie_ep_init(ep);
1122	if (ret) {
1123		dev_err(dev, "failed to initialize endpoint\n");
1124		return ret;
1125	}
1126	/* Start LTSSM. */
1127	imx6_pcie_ltssm_enable(dev);
1128
1129	return 0;
1130}
1131
1132static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
1133{
1134	struct device *dev = imx6_pcie->pci->dev;
1135
1136	/* Some variants have a turnoff reset in DT */
1137	if (imx6_pcie->turnoff_reset) {
1138		reset_control_assert(imx6_pcie->turnoff_reset);
1139		reset_control_deassert(imx6_pcie->turnoff_reset);
1140		goto pm_turnoff_sleep;
1141	}
1142
1143	/* Others poke directly at IOMUXC registers */
1144	switch (imx6_pcie->drvdata->variant) {
1145	case IMX6SX:
1146	case IMX6QP:
1147		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
1148				IMX6SX_GPR12_PCIE_PM_TURN_OFF,
1149				IMX6SX_GPR12_PCIE_PM_TURN_OFF);
1150		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
1151				IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
1152		break;
1153	default:
1154		dev_err(dev, "PME_Turn_Off not implemented\n");
1155		return;
1156	}
1157
1158	/*
1159	 * Components with an upstream port must respond to
1160	 * PME_Turn_Off with PME_TO_Ack but we can't check.
1161	 *
1162	 * The standard recommends a 1-10ms timeout after which to
1163	 * proceed anyway as if acks were received.
1164	 */
1165pm_turnoff_sleep:
1166	usleep_range(1000, 10000);
1167}
1168
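/*
 * The MSI Enable bit in the root port's own MSI capability does not survive
 * the controller being powered down across system suspend, so save it on
 * suspend and write it back (with DBI writes temporarily unlocked) on
 * resume.
 */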
1169static void imx6_pcie_msi_save_restore(struct imx6_pcie *imx6_pcie, bool save)
1170{
1171	u8 offset;
1172	u16 val;
1173	struct dw_pcie *pci = imx6_pcie->pci;
1174
1175	if (pci_msi_enabled()) {
1176		offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
1177		if (save) {
1178			val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
1179			imx6_pcie->msi_ctrl = val;
1180		} else {
1181			dw_pcie_dbi_ro_wr_en(pci);
1182			val = imx6_pcie->msi_ctrl;
1183			dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
1184			dw_pcie_dbi_ro_wr_dis(pci);
1185		}
1186	}
1187}
1188
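/*
 * System sleep, only on variants with IMX6_PCIE_FLAG_SUPPORTS_SUSPEND:
 * suspend saves the MSI state, broadcasts PME_Turn_Off, stops the LTSSM and
 * tears the host down; resume re-runs host init, restores the MSI state,
 * reprograms the root complex and, if the link was up before, restarts it.
 */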
1189static int imx6_pcie_suspend_noirq(struct device *dev)
1190{
1191	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
1192	struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
1193
1194	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
1195		return 0;
1196
1197	imx6_pcie_msi_save_restore(imx6_pcie, true);
1198	imx6_pcie_pm_turnoff(imx6_pcie);
1199	imx6_pcie_stop_link(imx6_pcie->pci);
1200	imx6_pcie_host_exit(pp);
1201
1202	return 0;
1203}
1204
1205static int imx6_pcie_resume_noirq(struct device *dev)
1206{
1207	int ret;
1208	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
1209	struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
1210
1211	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
1212		return 0;
1213
1214	ret = imx6_pcie_host_init(pp);
1215	if (ret)
1216		return ret;
1217	imx6_pcie_msi_save_restore(imx6_pcie, false);
1218	dw_pcie_setup_rc(pp);
1219
1220	if (imx6_pcie->link_is_up)
1221		imx6_pcie_start_link(imx6_pcie->pci);
1222
1223	return 0;
1224}
1225
1226static const struct dev_pm_ops imx6_pcie_pm_ops = {
1227	NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
1228				  imx6_pcie_resume_noirq)
1229};
1230
1231static int imx6_pcie_probe(struct platform_device *pdev)
1232{
1233	struct device *dev = &pdev->dev;
1234	struct dw_pcie *pci;
1235	struct imx6_pcie *imx6_pcie;
1236	struct device_node *np;
1237	struct resource *dbi_base;
1238	struct device_node *node = dev->of_node;
1239	int ret;
1240	u16 val;
1241	int i;
1242
1243	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
1244	if (!imx6_pcie)
1245		return -ENOMEM;
1246
1247	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
1248	if (!pci)
1249		return -ENOMEM;
1250
1251	pci->dev = dev;
1252	pci->ops = &dw_pcie_ops;
1253	pci->pp.ops = &imx6_pcie_host_ops;
1254
1255	imx6_pcie->pci = pci;
1256	imx6_pcie->drvdata = of_device_get_match_data(dev);
1257
1258	/* Find the PHY if one is defined, only imx7d uses it */
1259	np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
1260	if (np) {
1261		struct resource res;
1262
1263		ret = of_address_to_resource(np, 0, &res);
1264		if (ret) {
1265			dev_err(dev, "Unable to map PCIe PHY\n");
1266			return ret;
1267		}
1268		imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
1269		if (IS_ERR(imx6_pcie->phy_base))
1270			return PTR_ERR(imx6_pcie->phy_base);
1271	}
1272
1273	pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base);
1274	if (IS_ERR(pci->dbi_base))
1275		return PTR_ERR(pci->dbi_base);
1276
1277	/* Fetch GPIOs */
1278	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
1279	imx6_pcie->gpio_active_high = of_property_read_bool(node,
1280						"reset-gpio-active-high");
1281	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
1282		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
1283				imx6_pcie->gpio_active_high ?
1284					GPIOF_OUT_INIT_HIGH :
1285					GPIOF_OUT_INIT_LOW,
1286				"PCIe reset");
1287		if (ret) {
1288			dev_err(dev, "unable to get reset gpio\n");
1289			return ret;
1290		}
1291	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
1292		return imx6_pcie->reset_gpio;
1293	}
1294
1295	if (imx6_pcie->drvdata->clks_cnt >= IMX6_PCIE_MAX_CLKS)
1296		return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n");
1297
1298	for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
1299		imx6_pcie->clks[i].id = imx6_pcie->drvdata->clk_names[i];
1300
1301	/* Fetch clocks */
1302	ret = devm_clk_bulk_get(dev, imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
1303	if (ret)
1304		return ret;
1305
1306	if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHYDRV)) {
1307		imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
1308		if (IS_ERR(imx6_pcie->phy))
1309			return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
1310					     "failed to get pcie phy\n");
1311	}
1312
1313	if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_APP_RESET)) {
1314		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
1315		if (IS_ERR(imx6_pcie->apps_reset))
1316			return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset),
1317					     "failed to get pcie apps reset control\n");
1318	}
1319
1320	if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHY_RESET)) {
1321		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
1322		if (IS_ERR(imx6_pcie->pciephy_reset))
1323			return dev_err_probe(dev, PTR_ERR(imx6_pcie->pciephy_reset),
1324					     "Failed to get PCIEPHY reset control\n");
1325	}
1326
1327	switch (imx6_pcie->drvdata->variant) {
1328	case IMX8MQ:
1329	case IMX8MQ_EP:
1330	case IMX7D:
1331		if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
1332			imx6_pcie->controller_id = 1;
1333		break;
1334	default:
1335		break;
1336	}
1337
1338	/* Grab turnoff reset */
1339	imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
1340	if (IS_ERR(imx6_pcie->turnoff_reset)) {
1341		dev_err(dev, "Failed to get TURNOFF reset control\n");
1342		return PTR_ERR(imx6_pcie->turnoff_reset);
1343	}
1344
1345	if (imx6_pcie->drvdata->gpr) {
1346		/* Grab GPR config register range */
1347		imx6_pcie->iomuxc_gpr =
1348			 syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
1349		if (IS_ERR(imx6_pcie->iomuxc_gpr))
1350			return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr),
1351					     "unable to find iomuxc registers\n");
1352	}
1353
1354	if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_SERDES)) {
1355		static const struct regmap_config regmap_config = {
1356			.reg_bits = 32,
1357			.val_bits = 32,
1358			.reg_stride = 4,
1359		};
1360
1361		void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app");
1362
1363		if (IS_ERR(off))
1364			return dev_err_probe(dev, PTR_ERR(off),
1365					     "unable to find serdes registers\n");
1366
1367		imx6_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
1368		if (IS_ERR(imx6_pcie->iomuxc_gpr))
1369			return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr),
1370					     "unable to find iomuxc registers\n");
1371	}
1372
1373	/* Grab PCIe PHY Tx Settings */
1374	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
1375				 &imx6_pcie->tx_deemph_gen1))
1376		imx6_pcie->tx_deemph_gen1 = 0;
1377
1378	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
1379				 &imx6_pcie->tx_deemph_gen2_3p5db))
1380		imx6_pcie->tx_deemph_gen2_3p5db = 0;
1381
1382	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
1383				 &imx6_pcie->tx_deemph_gen2_6db))
1384		imx6_pcie->tx_deemph_gen2_6db = 20;
1385
1386	if (of_property_read_u32(node, "fsl,tx-swing-full",
1387				 &imx6_pcie->tx_swing_full))
1388		imx6_pcie->tx_swing_full = 127;
1389
1390	if (of_property_read_u32(node, "fsl,tx-swing-low",
1391				 &imx6_pcie->tx_swing_low))
1392		imx6_pcie->tx_swing_low = 127;
1393
1394	/* Limit link speed */
1395	pci->link_gen = 1;
1396	of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);
1397
1398	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
1399	if (IS_ERR(imx6_pcie->vpcie)) {
1400		if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
1401			return PTR_ERR(imx6_pcie->vpcie);
1402		imx6_pcie->vpcie = NULL;
1403	}
1404
1405	imx6_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
1406	if (IS_ERR(imx6_pcie->vph)) {
1407		if (PTR_ERR(imx6_pcie->vph) != -ENODEV)
1408			return PTR_ERR(imx6_pcie->vph);
1409		imx6_pcie->vph = NULL;
1410	}
1411
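	/*
	 * Illustrative device tree fragment for the optional properties
	 * consumed above (labels and values are examples only):
	 *
	 *	&pcie {
	 *		reset-gpio = <&gpio7 12 GPIO_ACTIVE_LOW>;
	 *		fsl,tx-deemph-gen1 = <0>;
	 *		fsl,tx-deemph-gen2-3p5db = <0>;
	 *		fsl,tx-deemph-gen2-6db = <20>;
	 *		fsl,tx-swing-full = <127>;
	 *		fsl,tx-swing-low = <127>;
	 *		fsl,max-link-speed = <2>;
	 *		vpcie-supply = <&reg_pcie>;
	 *		vph-supply = <&vgen5_reg>;
	 *	};
	 */
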
1412	platform_set_drvdata(pdev, imx6_pcie);
1413
1414	ret = imx6_pcie_attach_pd(dev);
1415	if (ret)
1416		return ret;
1417
1418	if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
1419		ret = imx6_add_pcie_ep(imx6_pcie, pdev);
1420		if (ret < 0)
1421			return ret;
1422	} else {
1423		ret = dw_pcie_host_init(&pci->pp);
1424		if (ret < 0)
1425			return ret;
1426
1427		if (pci_msi_enabled()) {
1428			u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
1429
1430			val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
1431			val |= PCI_MSI_FLAGS_ENABLE;
1432			dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
1433		}
1434	}
1435
1436	return 0;
1437}
1438
1439static void imx6_pcie_shutdown(struct platform_device *pdev)
1440{
1441	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
1442
1443	/* Bring down the link so the bootloader gets a clean state in case of reboot */
1444	imx6_pcie_assert_core_reset(imx6_pcie);
1445}
1446
1447static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"};
1448static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"};
1449static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"};
1450static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"};
1451
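/*
 * Per-variant configuration: controller quirks encoded in "flags", which
 * iomuxc GPR syscon (if any) carries the LTSSM enable and device-type
 * controls, the clock list, an optional PHY init hook, and, for the
 * endpoint variants, their pci_epc_features and DW_PCIE_EP_TYPE mode.
 */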
1452static const struct imx6_pcie_drvdata drvdata[] = {
1453	[IMX6Q] = {
1454		.variant = IMX6Q,
1455		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
1456			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
1457		.dbi_length = 0x200,
1458		.gpr = "fsl,imx6q-iomuxc-gpr",
1459		.clk_names = imx6q_clks,
1460		.clks_cnt = ARRAY_SIZE(imx6q_clks),
1461		.ltssm_off = IOMUXC_GPR12,
1462		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
1463		.mode_off[0] = IOMUXC_GPR12,
1464		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
1465		.init_phy = imx6_pcie_init_phy,
1466	},
1467	[IMX6SX] = {
1468		.variant = IMX6SX,
1469		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
1470			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
1471			 IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
1472		.gpr = "fsl,imx6q-iomuxc-gpr",
1473		.clk_names = imx6sx_clks,
1474		.clks_cnt = ARRAY_SIZE(imx6sx_clks),
1475		.ltssm_off = IOMUXC_GPR12,
1476		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
1477		.mode_off[0] = IOMUXC_GPR12,
1478		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
1479		.init_phy = imx6sx_pcie_init_phy,
1480	},
1481	[IMX6QP] = {
1482		.variant = IMX6QP,
1483		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
1484			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
1485			 IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
1486		.dbi_length = 0x200,
1487		.gpr = "fsl,imx6q-iomuxc-gpr",
1488		.clk_names = imx6q_clks,
1489		.clks_cnt = ARRAY_SIZE(imx6q_clks),
1490		.ltssm_off = IOMUXC_GPR12,
1491		.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
1492		.mode_off[0] = IOMUXC_GPR12,
1493		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
1494		.init_phy = imx6_pcie_init_phy,
1495	},
1496	[IMX7D] = {
1497		.variant = IMX7D,
1498		.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
1499			 IMX6_PCIE_FLAG_HAS_APP_RESET |
1500			 IMX6_PCIE_FLAG_HAS_PHY_RESET,
1501		.gpr = "fsl,imx7d-iomuxc-gpr",
1502		.clk_names = imx6q_clks,
1503		.clks_cnt = ARRAY_SIZE(imx6q_clks),
1504		.mode_off[0] = IOMUXC_GPR12,
1505		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
1506		.init_phy = imx7d_pcie_init_phy,
1507	},
1508	[IMX8MQ] = {
1509		.variant = IMX8MQ,
1510		.flags = IMX6_PCIE_FLAG_HAS_APP_RESET |
1511			 IMX6_PCIE_FLAG_HAS_PHY_RESET,
1512		.gpr = "fsl,imx8mq-iomuxc-gpr",
1513		.clk_names = imx8mq_clks,
1514		.clks_cnt = ARRAY_SIZE(imx8mq_clks),
1515		.mode_off[0] = IOMUXC_GPR12,
1516		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
1517		.mode_off[1] = IOMUXC_GPR12,
1518		.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
1519		.init_phy = imx8mq_pcie_init_phy,
1520	},
1521	[IMX8MM] = {
1522		.variant = IMX8MM,
1523		.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
1524			 IMX6_PCIE_FLAG_HAS_PHYDRV |
1525			 IMX6_PCIE_FLAG_HAS_APP_RESET,
1526		.gpr = "fsl,imx8mm-iomuxc-gpr",
1527		.clk_names = imx8mm_clks,
1528		.clks_cnt = ARRAY_SIZE(imx8mm_clks),
1529		.mode_off[0] = IOMUXC_GPR12,
1530		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
1531	},
1532	[IMX8MP] = {
1533		.variant = IMX8MP,
1534		.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
1535			 IMX6_PCIE_FLAG_HAS_PHYDRV |
1536			 IMX6_PCIE_FLAG_HAS_APP_RESET,
1537		.gpr = "fsl,imx8mp-iomuxc-gpr",
1538		.clk_names = imx8mm_clks,
1539		.clks_cnt = ARRAY_SIZE(imx8mm_clks),
1540		.mode_off[0] = IOMUXC_GPR12,
1541		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
1542	},
1543	[IMX95] = {
1544		.variant = IMX95,
1545		.flags = IMX6_PCIE_FLAG_HAS_SERDES,
1546		.clk_names = imx8mq_clks,
1547		.clks_cnt = ARRAY_SIZE(imx8mq_clks),
1548		.ltssm_off = IMX95_PE0_GEN_CTRL_3,
1549		.ltssm_mask = IMX95_PCIE_LTSSM_EN,
1550		.mode_off[0]  = IMX95_PE0_GEN_CTRL_1,
1551		.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
1552		.init_phy = imx95_pcie_init_phy,
1553	},
1554	[IMX8MQ_EP] = {
1555		.variant = IMX8MQ_EP,
1556		.flags = IMX6_PCIE_FLAG_HAS_APP_RESET |
1557			 IMX6_PCIE_FLAG_HAS_PHY_RESET,
1558		.mode = DW_PCIE_EP_TYPE,
1559		.gpr = "fsl,imx8mq-iomuxc-gpr",
1560		.clk_names = imx8mq_clks,
1561		.clks_cnt = ARRAY_SIZE(imx8mq_clks),
1562		.mode_off[0] = IOMUXC_GPR12,
1563		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
1564		.mode_off[1] = IOMUXC_GPR12,
1565		.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
1566		.epc_features = &imx8m_pcie_epc_features,
1567		.init_phy = imx8mq_pcie_init_phy,
1568	},
1569	[IMX8MM_EP] = {
1570		.variant = IMX8MM_EP,
1571		.flags = IMX6_PCIE_FLAG_HAS_PHYDRV,
1572		.mode = DW_PCIE_EP_TYPE,
1573		.gpr = "fsl,imx8mm-iomuxc-gpr",
1574		.clk_names = imx8mm_clks,
1575		.clks_cnt = ARRAY_SIZE(imx8mm_clks),
1576		.mode_off[0] = IOMUXC_GPR12,
1577		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
1578		.epc_features = &imx8m_pcie_epc_features,
1579	},
1580	[IMX8MP_EP] = {
1581		.variant = IMX8MP_EP,
1582		.flags = IMX6_PCIE_FLAG_HAS_PHYDRV,
1583		.mode = DW_PCIE_EP_TYPE,
1584		.gpr = "fsl,imx8mp-iomuxc-gpr",
1585		.clk_names = imx8mm_clks,
1586		.clks_cnt = ARRAY_SIZE(imx8mm_clks),
1587		.mode_off[0] = IOMUXC_GPR12,
1588		.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
1589		.epc_features = &imx8m_pcie_epc_features,
1590	},
1591	[IMX95_EP] = {
1592		.variant = IMX95_EP,
1593		.flags = IMX6_PCIE_FLAG_HAS_SERDES |
1594			 IMX6_PCIE_FLAG_SUPPORT_64BIT,
1595		.clk_names = imx8mq_clks,
1596		.clks_cnt = ARRAY_SIZE(imx8mq_clks),
1597		.ltssm_off = IMX95_PE0_GEN_CTRL_3,
1598		.ltssm_mask = IMX95_PCIE_LTSSM_EN,
1599		.mode_off[0]  = IMX95_PE0_GEN_CTRL_1,
1600		.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
1601		.init_phy = imx95_pcie_init_phy,
1602		.epc_features = &imx95_pcie_epc_features,
1603		.mode = DW_PCIE_EP_TYPE,
1604	},
1605};
1606
1607static const struct of_device_id imx6_pcie_of_match[] = {
1608	{ .compatible = "fsl,imx6q-pcie",  .data = &drvdata[IMX6Q],  },
1609	{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
1610	{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
1611	{ .compatible = "fsl,imx7d-pcie",  .data = &drvdata[IMX7D],  },
1612	{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
1613	{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
1614	{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
1615	{ .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], },
1616	{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
1617	{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
1618	{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
1619	{ .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
1620	{},
1621};
1622
1623static struct platform_driver imx6_pcie_driver = {
1624	.driver = {
1625		.name	= "imx6q-pcie",
1626		.of_match_table = imx6_pcie_of_match,
1627		.suppress_bind_attrs = true,
1628		.pm = &imx6_pcie_pm_ops,
1629		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1630	},
1631	.probe    = imx6_pcie_probe,
1632	.shutdown = imx6_pcie_shutdown,
1633};
1634
1635static void imx6_pcie_quirk(struct pci_dev *dev)
1636{
1637	struct pci_bus *bus = dev->bus;
1638	struct dw_pcie_rp *pp = bus->sysdata;
1639
1640	/* Bus parent is the PCI bridge, its parent is this platform driver */
1641	if (!bus->dev.parent || !bus->dev.parent->parent)
1642		return;
1643
1644	/* Make sure we only quirk devices associated with this driver */
1645	if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
1646		return;
1647
1648	if (pci_is_root_bus(bus)) {
1649		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1650		struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
1651
1652		/*
1653		 * Limit config length to avoid the kernel reading beyond
1654		 * the register set and causing an abort on i.MX 6Quad
1655		 */
1656		if (imx6_pcie->drvdata->dbi_length) {
1657			dev->cfg_size = imx6_pcie->drvdata->dbi_length;
1658			dev_info(&dev->dev, "Limiting cfg_size to %d\n",
1659					dev->cfg_size);
1660		}
1661	}
1662}
1663DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
1664			PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);
1665
1666static int __init imx6_pcie_init(void)
1667{
1668#ifdef CONFIG_ARM
1669	struct device_node *np;
1670
1671	np = of_find_matching_node(NULL, imx6_pcie_of_match);
1672	if (!np)
1673		return -ENODEV;
1674	of_node_put(np);
1675
1676	/*
1677	 * Since probe() can be deferred we need to make sure that
1678	 * hook_fault_code is not called after __init memory is freed
1679	 * by the kernel. Since imx6q_pcie_abort_handler() touches no
1680	 * driver state, we can install the handler here without
1681	 * risking it accessing some uninitialized driver state.
1682	 */
1683	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
1684			"external abort on non-linefetch");
1685#endif
1686
1687	return platform_driver_register(&imx6_pcie_driver);
1688}
1689device_initcall(imx6_pcie_init);