// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 *		http://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>

#include "pcie-designware.h"

#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)

enum imx6_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
};

struct imx6_pcie {
	struct dw_pcie		*pci;
	int			reset_gpio;
	bool			gpio_active_high;
	struct clk		*pcie_bus;
	struct clk		*pcie_phy;
	struct clk		*pcie_inbound_axi;
	struct clk		*pcie;
	struct regmap		*iomuxc_gpr;
	struct reset_control	*pciephy_reset;
	struct reset_control	*apps_reset;
	enum imx6_pcie_variants variant;
	u32			tx_deemph_gen1;
	u32			tx_deemph_gen2_3p5db;
	u32			tx_deemph_gen2_6db;
	u32			tx_swing_full;
	u32			tx_swing_low;
	int			link_gen;
	struct regulator	*vpcie;
};

/* Parameters for waiting for the PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_MAX_RETRIES	2000
#define PHY_PLL_LOCK_WAIT_USLEEP_MIN	50
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_LCR				0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf

#define PCIE_RC_LCSR				0x80

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK		(0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK			(1 << 15)
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING	(1 << 29)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP		(1 << 4)

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC 16

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)

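/*
 * The PHY registers at addresses 0x1000 and above are not memory-mapped;
 * they are reached through an indirect interface built from PCIE_PHY_CTRL
 * and PCIE_PHY_STAT.  Every transaction follows the same handshake: drive
 * the address or data on the low 16 bits of PCIE_PHY_CTRL, pulse the
 * relevant capture/read/write strobe bit, and poll the ack bit in
 * PCIE_PHY_STAT for assertion and then de-assertion.  The helpers below
 * implement that sequence.
 */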
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	int ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val, phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}

static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}

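/*
 * Force the PHY receiver through a reset cycle by asserting the RX data
 * and RX PLL enable overrides for a few milliseconds and then releasing
 * them again.
 */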
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
	u32 tmp;

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		  PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
}

/*
 * Added for PCI abort handling: fixup handler for external aborts raised
 * when a load fails (e.g. a config space read while the PCIe link is
 * down).  Faulting loads are made to return all-ones and the offending
 * instruction is skipped; anything else is left unhandled.
 */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);
	unsigned long instr = *(unsigned long *)pc;
	int reg = (instr >> 12) & 15;

	/*
	 * If the instruction being executed was a read,
	 * make it look like it read all-ones.
	 */
	if ((instr & 0x0c100000) == 0x04100000) {
		unsigned long val;

		if (instr & 0x00400000)
			val = 255;
		else
			val = -1;

		regs->uregs[reg] = val;
		regs->ARM_pc += 4;
		return 0;
	}

	if ((instr & 0x0e100090) == 0x00100090) {
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
		return 0;
	}

	return 1;
}

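/*
 * Put the PCIe core and PHY into reset.  The mechanism is variant
 * specific: i.MX7D uses dedicated reset controls, while the i.MX6 parts
 * poke the relevant bits in the IOMUXC GPR registers.  The optional
 * vpcie supply is switched off as well.
 */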
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	switch (imx6_pcie->variant) {
	case IMX7D:
		reset_control_assert(imx6_pcie->pciephy_reset);
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST,
				   IMX6Q_GPR1_PCIE_SW_RST);
		break;
	case IMX6Q:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
		break;
	}

	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		int ret = regulator_disable(imx6_pcie->vpcie);

		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret = 0;

	switch (imx6_pcie->variant) {
	case IMX6SX:
		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
		if (ret) {
			dev_err(dev, "unable to enable pcie_axi clock\n");
			break;
		}

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
		break;
	case IMX6QP: 		/* FALLTHROUGH */
	case IMX6Q:
		/* power up core phy and enable ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
		/*
		 * The async reset input needs the ref clock to sync
		 * internally.  If the ref clock only comes up after reset,
		 * the internally synced reset time is too short to meet
		 * the requirement, so add a ~10us delay here.
		 */
		udelay(10);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
		break;
	case IMX7D:
		break;
	}

	return ret;
}

static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
{
	u32 val;
	unsigned int retries;
	struct device *dev = imx6_pcie->pci->dev;

	for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) {
		regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val);

		if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
			return;

		usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
			     PHY_PLL_LOCK_WAIT_USLEEP_MAX);
	}

	dev_err(dev, "PCIe PLL lock timeout\n");
}

static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
		ret = regulator_enable(imx6_pcie->vpcie);
		if (ret) {
			dev_err(dev, "failed to enable vpcie regulator: %d\n",
				ret);
			return;
		}
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(dev, "unable to enable pcie_phy clock\n");
		goto err_pcie_phy;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie ref clock\n");
		goto err_ref_clk;
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					imx6_pcie->gpio_active_high);
		msleep(100);
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					!imx6_pcie->gpio_active_high);
	}

	switch (imx6_pcie->variant) {
	case IMX7D:
		reset_control_deassert(imx6_pcie->pciephy_reset);
		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST, 0);

		usleep_range(200, 500);
		break;
	case IMX6Q:		/* Nothing to do */
		break;
	}

	return;

err_ref_clk:
	clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		ret = regulator_disable(imx6_pcie->vpcie);
		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

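/*
 * Static PHY setup done through the IOMUXC GPR registers: reference clock
 * selection on i.MX7D, RX equalization on i.MX6SX, TX de-emphasis and
 * swing values taken from the device tree, and the device type, which is
 * always configured as a Root Complex here.
 */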
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->variant) {
	case IMX7D:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
				   IMX6SX_GPR12_PCIE_RX_EQ_2);
		/* FALLTHROUGH */
	default:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

		/* configure constant input signal to the pcie ctrl and phy */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
				   imx6_pcie->tx_deemph_gen1 << 0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
				   imx6_pcie->tx_deemph_gen2_6db << 12);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_FULL,
				   imx6_pcie->tx_swing_full << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_LOW,
				   imx6_pcie->tx_swing_low << 25);
		break;
	}

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
}

static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;

	/* check if the link is up or not */
	if (!dw_pcie_wait_for_link(pci))
		return 0;

	dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
	return -ETIMEDOUT;
}

static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(dev, "Speed change timeout\n");
	return -EINVAL;
}

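/*
 * Bring up the link: start training at Gen1, then, if the DT-provided
 * link_gen allows it, raise the advertised rate to Gen2 and trigger a
 * directed speed change.  On failure the PHY is reset so a later retry
 * starts from a clean state.
 */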
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link.  In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all.  This happens with PCIe switches.
	 */
	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

	/* Start LTSSM. */
	if (imx6_pcie->variant == IMX7D)
		reset_control_deassert(imx6_pcie->apps_reset);
	else
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);

	ret = imx6_pcie_wait_for_link(imx6_pcie);
	if (ret)
		goto err_reset_phy;

	if (imx6_pcie->link_gen == 2) {
		/* Allow Gen2 mode after the link is up. */
		tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
		dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);

		if (imx6_pcie->variant != IMX7D) {
			/*
			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
			 * from i.MX6 family when no link speed transition
			 * occurs and we go Gen1 -> yep, Gen1. The difference
			 * is that, in such case, it will not be cleared by HW
			 * which will cause the following code to report false
			 * failure.
			 */

			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
			if (ret) {
				dev_err(dev, "Failed to bring link up!\n");
				goto err_reset_phy;
			}
		}

		/* Make sure link training is finished as well! */
		ret = imx6_pcie_wait_for_link(imx6_pcie);
		if (ret) {
			dev_err(dev, "Failed to bring link up!\n");
			goto err_reset_phy;
		}
	} else {
		dev_info(dev, "Link: Gen2 disabled\n");
	}

	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
	dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
	return 0;

err_reset_phy:
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
	imx6_pcie_reset_phy(imx6_pcie);
	return ret;
}

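/*
 * DesignWare host_init callback: cycle the core through reset with the
 * PHY configured, set up the Root Complex, train the link, and then
 * initialize MSI support if it is enabled.
 */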
static int imx6_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	dw_pcie_setup_rc(pp);
	imx6_pcie_establish_link(imx6_pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	return 0;
}

static int imx6_pcie_link_up(struct dw_pcie *pci)
{
	return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) &
			PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
}

static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
	.host_init = imx6_pcie_host_init,
};

static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
			      struct platform_device *pdev)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq <= 0) {
			dev_err(dev, "failed to get MSI irq\n");
			return -ENODEV;
		}
	}

	pp->root_bus_nr = -1;
	pp->ops = &imx6_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = imx6_pcie_link_up,
};

static int imx6_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx6_pcie *imx6_pcie;
	struct resource *dbi_base;
	struct device_node *node = dev->of_node;
	int ret;

	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	imx6_pcie->pci = pci;
	imx6_pcie->variant =
		(enum imx6_pcie_variants)of_device_get_match_data(dev);

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
	imx6_pcie->gpio_active_high = of_property_read_bool(node,
						"reset-gpio-active-high");
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
				imx6_pcie->gpio_active_high ?
					GPIOF_OUT_INIT_HIGH :
					GPIOF_OUT_INIT_LOW,
				"PCIe reset");
		if (ret) {
			dev_err(dev, "unable to get reset gpio\n");
			return ret;
		}
	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
		return imx6_pcie->reset_gpio;
	}

	/* Fetch clocks */
	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pcie_phy)) {
		dev_err(dev, "pcie_phy clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_phy);
	}

	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus)) {
		dev_err(dev, "pcie_bus clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_bus);
	}

	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie)) {
		dev_err(dev, "pcie clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie);
	}

	switch (imx6_pcie->variant) {
	case IMX6SX:
		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
							   "pcie_inbound_axi");
		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
			dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
		}
		break;
	case IMX7D:
		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
									    "pciephy");
		if (IS_ERR(imx6_pcie->pciephy_reset)) {
			dev_err(dev, "Failed to get PCIEPHY reset control\n");
			return PTR_ERR(imx6_pcie->pciephy_reset);
		}

		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
									 "apps");
		if (IS_ERR(imx6_pcie->apps_reset)) {
			dev_err(dev, "Failed to get PCIE APPS reset control\n");
			return PTR_ERR(imx6_pcie->apps_reset);
		}
		break;
	default:
		break;
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	/* Grab PCIe PHY Tx Settings */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx6_pcie->tx_deemph_gen1))
		imx6_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx6_pcie->tx_deemph_gen2_3p5db))
		imx6_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx6_pcie->tx_deemph_gen2_6db))
		imx6_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx6_pcie->tx_swing_full))
		imx6_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx6_pcie->tx_swing_low))
		imx6_pcie->tx_swing_low = 127;

	/* Limit link speed */
	ret = of_property_read_u32(node, "fsl,max-link-speed",
				   &imx6_pcie->link_gen);
	if (ret)
		imx6_pcie->link_gen = 1;

	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
	if (IS_ERR(imx6_pcie->vpcie)) {
		if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		imx6_pcie->vpcie = NULL;
	}

	platform_set_drvdata(pdev, imx6_pcie);

	ret = imx6_add_pcie_port(imx6_pcie, pdev);
	if (ret < 0)
		return ret;

	return 0;
}

static void imx6_pcie_shutdown(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

	/* bring down link, so bootloader gets clean state in case of reboot */
	imx6_pcie_assert_core_reset(imx6_pcie);
}

static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie",  .data = (void *)IMX6Q,  },
	{ .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
	{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
	{ .compatible = "fsl,imx7d-pcie",  .data = (void *)IMX7D,  },
	{},
};

static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name	= "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe    = imx6_pcie_probe,
	.shutdown = imx6_pcie_shutdown,
};

static int __init imx6_pcie_init(void)
{
	/*
	 * Since probe() can be deferred we need to make sure that
	 * hook_fault_code is not called after __init memory is freed
	 * by the kernel.  Since imx6q_pcie_abort_handler() does not
	 * touch any driver state, we can install the handler here
	 * without risking it accessing uninitialized driver data.
	 */
	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
			"external abort on non-linefetch");

	return platform_driver_register(&imx6_pcie_driver);
}
device_initcall(imx6_pcie_init);