// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define PCIE20_PARF_SYS_CTRL 0x00
#define MST_WAKEUP_EN BIT(13)
#define SLV_WAKEUP_EN BIT(12)
#define MSTR_ACLK_CGC_DIS BIT(10)
#define SLV_ACLK_CGC_DIS BIT(9)
#define CORE_CLK_CGC_DIS BIT(6)
#define AUX_PWR_DET BIT(4)
#define L23_CLK_RMV_DIS BIT(2)
#define L1_CLK_RMV_DIS BIT(1)

#define PCIE20_PARF_PHY_CTRL 0x40
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16)

#define PCIE20_PARF_PHY_REFCLK 0x4C
#define PHY_REFCLK_SSP_EN BIT(16)
#define PHY_REFCLK_USE_PAD BIT(12)

#define PCIE20_PARF_DBI_BASE_ADDR 0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
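/* BIT(4) of MHI_CLOCK_RESET_CTRL is presumed to be the MHI bypass bit (named BYPASS in later kernels) */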
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
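/* BIT(31) of the ADDR_HALT registers appears to be the enable bit (named EN in later kernels) */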
#define PCIE20_PARF_LTSSM 0x1B0
#define PCIE20_PARF_SID_OFFSET 0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
#define PCIE20_PARF_DEVICE_TYPE 0x1000

#define PCIE20_ELBI_SYS_CTRL 0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)

#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
#define CFG_BRIDGE_SB_INIT BIT(0)

#define PCIE20_CAP 0x70
#define PCIE20_DEVICE_CONTROL2_STATUS2 (PCIE20_CAP + PCI_EXP_DEVCTL2)
#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + PCI_EXP_LNKCAP)
#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14)
#define PCIE_CAP_LINK1_VAL 0x2FD7F

#define PCIE20_PARF_Q2A_FLUSH 0x1AC

#define PCIE20_MISC_CONTROL_1_REG 0x8BC
#define DBI_RO_WR_EN 1
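/* DBI_RO_WR_EN is bit 0 of MISC_CONTROL_1: it unlocks otherwise read-only DBI registers for writing */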

#define PERST_DELAY_US 1000

/* PARF registers */
#define PCIE20_PARF_PCS_DEEMPH 0x34
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0)

#define PCIE20_PARF_PCS_SWING 0x38
#define PCS_SWING_TX_SWING_FULL(x) ((x) << 8)
#define PCS_SWING_TX_SWING_LOW(x) ((x) << 0)

#define PCIE20_PARF_CONFIG_BITS 0x50
#define PHY_RX0_EQ(x) ((x) << 24)

#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
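/* 0x10000000 = 256 MB of slave address space */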
#define SLV_ADDR_SPACE_SZ 0x10000000

#define PCIE20_LNK_CONTROL2_LINK_STATUS2 0xa0

#define DEVICE_TYPE_RC 0x4

#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
#define QCOM_PCIE_2_1_0_MAX_CLOCKS 5
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct reset_control *ext_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_4_0_MAX_CLOCKS 4
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[6];
	struct regulator_bulk_data supplies[2];
	struct reset_control *pci_reset;
	struct clk *pipe_clk;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
};

struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf; /* DT parf */
	void __iomem *elbi; /* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	const struct qcom_pcie_ops *ops;
	int gen;
};

#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)

static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms (Tpvperl) */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (dw_pcie_link_up(pci))
		return 0;

	/* Enable Link Training state machine */
	if (pcie->ops->ltssm_enable)
		pcie->ops->ltssm_enable(pcie);

	return dw_pcie_wait_for_link(pci);
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "core";
	res->clks[2].id = "phy";
	res->clks[3].id = "aux";
	res->clks[4].id = "ref";

	/* iface, core, phy are required */
	ret = devm_clk_bulk_get(dev, 3, res->clks);
	if (ret < 0)
		return ret;

	/* aux, ref are optional */
	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
	if (ret < 0)
		return ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
	if (IS_ERR(res->ext_reset))
		return PTR_ERR(res->ext_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->ext_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ext reset\n");
		goto err_deassert_ext;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_phy;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_pci;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_por;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_axi;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		goto err_clks;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
		       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PCIE20_PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	if (pcie->gen == 1) {
		val = readl(pci->dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2);
		val |= PCI_EXP_LNKSTA_CLS_2_5GB;
		writel(val, pci->dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2);
	}

	/* Set the Max TLP size to 2K, instead of using default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;

err_clks:
	reset_control_assert(res->axi_reset);
err_deassert_axi:
	reset_control_assert(res->por_reset);
err_deassert_por:
	reset_control_assert(res->pci_reset);
err_deassert_pci:
	reset_control_assert(res->phy_reset);
err_deassert_phy:
	reset_control_assert(res->ext_reset);
err_deassert_ext:
	reset_control_assert(res->ahb_reset);
err_deassert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		return ret;
	}

	return 0;
}

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relate to the PHY or are secure clocks, but
		 * are controlled here for IPQ4019.
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i;
	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
				    "axi_m_sticky", "sticky",
				    "ahb", "sleep", };

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->axi_m_clk = devm_clk_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_clk))
		return PTR_ERR(res->axi_m_clk);

	res->axi_s_clk = devm_clk_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_clk))
		return PTR_ERR(res->axi_s_clk);

	res->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(res->ahb_clk))
		return PTR_ERR(res->ahb_clk);

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
		if (IS_ERR(res->rst[i]))
			return PTR_ERR(res->rst[i]);
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i, ret;
	u32 val;

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_assert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
			return ret;
		}
	}

	usleep_range(2000, 2500);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_deassert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
				ret);
			return ret;
		}
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_iface;
	}

	ret = clk_prepare_enable(res->axi_m_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->axi_s_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi slave clock\n");
		goto err_clk_axi_s;
	}

	ret = clk_prepare_enable(res->ahb_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ahb clock\n");
		goto err_clk_ahb;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PCIE20_PARF_SYS_CTRL);
	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);

	val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base +
		PCIE20_DEVICE_CONTROL2_STATUS2);

	return 0;

err_clk_aux:
	clk_disable_unprepare(res->ahb_clk);
err_clk_ahb:
	clk_disable_unprepare(res->axi_s_clk);
err_clk_axi_s:
	clk_disable_unprepare(res->axi_m_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->iface);
err_clk_iface:
	/*
	 * Not checking for failure, will anyway return
	 * the original failure in 'ret'.
	 */
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		reset_control_assert(res->rst[i]);

	return ret;
}

static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "cfg";
	res->clks[2].id = "bus_master";
	res->clks[3].id = "bus_slave";
	res->clks[4].id = "slave_q2a";
	res->clks[5].id = "tbu";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot assert pci reset\n");
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_disable_clocks;
	}

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		goto err_disable_clocks;
	}

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_disable_clocks:
	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	return clk_prepare_enable(res->pipe_clk);
}

static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_disable_unprepare(res->pipe_clk);
}

static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

static int qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->ops->post_init) {
		ret = pcie->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	dw_pcie_setup_rc(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;

	return 0;
err:
	qcom_ep_reset_assert(pcie);
	if (pcie->ops->post_deinit)
		pcie->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);

	return ret;
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};

/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0 Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.post_init = qcom_pcie_post_init_2_7_0,
	.post_deinit = qcom_pcie_post_deinit_2_7_0,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};

static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
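	/*
	 * Note: pm_runtime_get_sync() increments the usage counter even on
	 * failure, which is why the error path below does pm_runtime_put().
	 */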
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->ops = of_device_get_match_data(dev);

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	pcie->gen = of_pci_get_max_link_speed(pdev->dev.of_node);
	if (pcie->gen < 0)
		pcie->gen = 2;

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base)) {
		ret = PTR_ERR(pci->dbi_base);
		goto err_pm_runtime_put;
	}

	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = pcie->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0) {
			ret = pp->msi_irq;
			goto err_pm_runtime_put;
		}
	}

	/* err_pm_runtime_put already disables runtime PM; don't disable it here as well */
	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_pm_runtime_put;
	}

	return 0;

err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
	{ }
};

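/*
 * These root ports power up reporting a device class rather than a
 * PCI-to-PCI bridge class, so fix up the class code early in enumeration.
 */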
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Qualcomm PCIe root complex driver
4 *
5 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
6 * Copyright 2015 Linaro Limited.
7 *
8 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
9 */
10
11#include <linux/clk.h>
12#include <linux/delay.h>
13#include <linux/gpio/consumer.h>
14#include <linux/interrupt.h>
15#include <linux/io.h>
16#include <linux/iopoll.h>
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/of_device.h>
20#include <linux/of_gpio.h>
21#include <linux/pci.h>
22#include <linux/pm_runtime.h>
23#include <linux/platform_device.h>
24#include <linux/phy/phy.h>
25#include <linux/regulator/consumer.h>
26#include <linux/reset.h>
27#include <linux/slab.h>
28#include <linux/types.h>
29
30#include "pcie-designware.h"
31
32#define PCIE20_PARF_SYS_CTRL 0x00
33#define MST_WAKEUP_EN BIT(13)
34#define SLV_WAKEUP_EN BIT(12)
35#define MSTR_ACLK_CGC_DIS BIT(10)
36#define SLV_ACLK_CGC_DIS BIT(9)
37#define CORE_CLK_CGC_DIS BIT(6)
38#define AUX_PWR_DET BIT(4)
39#define L23_CLK_RMV_DIS BIT(2)
40#define L1_CLK_RMV_DIS BIT(1)
41
42#define PCIE20_COMMAND_STATUS 0x04
43#define CMD_BME_VAL 0x4
44#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
45#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10
46
47#define PCIE20_PARF_PHY_CTRL 0x40
48#define PCIE20_PARF_PHY_REFCLK 0x4C
49#define PCIE20_PARF_DBI_BASE_ADDR 0x168
50#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
51#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
52#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
53#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
54#define PCIE20_PARF_LTSSM 0x1B0
55#define PCIE20_PARF_SID_OFFSET 0x234
56#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
57
58#define PCIE20_ELBI_SYS_CTRL 0x04
59#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
60
61#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
62#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
63#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
64#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
65#define CFG_BRIDGE_SB_INIT BIT(0)
66
67#define PCIE20_CAP 0x70
68#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC)
69#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11))
70#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14)
71#define PCIE_CAP_LINK1_VAL 0x2FD7F
72
73#define PCIE20_PARF_Q2A_FLUSH 0x1AC
74
75#define PCIE20_MISC_CONTROL_1_REG 0x8BC
76#define DBI_RO_WR_EN 1
77
78#define PERST_DELAY_US 1000
79
80#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
81#define SLV_ADDR_SPACE_SZ 0x10000000
82
83#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
84struct qcom_pcie_resources_2_1_0 {
85 struct clk *iface_clk;
86 struct clk *core_clk;
87 struct clk *phy_clk;
88 struct reset_control *pci_reset;
89 struct reset_control *axi_reset;
90 struct reset_control *ahb_reset;
91 struct reset_control *por_reset;
92 struct reset_control *phy_reset;
93 struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
94};
95
96struct qcom_pcie_resources_1_0_0 {
97 struct clk *iface;
98 struct clk *aux;
99 struct clk *master_bus;
100 struct clk *slave_bus;
101 struct reset_control *core;
102 struct regulator *vdda;
103};
104
105#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
106struct qcom_pcie_resources_2_3_2 {
107 struct clk *aux_clk;
108 struct clk *master_clk;
109 struct clk *slave_clk;
110 struct clk *cfg_clk;
111 struct clk *pipe_clk;
112 struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
113};
114
115#define QCOM_PCIE_2_4_0_MAX_CLOCKS 4
116struct qcom_pcie_resources_2_4_0 {
117 struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
118 int num_clks;
119 struct reset_control *axi_m_reset;
120 struct reset_control *axi_s_reset;
121 struct reset_control *pipe_reset;
122 struct reset_control *axi_m_vmid_reset;
123 struct reset_control *axi_s_xpu_reset;
124 struct reset_control *parf_reset;
125 struct reset_control *phy_reset;
126 struct reset_control *axi_m_sticky_reset;
127 struct reset_control *pipe_sticky_reset;
128 struct reset_control *pwr_reset;
129 struct reset_control *ahb_reset;
130 struct reset_control *phy_ahb_reset;
131};
132
133struct qcom_pcie_resources_2_3_3 {
134 struct clk *iface;
135 struct clk *axi_m_clk;
136 struct clk *axi_s_clk;
137 struct clk *ahb_clk;
138 struct clk *aux_clk;
139 struct reset_control *rst[7];
140};
141
142union qcom_pcie_resources {
143 struct qcom_pcie_resources_1_0_0 v1_0_0;
144 struct qcom_pcie_resources_2_1_0 v2_1_0;
145 struct qcom_pcie_resources_2_3_2 v2_3_2;
146 struct qcom_pcie_resources_2_3_3 v2_3_3;
147 struct qcom_pcie_resources_2_4_0 v2_4_0;
148};
149
150struct qcom_pcie;
151
152struct qcom_pcie_ops {
153 int (*get_resources)(struct qcom_pcie *pcie);
154 int (*init)(struct qcom_pcie *pcie);
155 int (*post_init)(struct qcom_pcie *pcie);
156 void (*deinit)(struct qcom_pcie *pcie);
157 void (*post_deinit)(struct qcom_pcie *pcie);
158 void (*ltssm_enable)(struct qcom_pcie *pcie);
159};
160
161struct qcom_pcie {
162 struct dw_pcie *pci;
163 void __iomem *parf; /* DT parf */
164 void __iomem *elbi; /* DT elbi */
165 union qcom_pcie_resources res;
166 struct phy *phy;
167 struct gpio_desc *reset;
168 const struct qcom_pcie_ops *ops;
169};
170
171#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
172
173static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
174{
175 gpiod_set_value_cansleep(pcie->reset, 1);
176 usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
177}
178
179static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
180{
181 /* Ensure that PERST has been asserted for at least 100 ms */
182 msleep(100);
183 gpiod_set_value_cansleep(pcie->reset, 0);
184 usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
185}
186
187static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
188{
189 struct dw_pcie *pci = pcie->pci;
190
191 if (dw_pcie_link_up(pci))
192 return 0;
193
194 /* Enable Link Training state machine */
195 if (pcie->ops->ltssm_enable)
196 pcie->ops->ltssm_enable(pcie);
197
198 return dw_pcie_wait_for_link(pci);
199}
200
201static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
202{
203 u32 val;
204
205 /* enable link training */
206 val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
207 val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
208 writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
209}
210
211static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
212{
213 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
214 struct dw_pcie *pci = pcie->pci;
215 struct device *dev = pci->dev;
216 int ret;
217
218 res->supplies[0].supply = "vdda";
219 res->supplies[1].supply = "vdda_phy";
220 res->supplies[2].supply = "vdda_refclk";
221 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
222 res->supplies);
223 if (ret)
224 return ret;
225
226 res->iface_clk = devm_clk_get(dev, "iface");
227 if (IS_ERR(res->iface_clk))
228 return PTR_ERR(res->iface_clk);
229
230 res->core_clk = devm_clk_get(dev, "core");
231 if (IS_ERR(res->core_clk))
232 return PTR_ERR(res->core_clk);
233
234 res->phy_clk = devm_clk_get(dev, "phy");
235 if (IS_ERR(res->phy_clk))
236 return PTR_ERR(res->phy_clk);
237
238 res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
239 if (IS_ERR(res->pci_reset))
240 return PTR_ERR(res->pci_reset);
241
242 res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
243 if (IS_ERR(res->axi_reset))
244 return PTR_ERR(res->axi_reset);
245
246 res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
247 if (IS_ERR(res->ahb_reset))
248 return PTR_ERR(res->ahb_reset);
249
250 res->por_reset = devm_reset_control_get_exclusive(dev, "por");
251 if (IS_ERR(res->por_reset))
252 return PTR_ERR(res->por_reset);
253
254 res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
255 return PTR_ERR_OR_ZERO(res->phy_reset);
256}
257
258static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
259{
260 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
261
262 reset_control_assert(res->pci_reset);
263 reset_control_assert(res->axi_reset);
264 reset_control_assert(res->ahb_reset);
265 reset_control_assert(res->por_reset);
266 reset_control_assert(res->pci_reset);
267 clk_disable_unprepare(res->iface_clk);
268 clk_disable_unprepare(res->core_clk);
269 clk_disable_unprepare(res->phy_clk);
270 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
271}
272
273static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
274{
275 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
276 struct dw_pcie *pci = pcie->pci;
277 struct device *dev = pci->dev;
278 u32 val;
279 int ret;
280
281 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
282 if (ret < 0) {
283 dev_err(dev, "cannot enable regulators\n");
284 return ret;
285 }
286
287 ret = reset_control_assert(res->ahb_reset);
288 if (ret) {
289 dev_err(dev, "cannot assert ahb reset\n");
290 goto err_assert_ahb;
291 }
292
293 ret = clk_prepare_enable(res->iface_clk);
294 if (ret) {
295 dev_err(dev, "cannot prepare/enable iface clock\n");
296 goto err_assert_ahb;
297 }
298
299 ret = clk_prepare_enable(res->phy_clk);
300 if (ret) {
301 dev_err(dev, "cannot prepare/enable phy clock\n");
302 goto err_clk_phy;
303 }
304
305 ret = clk_prepare_enable(res->core_clk);
306 if (ret) {
307 dev_err(dev, "cannot prepare/enable core clock\n");
308 goto err_clk_core;
309 }
310
311 ret = reset_control_deassert(res->ahb_reset);
312 if (ret) {
313 dev_err(dev, "cannot deassert ahb reset\n");
314 goto err_deassert_ahb;
315 }
316
317 /* enable PCIe clocks and resets */
318 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
319 val &= ~BIT(0);
320 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
321
322 /* enable external reference clock */
323 val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
324 val |= BIT(16);
325 writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
326
327 ret = reset_control_deassert(res->phy_reset);
328 if (ret) {
329 dev_err(dev, "cannot deassert phy reset\n");
330 return ret;
331 }
332
333 ret = reset_control_deassert(res->pci_reset);
334 if (ret) {
335 dev_err(dev, "cannot deassert pci reset\n");
336 return ret;
337 }
338
339 ret = reset_control_deassert(res->por_reset);
340 if (ret) {
341 dev_err(dev, "cannot deassert por reset\n");
342 return ret;
343 }
344
345 ret = reset_control_deassert(res->axi_reset);
346 if (ret) {
347 dev_err(dev, "cannot deassert axi reset\n");
348 return ret;
349 }
350
351 /* wait for clock acquisition */
352 usleep_range(1000, 1500);
353
354
355 /* Set the Max TLP size to 2K, instead of using default of 4K */
356 writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
357 pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
358 writel(CFG_BRIDGE_SB_INIT,
359 pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
360
361 return 0;
362
363err_deassert_ahb:
364 clk_disable_unprepare(res->core_clk);
365err_clk_core:
366 clk_disable_unprepare(res->phy_clk);
367err_clk_phy:
368 clk_disable_unprepare(res->iface_clk);
369err_assert_ahb:
370 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
371
372 return ret;
373}
374
375static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
376{
377 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
378 struct dw_pcie *pci = pcie->pci;
379 struct device *dev = pci->dev;
380
381 res->vdda = devm_regulator_get(dev, "vdda");
382 if (IS_ERR(res->vdda))
383 return PTR_ERR(res->vdda);
384
385 res->iface = devm_clk_get(dev, "iface");
386 if (IS_ERR(res->iface))
387 return PTR_ERR(res->iface);
388
389 res->aux = devm_clk_get(dev, "aux");
390 if (IS_ERR(res->aux))
391 return PTR_ERR(res->aux);
392
393 res->master_bus = devm_clk_get(dev, "master_bus");
394 if (IS_ERR(res->master_bus))
395 return PTR_ERR(res->master_bus);
396
397 res->slave_bus = devm_clk_get(dev, "slave_bus");
398 if (IS_ERR(res->slave_bus))
399 return PTR_ERR(res->slave_bus);
400
401 res->core = devm_reset_control_get_exclusive(dev, "core");
402 return PTR_ERR_OR_ZERO(res->core);
403}
404
405static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
406{
407 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
408
409 reset_control_assert(res->core);
410 clk_disable_unprepare(res->slave_bus);
411 clk_disable_unprepare(res->master_bus);
412 clk_disable_unprepare(res->iface);
413 clk_disable_unprepare(res->aux);
414 regulator_disable(res->vdda);
415}
416
static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;

err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

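/*
 * From IP rev. 2.3.2 onwards, link training is kicked off via the LTSSM
 * enable bit in the PARF space; the older revisions sharing
 * qcom_pcie_2_1_0_ltssm_enable() use the LT_ENABLE bit in the ELBI
 * PCIE20_ELBI_SYS_CTRL register instead.
 */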
static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

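/*
 * 2.3.2 (msm8996) needs two supplies (vdda, vddpe-3v3) and five clocks.
 * The "pipe" clock is only looked up here; it is enabled in post_init(),
 * after the PHY (which typically sources the pipe clock) has been
 * powered on.
 */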
static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}

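/*
 * init order for 2.3.2: regulators, then the aux/cfg/bus clocks, then the
 * one-time PARF setup. The raw BIT() writes below are undocumented in
 * this file; based on later revisions of the IP, bit 0 of PHY_CTRL is the
 * PHY test power-down control and bit 4 of MHI_CLOCK_RESET_CTRL is a
 * bypass bit.
 */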
static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* take the PHY out of test power-down */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);
err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

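/*
 * The pipe clock is sourced from the PHY, so it can only be enabled once
 * the PHY is powered; qcom_pcie_host_init() therefore calls post_init()
 * right after phy_power_on().
 */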
static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		return ret;
	}

	return 0;
}

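/*
 * The 2.4.0 resource set differs per SoC. A rough, hypothetical binding
 * fragment for the non-IPQ4019 case (e.g. qcom,pcie-qcs404) would carry:
 *
 *	clock-names = "aux", "master_bus", "slave_bus", "iface";
 *	reset-names = "axi_m", "axi_s", "axi_m_sticky", "pipe_sticky",
 *		      "pwr", "ahb";
 *
 * IPQ4019 drops "iface" and additionally takes the PHY-side resets
 * ("pipe", "axi_m_vmid", "axi_s_xpu", "parf", "phy", "phy_ahb"), which
 * are controlled from here rather than by a separate PHY driver.
 */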
static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relate to the PHY or are secure clocks, but
		 * are controlled here for IPQ4019.
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}

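/*
 * Every reset is asserted unconditionally here; on non-IPQ4019 parts the
 * IPQ-only reset pointers were never populated and stay NULL, which the
 * reset API treats as a no-op.
 */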
static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

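/*
 * 2.4.0 bring-up: assert all resets, then release them in dependency
 * order (PHY side first, then pipe, then the AXI/AHB fabric). As with the
 * 2.3.3 sequence below, there is no way to observe reset completion, so
 * fixed usleep_range() delays separate the steps.
 */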
static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	/* take the PHY out of test power-down */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}

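/*
 * 2.3.3 (ipq8074) takes five named clocks and seven named resets; the
 * resets live in an array so init() can assert and release them as a
 * group.
 */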
static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
				    "axi_m_sticky", "sticky",
				    "ahb", "sleep", };
	int i;

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->axi_m_clk = devm_clk_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_clk))
		return PTR_ERR(res->axi_m_clk);

	res->axi_s_clk = devm_clk_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_clk))
		return PTR_ERR(res->axi_s_clk);

	res->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(res->ahb_clk))
		return PTR_ERR(res->ahb_clk);

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
		res->rst[i] = devm_reset_control_get_exclusive(dev,
							       rst_names[i]);
		if (IS_ERR(res->rst[i]))
			return PTR_ERR(res->rst[i]);
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}

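/*
 * Besides the PARF setup, 2.3.3 init also programs the DBI side: enable
 * bus mastering, make the read-only DBI registers writable, set the link
 * capability defaults, clear the ASPM L0s/L1 support bits and set the
 * completion-timeout-disable bit in DEVICE_CONTROL2.
 */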
static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i, ret;
	u32 val;

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_assert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
			return ret;
		}
	}

	usleep_range(2000, 2500);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_deassert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
				ret);
			return ret;
		}
	}

	/*
	 * There is no way to observe whether the reset has completed,
	 * so just wait for a fixed time.
	 */
	usleep_range(2000, 2500);

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_iface;
	}

	ret = clk_prepare_enable(res->axi_m_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->axi_s_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi slave clock\n");
		goto err_clk_axi_s;
	}

	ret = clk_prepare_enable(res->ahb_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ahb clock\n");
		goto err_clk_ahb;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	/* take the PHY out of test power-down */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
	       SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
	       AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
	       pcie->parf + PCIE20_PARF_SYS_CTRL);
	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);

	val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
	val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
	writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);

	writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
	       PCIE20_DEVICE_CONTROL2_STATUS2);

	return 0;

err_clk_aux:
	clk_disable_unprepare(res->ahb_clk);
err_clk_ahb:
	clk_disable_unprepare(res->axi_s_clk);
err_clk_axi_s:
	clk_disable_unprepare(res->axi_m_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->iface);
err_clk_iface:
	/*
	 * Not checking for failure; the original error in 'ret' is
	 * returned either way.
	 */
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		reset_control_assert(res->rst[i]);

	return ret;
}

static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

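/*
 * Host bring-up: hold the endpoint in reset (PERST#) while the variant
 * init() runs and the PHY powers on, program the root complex, then
 * release the endpoint and start link training. Failures unwind the
 * steps in reverse.
 */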
static int qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->ops->post_init) {
		ret = pcie->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	dw_pcie_setup_rc(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;

	return 0;

err:
	qcom_ep_reset_assert(pcie);
	if (pcie->ops->post_deinit)
		pcie->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);

	return ret;
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};

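/*
 * A minimal, hypothetical DT node carrying everything probe() looks up
 * below (names and values are illustrative only, not from a real board):
 *
 *	pcie@600000 {
 *		compatible = "qcom,pcie-msm8996";
 *		reg-names = "parf", "dbi", "elbi";
 *		interrupt-names = "msi";
 *		perst-gpios = <&tlmm 35 GPIO_ACTIVE_LOW>;
 *		phys = <&pciephy>;
 *		phy-names = "pciephy";
 *	};
 */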
static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_disable(dev);
		return ret;
	}

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->ops = of_device_get_match_data(dev);

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
	pcie->parf = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base)) {
		ret = PTR_ERR(pci->dbi_base);
		goto err_pm_runtime_put;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
	pcie->elbi = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = pcie->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0) {
			ret = pp->msi_irq;
			goto err_pm_runtime_put;
		}
	}

	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_pm_runtime_put;
	}

	return 0;

err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
	{ }
};

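/*
 * The root port reports a non-bridge class code on these parts, which
 * would stop the PCI core from scanning the bus behind it; force the
 * PCI-to-PCI bridge class early in enumeration.
 */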
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, PCI_ANY_ID, qcom_fixup_class);

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);