// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Kirin Phone SoCs
 *
 * Copyright (C) 2017 HiSilicon Electronics Co., Ltd.
 * http://www.huawei.com
 *
 * Author: Xiaowei Song <songxiaowei@huawei.com>
 */

#include <linux/compiler.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/types.h>
#include "pcie-designware.h"

#define to_kirin_pcie(x) dev_get_drvdata((x)->dev)

#define REF_CLK_FREQ 100000000

/* PCIe ELBI registers */
#define SOC_PCIECTRL_CTRL0_ADDR 0x000
#define SOC_PCIECTRL_CTRL1_ADDR 0x004
#define SOC_PCIEPHY_CTRL2_ADDR 0x008
#define SOC_PCIEPHY_CTRL3_ADDR 0x00c
#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)

/* info located in APB */
#define PCIE_APP_LTSSM_ENABLE 0x01c
#define PCIE_APB_PHY_CTRL0 0x0
#define PCIE_APB_PHY_CTRL1 0x4
#define PCIE_APB_PHY_STATUS0 0x400
#define PCIE_LINKUP_ENABLE (0x8020)
#define PCIE_LTSSM_ENABLE_BIT (0x1 << 11)
#define PIPE_CLK_STABLE (0x1 << 19)
#define PHY_REF_PAD_BIT (0x1 << 8)
#define PHY_PWR_DOWN_BIT (0x1 << 22)
#define PHY_RST_ACK_BIT (0x1 << 16)

/* info located in sysctrl */
#define SCTRL_PCIE_CMOS_OFFSET 0x60
#define SCTRL_PCIE_CMOS_BIT 0x10
#define SCTRL_PCIE_ISO_OFFSET 0x44
#define SCTRL_PCIE_ISO_BIT 0x30
#define SCTRL_PCIE_HPCLK_OFFSET 0x190
#define SCTRL_PCIE_HPCLK_BIT 0x184000
#define SCTRL_PCIE_OE_OFFSET 0x14a
#define PCIE_DEBOUNCE_PARAM 0xF0F400
#define PCIE_OE_BYPASS (0x3 << 28)

/* peri_crg ctrl */
#define CRGCTRL_PCIE_ASSERT_OFFSET 0x88
#define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000

/* Time for delay */
#define REF_2_PERST_MIN 20000
#define REF_2_PERST_MAX 25000
#define PERST_2_ACCESS_MIN 10000
#define PERST_2_ACCESS_MAX 12000
#define LINK_WAIT_MIN 900
#define LINK_WAIT_MAX 1000
#define PIPE_CLK_WAIT_MIN 550
#define PIPE_CLK_WAIT_MAX 600
#define TIME_CMOS_MIN 100
#define TIME_CMOS_MAX 105
#define TIME_PHY_PD_MIN 10
#define TIME_PHY_PD_MAX 11

struct kirin_pcie {
	struct dw_pcie *pci;
	void __iomem *apb_base;
	void __iomem *phy_base;
	struct regmap *crgctrl;
	struct regmap *sysctrl;
	struct clk *apb_sys_clk;
	struct clk *apb_phy_clk;
	struct clk *phy_ref_clk;
	struct clk *pcie_aclk;
	struct clk *pcie_aux_clk;
	int gpio_id_reset;
};

/* Registers in PCIeCTRL */
static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie,
					 u32 val, u32 reg)
{
	writel(val, kirin_pcie->apb_base + reg);
}

static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg)
{
	return readl(kirin_pcie->apb_base + reg);
}

/* Registers in PCIePHY */
static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie,
					u32 val, u32 reg)
{
	writel(val, kirin_pcie->phy_base + reg);
}

static inline u32 kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg)
{
	return readl(kirin_pcie->phy_base + reg);
}

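/*
 * Look up all clocks used by the controller and its PHY from the device
 * tree. They are only acquired here; enabling happens later in
 * kirin_pcie_clk_ctrl().
 */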
static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie,
			       struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref");
	if (IS_ERR(kirin_pcie->phy_ref_clk))
		return PTR_ERR(kirin_pcie->phy_ref_clk);

	kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux");
	if (IS_ERR(kirin_pcie->pcie_aux_clk))
		return PTR_ERR(kirin_pcie->pcie_aux_clk);

	kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy");
	if (IS_ERR(kirin_pcie->apb_phy_clk))
		return PTR_ERR(kirin_pcie->apb_phy_clk);

	kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys");
	if (IS_ERR(kirin_pcie->apb_sys_clk))
		return PTR_ERR(kirin_pcie->apb_sys_clk);

	kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk");
	if (IS_ERR(kirin_pcie->pcie_aclk))
		return PTR_ERR(kirin_pcie->pcie_aclk);

	return 0;
}

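/*
 * Map the "apb", "phy" and "dbi" register regions and look up the CRG and
 * system controller syscons needed for the power-up sequence.
 */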
static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
				    struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *apb;
	struct resource *phy;
	struct resource *dbi;

	apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb");
	kirin_pcie->apb_base = devm_ioremap_resource(dev, apb);
	if (IS_ERR(kirin_pcie->apb_base))
		return PTR_ERR(kirin_pcie->apb_base);

	phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
	kirin_pcie->phy_base = devm_ioremap_resource(dev, phy);
	if (IS_ERR(kirin_pcie->phy_base))
		return PTR_ERR(kirin_pcie->phy_base);

	dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi);
	if (IS_ERR(kirin_pcie->pci->dbi_base))
		return PTR_ERR(kirin_pcie->pci->dbi_base);

	kirin_pcie->crgctrl =
		syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
	if (IS_ERR(kirin_pcie->crgctrl))
		return PTR_ERR(kirin_pcie->crgctrl);

	kirin_pcie->sysctrl =
		syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl");
	if (IS_ERR(kirin_pcie->sysctrl))
		return PTR_ERR(kirin_pcie->sysctrl);

	return 0;
}

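/*
 * Bring the PHY out of power-down and reset, then check the PIPE clock
 * status before the link is brought up.
 */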
static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie)
{
	struct device *dev = kirin_pcie->pci->dev;
	u32 reg_val;

	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
	reg_val &= ~PHY_REF_PAD_BIT;
	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);

	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0);
	reg_val &= ~PHY_PWR_DOWN_BIT;
	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0);
	usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX);

	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
	reg_val &= ~PHY_RST_ACK_BIT;
	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);

	usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
	if (reg_val & PIPE_CLK_STABLE) {
		dev_err(dev, "PIPE clk is not stable\n");
		return -EINVAL;
	}

	return 0;
}

static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie)
{
	u32 val;

	regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val);
	val |= PCIE_DEBOUNCE_PARAM;
	val &= ~PCIE_OE_BYPASS;
	regmap_write(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, val);
}

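/*
 * Enable or disable all controller/PHY clocks. On enable, the PHY reference
 * clock is first set to 100 MHz; any failure unwinds the clocks that were
 * already enabled.
 */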
static int kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable)
{
	int ret = 0;

	if (!enable)
		goto close_clk;

	ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ);
	if (ret)
		return ret;

	ret = clk_prepare_enable(kirin_pcie->phy_ref_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(kirin_pcie->apb_sys_clk);
	if (ret)
		goto apb_sys_fail;

	ret = clk_prepare_enable(kirin_pcie->apb_phy_clk);
	if (ret)
		goto apb_phy_fail;

	ret = clk_prepare_enable(kirin_pcie->pcie_aclk);
	if (ret)
		goto aclk_fail;

	ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk);
	if (ret)
		goto aux_clk_fail;

	return 0;

close_clk:
	clk_disable_unprepare(kirin_pcie->pcie_aux_clk);
aux_clk_fail:
	clk_disable_unprepare(kirin_pcie->pcie_aclk);
aclk_fail:
	clk_disable_unprepare(kirin_pcie->apb_phy_clk);
apb_phy_fail:
	clk_disable_unprepare(kirin_pcie->apb_sys_clk);
apb_sys_fail:
	clk_disable_unprepare(kirin_pcie->phy_ref_clk);

	return ret;
}

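/*
 * Power-up sequence: host power supply, pad output enable, clocks,
 * ISO/reset/clock-gate setup, PHY init and PERST# handling for the
 * Endpoint.
 */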
static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie)
{
	int ret;

	/* Power supply for Host */
	regmap_write(kirin_pcie->sysctrl,
		     SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT);
	usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX);
	kirin_pcie_oe_enable(kirin_pcie);

	ret = kirin_pcie_clk_ctrl(kirin_pcie, true);
	if (ret)
		return ret;

	/* ISO disable, PCIeCtrl, PHY assert and clk gate clear */
	regmap_write(kirin_pcie->sysctrl,
		     SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT);
	regmap_write(kirin_pcie->crgctrl,
		     CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT);
	regmap_write(kirin_pcie->sysctrl,
		     SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT);

	ret = kirin_pcie_phy_init(kirin_pcie);
	if (ret)
		goto close_clk;

	/* De-assert PERST# so the Endpoint can come out of reset */
	if (!gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst")) {
		usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
		ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1);
		if (ret)
			goto close_clk;
		usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);

		return 0;
	}

close_clk:
	kirin_pcie_clk_ctrl(kirin_pcie, false);
	return ret;
}

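/*
 * DBI accesses go through the ELBI sideband: the "slave DBI enable" bit in
 * CTRL0 (writes) or CTRL1 (reads) is set around each access and cleared
 * again afterwards.
 */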
static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
					   bool on)
{
	u32 val;

	val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR);
	if (on)
		val = val | PCIE_ELBI_SLV_DBI_ENABLE;
	else
		val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;

	kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR);
}

static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,
					   bool on)
{
	u32 val;

	val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR);
	if (on)
		val = val | PCIE_ELBI_SLV_DBI_ENABLE;
	else
		val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;

	kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR);
}

static int kirin_pcie_rd_own_conf(struct pcie_port *pp,
				  int where, int size, u32 *val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
	int ret;

	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
	ret = dw_pcie_read(pci->dbi_base + where, size, val);
	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);

	return ret;
}

static int kirin_pcie_wr_own_conf(struct pcie_port *pp,
				  int where, int size, u32 val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
	int ret;

	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
	ret = dw_pcie_write(pci->dbi_base + where, size, val);
	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);

	return ret;
}

static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
			       u32 reg, size_t size)
{
	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
	u32 ret;

	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
	dw_pcie_read(base + reg, size, &ret);
	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);

	return ret;
}

static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
				 u32 reg, size_t size, u32 val)
{
	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);

	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
	dw_pcie_write(base + reg, size, val);
	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
}

static int kirin_pcie_link_up(struct dw_pcie *pci)
{
	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
	u32 val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);

	if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
		return 1;

	return 0;
}

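/*
 * Configure the Root Complex, enable LTSSM and poll the link-up status for
 * up to roughly one second.
 */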
static int kirin_pcie_establish_link(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
	struct device *dev = kirin_pcie->pci->dev;
	int count = 0;

	if (kirin_pcie_link_up(pci))
		return 0;

	dw_pcie_setup_rc(pp);

	/* assert LTSSM enable */
	kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT,
			      PCIE_APP_LTSSM_ENABLE);

	/* check if the link is up or not */
	while (!kirin_pcie_link_up(pci)) {
		usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
		count++;
		if (count == 1000) {
			dev_err(dev, "Link Fail\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int kirin_pcie_host_init(struct pcie_port *pp)
{
	kirin_pcie_establish_link(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	return 0;
}

static const struct dw_pcie_ops kirin_dw_pcie_ops = {
	.read_dbi = kirin_pcie_read_dbi,
	.write_dbi = kirin_pcie_write_dbi,
	.link_up = kirin_pcie_link_up,
};

static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
	.rd_own_conf = kirin_pcie_rd_own_conf,
	.wr_own_conf = kirin_pcie_wr_own_conf,
	.host_init = kirin_pcie_host_init,
};

static int kirin_pcie_add_msi(struct dw_pcie *pci,
			      struct platform_device *pdev)
{
	int irq;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0) {
			dev_err(&pdev->dev,
				"failed to get MSI IRQ (%d)\n", irq);
			return irq;
		}

		pci->pp.msi_irq = irq;
	}

	return 0;
}

static int kirin_add_pcie_port(struct dw_pcie *pci,
			       struct platform_device *pdev)
{
	int ret;

	ret = kirin_pcie_add_msi(pci, pdev);
	if (ret)
		return ret;

	pci->pp.ops = &kirin_pcie_host_ops;

	return dw_pcie_host_init(&pci->pp);
}

static int kirin_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct kirin_pcie *kirin_pcie;
	struct dw_pcie *pci;
	int ret;

	if (!dev->of_node) {
		dev_err(dev, "NULL node\n");
		return -EINVAL;
	}

	kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
	if (!kirin_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &kirin_dw_pcie_ops;
	kirin_pcie->pci = pci;

	ret = kirin_pcie_get_clk(kirin_pcie, pdev);
	if (ret)
		return ret;

	ret = kirin_pcie_get_resource(kirin_pcie, pdev);
	if (ret)
		return ret;

	kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
						      "reset-gpios", 0);
	if (kirin_pcie->gpio_id_reset < 0)
		return -ENODEV;

	ret = kirin_pcie_power_on(kirin_pcie);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, kirin_pcie);

	return kirin_add_pcie_port(pci, pdev);
}

static const struct of_device_id kirin_pcie_match[] = {
	{ .compatible = "hisilicon,kirin960-pcie" },
	{},
};

static struct platform_driver kirin_pcie_driver = {
	.probe = kirin_pcie_probe,
	.driver = {
		.name = "kirin-pcie",
		.of_match_table = kirin_pcie_match,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(kirin_pcie_driver);
// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Kirin Phone SoCs
 *
 * Copyright (C) 2017 HiSilicon Electronics Co., Ltd.
 * https://www.huawei.com
 *
 * Author: Xiaowei Song <songxiaowei@huawei.com>
 */

#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/types.h>
#include "pcie-designware.h"

#define to_kirin_pcie(x) dev_get_drvdata((x)->dev)

/* PCIe ELBI registers */
#define SOC_PCIECTRL_CTRL0_ADDR 0x000
#define SOC_PCIECTRL_CTRL1_ADDR 0x004
#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)

/* info located in APB */
#define PCIE_APP_LTSSM_ENABLE 0x01c
#define PCIE_APB_PHY_STATUS0 0x400
#define PCIE_LINKUP_ENABLE (0x8020)
#define PCIE_LTSSM_ENABLE_BIT (0x1 << 11)

/* info located in sysctrl */
#define SCTRL_PCIE_CMOS_OFFSET 0x60
#define SCTRL_PCIE_CMOS_BIT 0x10
#define SCTRL_PCIE_ISO_OFFSET 0x44
#define SCTRL_PCIE_ISO_BIT 0x30
#define SCTRL_PCIE_HPCLK_OFFSET 0x190
#define SCTRL_PCIE_HPCLK_BIT 0x184000
#define SCTRL_PCIE_OE_OFFSET 0x14a
#define PCIE_DEBOUNCE_PARAM 0xF0F400
#define PCIE_OE_BYPASS (0x3 << 28)

/*
 * Max number of connected PCI slots at an external PCI bridge
 *
 * This is used on HiKey 970, which has a PEX 8606 bridge with 4 connected
 * lanes (lane 0 upstream, and the other three lanes downstream: one
 * connected to an on-board Ethernet adapter and the other two connected
 * to M.2 and mini PCI slots).
 *
 * Each slot has a different clock source and uses a separate PERST# pin.
 */
#define MAX_PCI_SLOTS 3

enum pcie_kirin_phy_type {
	PCIE_KIRIN_INTERNAL_PHY,
	PCIE_KIRIN_EXTERNAL_PHY
};

struct kirin_pcie {
	enum pcie_kirin_phy_type type;

	struct dw_pcie *pci;
	struct regmap *apb;
	struct phy *phy;
	void *phy_priv; /* only for PCIE_KIRIN_INTERNAL_PHY */

	/* DWC PERST# */
	int gpio_id_dwc_perst;

	/* Per-slot PERST# */
	int num_slots;
	int gpio_id_reset[MAX_PCI_SLOTS];
	const char *reset_names[MAX_PCI_SLOTS];

	/* Per-slot clkreq */
	int n_gpio_clkreq;
	int gpio_id_clkreq[MAX_PCI_SLOTS];
	const char *clkreq_names[MAX_PCI_SLOTS];
};

/*
 * Kirin 960 PHY. Can't be split into a PHY driver without changing the
 * DT schema.
 */

#define REF_CLK_FREQ 100000000

/* PHY info located in APB */
#define PCIE_APB_PHY_CTRL0 0x0
#define PCIE_APB_PHY_CTRL1 0x4
#define PCIE_APB_PHY_STATUS0 0x400
#define PIPE_CLK_STABLE BIT(19)
#define PHY_REF_PAD_BIT BIT(8)
#define PHY_PWR_DOWN_BIT BIT(22)
#define PHY_RST_ACK_BIT BIT(16)

/* peri_crg ctrl */
#define CRGCTRL_PCIE_ASSERT_OFFSET 0x88
#define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000

/* Time for delay */
#define REF_2_PERST_MIN 21000
#define REF_2_PERST_MAX 25000
#define PERST_2_ACCESS_MIN 10000
#define PERST_2_ACCESS_MAX 12000
#define PIPE_CLK_WAIT_MIN 550
#define PIPE_CLK_WAIT_MAX 600
#define TIME_CMOS_MIN 100
#define TIME_CMOS_MAX 105
#define TIME_PHY_PD_MIN 10
#define TIME_PHY_PD_MAX 11

struct hi3660_pcie_phy {
	struct device *dev;
	void __iomem *base;
	struct regmap *crgctrl;
	struct regmap *sysctrl;
	struct clk *apb_sys_clk;
	struct clk *apb_phy_clk;
	struct clk *phy_ref_clk;
	struct clk *aclk;
	struct clk *aux_clk;
};

/* Registers in PCIePHY */
static inline void kirin_apb_phy_writel(struct hi3660_pcie_phy *hi3660_pcie_phy,
					u32 val, u32 reg)
{
	writel(val, hi3660_pcie_phy->base + reg);
}

static inline u32 kirin_apb_phy_readl(struct hi3660_pcie_phy *hi3660_pcie_phy,
				      u32 reg)
{
	return readl(hi3660_pcie_phy->base + reg);
}

static int hi3660_pcie_phy_get_clk(struct hi3660_pcie_phy *phy)
{
	struct device *dev = phy->dev;

	phy->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref");
	if (IS_ERR(phy->phy_ref_clk))
		return PTR_ERR(phy->phy_ref_clk);

	phy->aux_clk = devm_clk_get(dev, "pcie_aux");
	if (IS_ERR(phy->aux_clk))
		return PTR_ERR(phy->aux_clk);

	phy->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy");
	if (IS_ERR(phy->apb_phy_clk))
		return PTR_ERR(phy->apb_phy_clk);

	phy->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys");
	if (IS_ERR(phy->apb_sys_clk))
		return PTR_ERR(phy->apb_sys_clk);

	phy->aclk = devm_clk_get(dev, "pcie_aclk");
	if (IS_ERR(phy->aclk))
		return PTR_ERR(phy->aclk);

	return 0;
}

static int hi3660_pcie_phy_get_resource(struct hi3660_pcie_phy *phy)
{
	struct device *dev = phy->dev;
	struct platform_device *pdev;

	/* registers */
	pdev = container_of(dev, struct platform_device, dev);

	phy->base = devm_platform_ioremap_resource_byname(pdev, "phy");
	if (IS_ERR(phy->base))
		return PTR_ERR(phy->base);

	phy->crgctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
	if (IS_ERR(phy->crgctrl))
		return PTR_ERR(phy->crgctrl);

	phy->sysctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl");
	if (IS_ERR(phy->sysctrl))
		return PTR_ERR(phy->sysctrl);

	return 0;
}

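/*
 * Bring the HI3660 PHY out of power-down and reset and check the PIPE
 * clock status reported in PCIE_APB_PHY_STATUS0.
 */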
static int hi3660_pcie_phy_start(struct hi3660_pcie_phy *phy)
{
	struct device *dev = phy->dev;
	u32 reg_val;

	reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL1);
	reg_val &= ~PHY_REF_PAD_BIT;
	kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL1);

	reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL0);
	reg_val &= ~PHY_PWR_DOWN_BIT;
	kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL0);
	usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX);

	reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL1);
	reg_val &= ~PHY_RST_ACK_BIT;
	kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL1);

	usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
	reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_STATUS0);
	if (reg_val & PIPE_CLK_STABLE) {
		dev_err(dev, "PIPE clk is not stable\n");
		return -EINVAL;
	}

	return 0;
}

static void hi3660_pcie_phy_oe_enable(struct hi3660_pcie_phy *phy)
{
	u32 val;

	regmap_read(phy->sysctrl, SCTRL_PCIE_OE_OFFSET, &val);
	val |= PCIE_DEBOUNCE_PARAM;
	val &= ~PCIE_OE_BYPASS;
	regmap_write(phy->sysctrl, SCTRL_PCIE_OE_OFFSET, val);
}

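/*
 * Enable or disable all PHY-related clocks. On enable, the reference clock
 * is set to 100 MHz first; errors unwind any clocks already enabled.
 */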
static int hi3660_pcie_phy_clk_ctrl(struct hi3660_pcie_phy *phy, bool enable)
{
	int ret = 0;

	if (!enable)
		goto close_clk;

	ret = clk_set_rate(phy->phy_ref_clk, REF_CLK_FREQ);
	if (ret)
		return ret;

	ret = clk_prepare_enable(phy->phy_ref_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(phy->apb_sys_clk);
	if (ret)
		goto apb_sys_fail;

	ret = clk_prepare_enable(phy->apb_phy_clk);
	if (ret)
		goto apb_phy_fail;

	ret = clk_prepare_enable(phy->aclk);
	if (ret)
		goto aclk_fail;

	ret = clk_prepare_enable(phy->aux_clk);
	if (ret)
		goto aux_clk_fail;

	return 0;

close_clk:
	clk_disable_unprepare(phy->aux_clk);
aux_clk_fail:
	clk_disable_unprepare(phy->aclk);
aclk_fail:
	clk_disable_unprepare(phy->apb_phy_clk);
apb_phy_fail:
	clk_disable_unprepare(phy->apb_sys_clk);
apb_sys_fail:
	clk_disable_unprepare(phy->phy_ref_clk);

	return ret;
}

static int hi3660_pcie_phy_power_on(struct kirin_pcie *pcie)
{
	struct hi3660_pcie_phy *phy = pcie->phy_priv;
	int ret;

	/* Power supply for Host */
	regmap_write(phy->sysctrl,
		     SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT);
	usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX);

	hi3660_pcie_phy_oe_enable(phy);

	ret = hi3660_pcie_phy_clk_ctrl(phy, true);
	if (ret)
		return ret;

	/* ISO disable, PCIeCtrl, PHY assert and clk gate clear */
	regmap_write(phy->sysctrl,
		     SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT);
	regmap_write(phy->crgctrl,
		     CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT);
	regmap_write(phy->sysctrl,
		     SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT);

	ret = hi3660_pcie_phy_start(phy);
	if (ret)
		goto disable_clks;

	return 0;

disable_clks:
	hi3660_pcie_phy_clk_ctrl(phy, false);
	return ret;
}

static int hi3660_pcie_phy_init(struct platform_device *pdev,
				struct kirin_pcie *pcie)
{
	struct device *dev = &pdev->dev;
	struct hi3660_pcie_phy *phy;
	int ret;

	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	pcie->phy_priv = phy;
	phy->dev = dev;

	ret = hi3660_pcie_phy_get_clk(phy);
	if (ret)
		return ret;

	return hi3660_pcie_phy_get_resource(phy);
}

static int hi3660_pcie_phy_power_off(struct kirin_pcie *pcie)
{
	struct hi3660_pcie_phy *phy = pcie->phy_priv;

	/* Drop power supply for Host */
	regmap_write(phy->sysctrl, SCTRL_PCIE_CMOS_OFFSET, 0x00);

	hi3660_pcie_phy_clk_ctrl(phy, false);

	return 0;
}

/*
 * The non-PHY part starts here
 */

static const struct regmap_config pcie_kirin_regmap_conf = {
	.name = "kirin_pcie_apb",
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

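/*
 * Parse the optional "hisilicon,clken-gpios" property, which provides one
 * clock-request GPIO per slot behind the external bridge.
 */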
static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
				      struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	char name[32];
	int ret, i;

	/* This is an optional property */
	ret = gpiod_count(dev, "hisilicon,clken");
	if (ret < 0)
		return 0;

	if (ret > MAX_PCI_SLOTS) {
		dev_err(dev, "Too many GPIO clock requests!\n");
		return -EINVAL;
	}

	pcie->n_gpio_clkreq = ret;

	for (i = 0; i < pcie->n_gpio_clkreq; i++) {
		pcie->gpio_id_clkreq[i] = of_get_named_gpio(dev->of_node,
						"hisilicon,clken-gpios", i);
		if (pcie->gpio_id_clkreq[i] < 0)
			return pcie->gpio_id_clkreq[i];

		sprintf(name, "pcie_clkreq_%d", i);
		pcie->clkreq_names[i] = devm_kstrdup_const(dev, name,
							   GFP_KERNEL);
		if (!pcie->clkreq_names[i])
			return -ENOMEM;
	}

	return 0;
}

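/*
 * Walk the DT nodes describing devices behind the bridge and record, for
 * each slot found, its PERST# GPIO and a "pcie_perst_<slot>" name derived
 * from the slot's devfn.
 */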
static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
				 struct platform_device *pdev,
				 struct device_node *node)
{
	struct device *dev = &pdev->dev;
	struct device_node *parent, *child;
	int ret, slot, i;
	char name[32];

	for_each_available_child_of_node(node, parent) {
		for_each_available_child_of_node(parent, child) {
			i = pcie->num_slots;

			pcie->gpio_id_reset[i] = of_get_named_gpio(child,
							"reset-gpios", 0);
			if (pcie->gpio_id_reset[i] < 0)
				continue;

			pcie->num_slots++;
			if (pcie->num_slots > MAX_PCI_SLOTS) {
				dev_err(dev, "Too many PCI slots!\n");
				ret = -EINVAL;
				goto put_node;
			}

			ret = of_pci_get_devfn(child);
			if (ret < 0) {
				dev_err(dev, "failed to parse devfn: %d\n", ret);
				goto put_node;
			}

			slot = PCI_SLOT(ret);

			sprintf(name, "pcie_perst_%d", slot);
			pcie->reset_names[i] = devm_kstrdup_const(dev, name,
								  GFP_KERNEL);
			if (!pcie->reset_names[i]) {
				ret = -ENOMEM;
				goto put_node;
			}
		}
	}

	return 0;

put_node:
	of_node_put(child);
	of_node_put(parent);
	return ret;
}

static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
				    struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *child, *node = dev->of_node;
	void __iomem *apb_base;
	int ret;

	apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
	if (IS_ERR(apb_base))
		return PTR_ERR(apb_base);

	kirin_pcie->apb = devm_regmap_init_mmio(dev, apb_base,
						&pcie_kirin_regmap_conf);
	if (IS_ERR(kirin_pcie->apb))
		return PTR_ERR(kirin_pcie->apb);

	/* pcie internal PERST# gpio */
	kirin_pcie->gpio_id_dwc_perst = of_get_named_gpio(dev->of_node,
							  "reset-gpios", 0);
	if (kirin_pcie->gpio_id_dwc_perst == -EPROBE_DEFER) {
		return -EPROBE_DEFER;
	} else if (!gpio_is_valid(kirin_pcie->gpio_id_dwc_perst)) {
		dev_err(dev, "unable to get a valid gpio pin\n");
		return -ENODEV;
	}

	ret = kirin_pcie_get_gpio_enable(kirin_pcie, pdev);
	if (ret)
		return ret;

	/* Parse OF children */
	for_each_available_child_of_node(node, child) {
		ret = kirin_pcie_parse_port(kirin_pcie, pdev, child);
		if (ret)
			goto put_node;
	}

	return 0;

put_node:
	of_node_put(child);
	return ret;
}

static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
					   bool on)
{
	u32 val;

	regmap_read(kirin_pcie->apb, SOC_PCIECTRL_CTRL0_ADDR, &val);
	if (on)
		val = val | PCIE_ELBI_SLV_DBI_ENABLE;
	else
		val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;

	regmap_write(kirin_pcie->apb, SOC_PCIECTRL_CTRL0_ADDR, val);
}

static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,
					   bool on)
{
	u32 val;

	regmap_read(kirin_pcie->apb, SOC_PCIECTRL_CTRL1_ADDR, &val);
	if (on)
		val = val | PCIE_ELBI_SLV_DBI_ENABLE;
	else
		val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;

	regmap_write(kirin_pcie->apb, SOC_PCIECTRL_CTRL1_ADDR, val);
}

static int kirin_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);

	if (PCI_SLOT(devfn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	*val = dw_pcie_read_dbi(pci, where, size);
	return PCIBIOS_SUCCESSFUL;
}

static int kirin_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);

	if (PCI_SLOT(devfn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	dw_pcie_write_dbi(pci, where, size, val);
	return PCIBIOS_SUCCESSFUL;
}

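/*
 * pci_ops ->add_bus() callback: drive the per-slot PERST# lines and wait
 * before configuration accesses are issued to the slots.
 */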
static int kirin_pcie_add_bus(struct pci_bus *bus)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
	int i, ret;

	if (!kirin_pcie->num_slots)
		return 0;

	/* Send PERST# to each slot */
	for (i = 0; i < kirin_pcie->num_slots; i++) {
		ret = gpio_direction_output(kirin_pcie->gpio_id_reset[i], 1);
		if (ret) {
			dev_err(pci->dev, "PERST# %s error: %d\n",
				kirin_pcie->reset_names[i], ret);
		}
	}
	usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);

	return 0;
}

static struct pci_ops kirin_pci_ops = {
	.read = kirin_pcie_rd_own_conf,
	.write = kirin_pcie_wr_own_conf,
	.add_bus = kirin_pcie_add_bus,
};

static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
			       u32 reg, size_t size)
{
	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
	u32 ret;

	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
	dw_pcie_read(base + reg, size, &ret);
	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);

	return ret;
}

static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
				 u32 reg, size_t size, u32 val)
{
	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);

	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
	dw_pcie_write(base + reg, size, val);
	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
}

static int kirin_pcie_link_up(struct dw_pcie *pci)
{
	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
	u32 val;

	regmap_read(kirin_pcie->apb, PCIE_APB_PHY_STATUS0, &val);
	if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
		return 1;

	return 0;
}

static int kirin_pcie_start_link(struct dw_pcie *pci)
{
	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);

	/* assert LTSSM enable */
	regmap_write(kirin_pcie->apb, PCIE_APP_LTSSM_ENABLE,
		     PCIE_LTSSM_ENABLE_BIT);

	return 0;
}

static int kirin_pcie_host_init(struct dw_pcie_rp *pp)
{
	pp->bridge->ops = &kirin_pci_ops;

	return 0;
}

static int kirin_pcie_gpio_request(struct kirin_pcie *kirin_pcie,
				   struct device *dev)
{
	int ret, i;

	for (i = 0; i < kirin_pcie->num_slots; i++) {
		if (!gpio_is_valid(kirin_pcie->gpio_id_reset[i])) {
			dev_err(dev, "unable to get a valid %s gpio\n",
				kirin_pcie->reset_names[i]);
			return -ENODEV;
		}

		ret = devm_gpio_request(dev, kirin_pcie->gpio_id_reset[i],
					kirin_pcie->reset_names[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++) {
		if (!gpio_is_valid(kirin_pcie->gpio_id_clkreq[i])) {
			dev_err(dev, "unable to get a valid %s gpio\n",
				kirin_pcie->clkreq_names[i]);
			return -ENODEV;
		}

		ret = devm_gpio_request(dev, kirin_pcie->gpio_id_clkreq[i],
					kirin_pcie->clkreq_names[i]);
		if (ret)
			return ret;

		ret = gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 0);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct dw_pcie_ops kirin_dw_pcie_ops = {
	.read_dbi = kirin_pcie_read_dbi,
	.write_dbi = kirin_pcie_write_dbi,
	.link_up = kirin_pcie_link_up,
	.start_link = kirin_pcie_start_link,
};

static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
	.host_init = kirin_pcie_host_init,
};

static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie)
{
	int i;

	if (kirin_pcie->type == PCIE_KIRIN_INTERNAL_PHY)
		return hi3660_pcie_phy_power_off(kirin_pcie);

	for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++)
		gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 1);

	phy_power_off(kirin_pcie->phy);
	phy_exit(kirin_pcie->phy);

	return 0;
}

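/*
 * Power up either the built-in Kirin 960 PHY handled above or an external
 * PHY driver (depending on the compatible), then handle the DWC PERST#
 * GPIO before the host is initialized.
 */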
static int kirin_pcie_power_on(struct platform_device *pdev,
			       struct kirin_pcie *kirin_pcie)
{
	struct device *dev = &pdev->dev;
	int ret;

	if (kirin_pcie->type == PCIE_KIRIN_INTERNAL_PHY) {
		ret = hi3660_pcie_phy_init(pdev, kirin_pcie);
		if (ret)
			return ret;

		ret = hi3660_pcie_phy_power_on(kirin_pcie);
		if (ret)
			return ret;
	} else {
		kirin_pcie->phy = devm_of_phy_get(dev, dev->of_node, NULL);
		if (IS_ERR(kirin_pcie->phy))
			return PTR_ERR(kirin_pcie->phy);

		ret = kirin_pcie_gpio_request(kirin_pcie, dev);
		if (ret)
			return ret;

		ret = phy_init(kirin_pcie->phy);
		if (ret)
			goto err;

		ret = phy_power_on(kirin_pcie->phy);
		if (ret)
			goto err;
	}

	/* De-assert the DWC PERST# towards the Endpoint */
	usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);

	if (!gpio_request(kirin_pcie->gpio_id_dwc_perst, "pcie_perst_bridge")) {
		ret = gpio_direction_output(kirin_pcie->gpio_id_dwc_perst, 1);
		if (ret)
			goto err;
	}

	usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);

	return 0;
err:
	kirin_pcie_power_off(kirin_pcie);

	return ret;
}

static int __exit kirin_pcie_remove(struct platform_device *pdev)
{
	struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev);

	dw_pcie_host_deinit(&kirin_pcie->pci->pp);

	kirin_pcie_power_off(kirin_pcie);

	return 0;
}

struct kirin_pcie_data {
	enum pcie_kirin_phy_type phy_type;
};

static const struct kirin_pcie_data kirin_960_data = {
	.phy_type = PCIE_KIRIN_INTERNAL_PHY,
};

static const struct kirin_pcie_data kirin_970_data = {
	.phy_type = PCIE_KIRIN_EXTERNAL_PHY,
};

static const struct of_device_id kirin_pcie_match[] = {
	{ .compatible = "hisilicon,kirin960-pcie", .data = &kirin_960_data },
	{ .compatible = "hisilicon,kirin970-pcie", .data = &kirin_970_data },
	{},
};

static int kirin_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct kirin_pcie_data *data;
	struct kirin_pcie *kirin_pcie;
	struct dw_pcie *pci;
	int ret;

	if (!dev->of_node) {
		dev_err(dev, "NULL node\n");
		return -EINVAL;
	}

	data = of_device_get_match_data(dev);
	if (!data) {
		dev_err(dev, "OF data missing\n");
		return -EINVAL;
	}

	kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
	if (!kirin_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &kirin_dw_pcie_ops;
	pci->pp.ops = &kirin_pcie_host_ops;
	kirin_pcie->pci = pci;
	kirin_pcie->type = data->phy_type;

	ret = kirin_pcie_get_resource(kirin_pcie, pdev);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, kirin_pcie);

	ret = kirin_pcie_power_on(pdev, kirin_pcie);
	if (ret)
		return ret;

	return dw_pcie_host_init(&pci->pp);
}

static struct platform_driver kirin_pcie_driver = {
	.probe = kirin_pcie_probe,
	.remove = __exit_p(kirin_pcie_remove),
	.driver = {
		.name = "kirin-pcie",
		.of_match_table = kirin_pcie_match,
		.suppress_bind_attrs = true,
	},
};
module_platform_driver(kirin_pcie_driver);

MODULE_DEVICE_TABLE(of, kirin_pcie_match);
MODULE_DESCRIPTION("PCIe host controller driver for Kirin Phone SoCs");
MODULE_AUTHOR("Xiaowei Song <songxiaowei@huawei.com>");
MODULE_LICENSE("GPL v2");