// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Texas Instruments Keystone SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments, Ltd.
 *		https://www.ti.com
 *
 * Author: Murali Karicheri <m-karicheri2@ti.com>
 * Implementation based on pci-exynos.c and pcie-designware.c
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define PCIE_VENDORID_MASK	0xffff
#define PCIE_DEVICEID_SHIFT	16

/* Application registers */
#define CMD_STATUS			0x004
#define LTSSM_EN_VAL			BIT(0)
#define OB_XLAT_EN_VAL			BIT(1)
#define DBI_CS2				BIT(5)

#define CFG_SETUP			0x008
#define CFG_BUS(x)			(((x) & 0xff) << 16)
#define CFG_DEVICE(x)			(((x) & 0x1f) << 8)
#define CFG_FUNC(x)			((x) & 0x7)
#define CFG_TYPE1			BIT(24)

#define OB_SIZE				0x030
#define OB_OFFSET_INDEX(n)		(0x200 + (8 * (n)))
#define OB_OFFSET_HI(n)			(0x204 + (8 * (n)))
#define OB_ENABLEN			BIT(0)
#define OB_WIN_SIZE			8	/* 8 MB */

#define PCIE_LEGACY_IRQ_ENABLE_SET(n)	(0x188 + (0x10 * ((n) - 1)))
#define PCIE_LEGACY_IRQ_ENABLE_CLR(n)	(0x18c + (0x10 * ((n) - 1)))
#define PCIE_EP_IRQ_SET			0x64
#define PCIE_EP_IRQ_CLR			0x68
#define INT_ENABLE			BIT(0)

/* IRQ register defines */
#define IRQ_EOI				0x050

#define MSI_IRQ				0x054
#define MSI_IRQ_STATUS(n)		(0x104 + ((n) << 4))
#define MSI_IRQ_ENABLE_SET(n)		(0x108 + ((n) << 4))
#define MSI_IRQ_ENABLE_CLR(n)		(0x10c + ((n) << 4))
#define MSI_IRQ_OFFSET			4

#define IRQ_STATUS(n)			(0x184 + ((n) << 4))
#define IRQ_ENABLE_SET(n)		(0x188 + ((n) << 4))
#define INTx_EN				BIT(0)

#define ERR_IRQ_STATUS			0x1c4
#define ERR_IRQ_ENABLE_SET		0x1c8
#define ERR_AER				BIT(5)	/* ECRC error */
#define AM6_ERR_AER			BIT(4)	/* AM6 ECRC error */
#define ERR_AXI				BIT(4)	/* AXI tag lookup fatal error */
#define ERR_CORR			BIT(3)	/* Correctable error */
#define ERR_NONFATAL			BIT(2)	/* Non-fatal error */
#define ERR_FATAL			BIT(1)	/* Fatal error */
#define ERR_SYS				BIT(0)	/* System error */
#define ERR_IRQ_ALL			(ERR_AER | ERR_AXI | ERR_CORR | \
					 ERR_NONFATAL | ERR_FATAL | ERR_SYS)

/* PCIE controller device IDs */
#define PCIE_RC_K2HK			0xb008
#define PCIE_RC_K2E			0xb009
#define PCIE_RC_K2L			0xb00a
#define PCIE_RC_K2G			0xb00b

#define KS_PCIE_DEV_TYPE_MASK		(0x3 << 1)
#define KS_PCIE_DEV_TYPE(mode)		((mode) << 1)

#define EP				0x0
#define LEG_EP				0x1
#define RC				0x2

#define KS_PCIE_SYSCLOCKOUTEN		BIT(0)

#define AM654_PCIE_DEV_TYPE_MASK	0x3
#define AM654_WIN_SIZE			SZ_64K

#define APP_ADDR_SPACE_0		(16 * SZ_1K)

#define to_keystone_pcie(x)		dev_get_drvdata((x)->dev)

struct ks_pcie_of_data {
	enum dw_pcie_device_mode mode;
	const struct dw_pcie_host_ops *host_ops;
	const struct dw_pcie_ep_ops *ep_ops;
	unsigned int version;
};

struct keystone_pcie {
	struct dw_pcie *pci;
	/* PCI Device ID */
	u32 device_id;
	int legacy_host_irqs[PCI_NUM_INTX];
	struct device_node *legacy_intc_np;

	int msi_host_irq;
	int num_lanes;
	u32 num_viewport;
	struct phy **phy;
	struct device_link **link;
	struct device_node *msi_intc_np;
	struct irq_domain *legacy_irq_domain;
	struct device_node *np;

	/* Application register space */
	void __iomem *va_app_base;	/* DT 1st resource */
	struct resource app;
	bool is_am6;
};

static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
{
	return readl(ks_pcie->va_app_base + offset);
}

static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
			       u32 val)
{
	writel(val, ks_pcie->va_app_base + offset);
}

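/*
 * The application MSI registers are organized as eight status/enable
 * registers (MSI0-MSI7), each tracking four MSI vectors in bits 0-3. A
 * hardware MSI vector thus maps to register (hwirq % 8), bit (hwirq / 8):
 * e.g. vector 10 lives in bit 1 of MSI2. The MSI irq_chip callbacks below
 * all encode that split as reg_offset and bit_pos.
 */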
static void ks_pcie_msi_irq_ack(struct irq_data *data)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	u32 irq = data->hwirq;
	struct dw_pcie *pci;
	u32 reg_offset;
	u32 bit_pos;

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	reg_offset = irq % 8;
	bit_pos = irq >> 3;

	ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
			   BIT(bit_pos));
	ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}

static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	struct dw_pcie *pci;
	u64 msi_target;

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	msi_target = ks_pcie->app.start + MSI_IRQ;
	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);
	msg->data = data->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}

static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
				    const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static void ks_pcie_msi_mask(struct irq_data *data)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	u32 irq = data->hwirq;
	struct dw_pcie *pci;
	unsigned long flags;
	u32 reg_offset;
	u32 bit_pos;

	raw_spin_lock_irqsave(&pp->lock, flags);

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	reg_offset = irq % 8;
	bit_pos = irq >> 3;

	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
			   BIT(bit_pos));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void ks_pcie_msi_unmask(struct irq_data *data)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	struct keystone_pcie *ks_pcie;
	u32 irq = data->hwirq;
	struct dw_pcie *pci;
	unsigned long flags;
	u32 reg_offset;
	u32 bit_pos;

	raw_spin_lock_irqsave(&pp->lock, flags);

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);

	reg_offset = irq % 8;
	bit_pos = irq >> 3;

	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
			   BIT(bit_pos));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static struct irq_chip ks_pcie_msi_irq_chip = {
	.name = "KEYSTONE-PCI-MSI",
	.irq_ack = ks_pcie_msi_irq_ack,
	.irq_compose_msi_msg = ks_pcie_compose_msi_msg,
	.irq_set_affinity = ks_pcie_msi_set_affinity,
	.irq_mask = ks_pcie_msi_mask,
	.irq_unmask = ks_pcie_msi_unmask,
};

static int ks_pcie_msi_host_init(struct pcie_port *pp)
{
	pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
	return dw_pcie_allocate_domains(pp);
}

static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
				      int offset)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	u32 pending;
	int virq;

	pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));

	if (BIT(0) & pending) {
		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
		dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
		generic_handle_irq(virq);
	}

	/* EOI the INTx interrupt */
	ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
}

static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
	ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
}

static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
{
	u32 reg;
	struct device *dev = ks_pcie->pci->dev;

	reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
	if (!reg)
		return IRQ_NONE;

	if (reg & ERR_SYS)
		dev_err(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_err(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_CORR)
		dev_dbg(dev, "Correctable Error\n");

	if (!ks_pcie->is_am6 && (reg & ERR_AXI))
		dev_err(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
		dev_err(dev, "ECRC Error\n");

	ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);

	return IRQ_HANDLED;
}

static void ks_pcie_ack_legacy_irq(struct irq_data *d)
{
}

static void ks_pcie_mask_legacy_irq(struct irq_data *d)
{
}

static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
{
}

static struct irq_chip ks_pcie_legacy_irq_chip = {
	.name = "Keystone-PCI-Legacy-IRQ",
	.irq_ack = ks_pcie_ack_legacy_irq,
	.irq_mask = ks_pcie_mask_legacy_irq,
	.irq_unmask = ks_pcie_unmask_legacy_irq,
};

static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
				       unsigned int irq,
				       irq_hw_number_t hw_irq)
{
	irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, d->host_data);

	return 0;
}

static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
	.map = ks_pcie_init_legacy_irq_map,
	.xlate = irq_domain_xlate_onetwocell,
};

/**
 * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
 *	     PCIe host controller driver information.
 *
 * Since modification of dbi_cs2 involves a different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val |= DBI_CS2;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

	do {
		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	} while (!(val & DBI_CS2));
}

/**
 * ks_pcie_clear_dbi_mode() - Disable DBI mode
 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
 *	     PCIe host controller driver information.
 *
 * Since modification of dbi_cs2 involves a different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val &= ~DBI_CS2;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);

	do {
		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	} while (val & DBI_CS2);
}

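/*
 * Outbound translation uses up to "num-viewport" fixed 8 MB windows with a
 * direct 1:1 CPU <-> PCI address mapping, so window i simply covers
 * (start + i * 8 MB). For example, if the first MEM window began at
 * 0x50000000 (illustrative address), window 2 would span
 * 0x51000000-0x517fffff.
 */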
static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
	u32 val;
	u32 num_viewport = ks_pcie->num_viewport;
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	u64 start, end;
	struct resource *mem;
	int i;

	mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
	start = mem->start;
	end = mem->end;

	/* Disable BARs for inbound access */
	ks_pcie_set_dbi_mode(ks_pcie);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
	ks_pcie_clear_dbi_mode(ks_pcie);

	if (ks_pcie->is_am6)
		return;

	val = ilog2(OB_WIN_SIZE);
	ks_pcie_app_writel(ks_pcie, OB_SIZE, val);

	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
	for (i = 0; i < num_viewport && (start < end); i++) {
		ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
				   lower_32_bits(start) | OB_ENABLEN);
		ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
				   upper_32_bits(start));
		start += OB_WIN_SIZE * SZ_1M;
	}

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val |= OB_XLAT_EN_VAL;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
}

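/*
 * Keystone exposes a single outbound config window. The target
 * bus/device/function is first latched into CFG_SETUP (CFG_TYPE1 selects
 * type 1 cycles for buses below the root port's immediate child), and the
 * generic accessors then go through the shared va_cfg0_base mapping.
 */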
static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
					   unsigned int devfn, int where)
{
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 reg;

	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
		CFG_FUNC(PCI_FUNC(devfn));
	if (!pci_is_root_bus(bus->parent))
		reg |= CFG_TYPE1;
	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);

	return pp->va_cfg0_base + where;
}

static struct pci_ops ks_child_pcie_ops = {
	.map_bus = ks_pcie_other_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

/**
 * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
 * @bus: A pointer to the PCI bus structure.
 *
 * This sets up BAR0 to enable inbound access for the MSI_IRQ register.
 */
static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
{
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	if (!pci_is_root_bus(bus))
		return 0;

	/* Switch to DBI mode so the writes below hit the BAR0 mask register */
	ks_pcie_set_dbi_mode(ks_pcie);

	/* Enable BAR0 and size it to 4K */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);

	ks_pcie_clear_dbi_mode(ks_pcie);

	/*
	 * For BAR0, just setting bus address for inbound writes (MSI) should
	 * be sufficient. Use physical address to avoid any conflicts.
	 */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);

	return 0;
}

static struct pci_ops ks_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
	.add_bus = ks_pcie_v3_65_add_bus,
};

/**
 * ks_pcie_link_up() - Check whether the link is up
 * @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
 *	 controller driver information.
 */
static int ks_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
	val &= PORT_LOGIC_LTSSM_STATE_MASK;
	return (val == PORT_LOGIC_LTSSM_STATE_L0);
}

static void ks_pcie_stop_link(struct dw_pcie *pci)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 val;

	/* Disable Link training */
	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	val &= ~LTSSM_EN_VAL;
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
}

static int ks_pcie_start_link(struct dw_pcie *pci)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 val;

	/* Initiate Link Training */
	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);

	return 0;
}

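/*
 * Clamp the max read request size on all devices below a Keystone root
 * port. Registered as an ENABLE-stage fixup, so it runs from
 * pci_enable_device() for each device and walks up to the host bridge to
 * check whether it is one of the Keystone RC device IDs listed below.
 */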
static void ks_pcie_quirk(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;
	struct pci_dev *bridge;
	static const struct pci_device_id rc_pci_devids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ 0, },
	};

	if (pci_is_root_bus(bus))
		bridge = dev;

	/* look for the host bridge */
	while (!pci_is_root_bus(bus)) {
		bridge = bus->self;
		bus = bus->parent;
	}

	if (!bridge)
		return;

	/*
	 * The Keystone PCI controller has a h/w limitation of a 256-byte
	 * maximum read request size. It can't handle anything higher than
	 * this, so force this limit on all downstream devices.
	 */
	if (pci_match_id(rc_pci_devids, bridge)) {
		if (pcie_get_readrq(dev) > 256) {
			dev_info(&dev->dev, "limiting MRRS to 256\n");
			pcie_set_readrq(dev, 256);
		}
	}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);

static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
{
	unsigned int irq = desc->irq_data.hwirq;
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	u32 offset = irq - ks_pcie->msi_host_irq;
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 vector, virq, reg, pos;

	dev_dbg(dev, "%s, irq %d\n", __func__, irq);

	/*
	 * The chained irq handler installation would have replaced the
	 * normal interrupt driver handler, so we need to take care of
	 * the mask/unmask and ack operations.
	 */
	chained_irq_enter(chip, desc);

	reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
	/*
	 * MSI0 status bits 0-3 show vectors 0, 8, 16, 24; MSI1 status
	 * bits show vectors 1, 9, 17, 25; and so forth. Status bit 'pos'
	 * of register 'offset' therefore decodes to vector
	 * offset + 8 * pos.
	 */
	for (pos = 0; pos < 4; pos++) {
		if (!(reg & BIT(pos)))
			continue;

		vector = offset + (pos << 3);
		virq = irq_linear_revmap(pp->irq_domain, vector);
		dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector,
			virq);
		generic_handle_irq(virq);
	}

	chained_irq_exit(chip, desc);
}

/**
 * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
 * @desc: Pointer to irq descriptor
 *
 * Traverse the pending legacy interrupts and invoke the handler for each.
 * Also takes care of the interrupt controller level mask/ack operation.
 */
static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
	struct irq_chip *chip = irq_desc_get_chip(desc);

	dev_dbg(dev, ": Handling legacy irq %d\n", irq);

	/*
	 * The chained irq handler installation would have replaced the
	 * normal interrupt driver handler, so we need to take care of
	 * the mask/unmask and ack operations.
	 */
	chained_irq_enter(chip, desc);
	ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
	chained_irq_exit(chip, desc);
}

static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
{
	struct device *dev = ks_pcie->pci->dev;
	struct device_node *np = ks_pcie->np;
	struct device_node *intc_np;
	struct irq_data *irq_data;
	int irq_count, irq, ret, i;

	if (!IS_ENABLED(CONFIG_PCI_MSI))
		return 0;

	intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
	if (!intc_np) {
		if (ks_pcie->is_am6)
			return 0;
		dev_warn(dev, "msi-interrupt-controller node is absent\n");
		return -EINVAL;
	}

	irq_count = of_irq_count(intc_np);
	if (!irq_count) {
		dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < irq_count; i++) {
		irq = irq_of_parse_and_map(intc_np, i);
		if (!irq) {
			ret = -EINVAL;
			goto err;
		}

		if (!ks_pcie->msi_host_irq) {
			irq_data = irq_get_irq_data(irq);
			if (!irq_data) {
				ret = -EINVAL;
				goto err;
			}
			ks_pcie->msi_host_irq = irq_data->hwirq;
		}

		irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
						 ks_pcie);
	}

	of_node_put(intc_np);
	return 0;

err:
	of_node_put(intc_np);
	return ret;
}

static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
{
	struct device *dev = ks_pcie->pci->dev;
	struct irq_domain *legacy_irq_domain;
	struct device_node *np = ks_pcie->np;
	struct device_node *intc_np;
	int irq_count, irq, ret = 0, i;

	intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
	if (!intc_np) {
		/*
		 * Since legacy interrupts are modeled as edge interrupts
		 * in AM6, keep them disabled for now.
		 */
		if (ks_pcie->is_am6)
			return 0;
		dev_warn(dev, "legacy-interrupt-controller node is absent\n");
		return -EINVAL;
	}

	irq_count = of_irq_count(intc_np);
	if (!irq_count) {
		dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < irq_count; i++) {
		irq = irq_of_parse_and_map(intc_np, i);
		if (!irq) {
			ret = -EINVAL;
			goto err;
		}
		ks_pcie->legacy_host_irqs[i] = irq;

		irq_set_chained_handler_and_data(irq,
						 ks_pcie_legacy_irq_handler,
						 ks_pcie);
	}

	legacy_irq_domain =
		irq_domain_add_linear(intc_np, PCI_NUM_INTX,
				      &ks_pcie_legacy_irq_domain_ops, NULL);
	if (!legacy_irq_domain) {
		dev_err(dev, "Failed to add irq domain for legacy irqs\n");
		ret = -EINVAL;
		goto err;
	}
	ks_pcie->legacy_irq_domain = legacy_irq_domain;

	for (i = 0; i < PCI_NUM_INTX; i++)
		ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);

err:
	of_node_put(intc_np);
	return ret;
}

#ifdef CONFIG_ARM
/*
 * When a PCI device does not exist during config cycles, the Keystone host
 * gets a bus error instead of returning 0xffffffff. For such faults this
 * handler fakes an all-ones read into the faulting load's destination
 * register and always returns 0 (fault handled).
 */
static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
			 struct pt_regs *regs)
{
	unsigned long instr = *(unsigned long *) instruction_pointer(regs);

	if ((instr & 0x0e100090) == 0x00100090) {
		int reg = (instr >> 12) & 15;

		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
	}

	return 0;
}
#endif

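/*
 * ks_pcie_init_id() - Program the Vendor/Device ID from SoC data
 *
 * The boot-time IDs live in the syscon word referenced by the
 * "ti,syscon-pcie-id" phandle: vendor ID in bits 15:0, device ID in bits
 * 31:16. They are copied into the (normally read-only) DBI config-space
 * registers, bracketed by the dbi_ro_wr enable/disable helpers.
 */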
static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
{
	int ret;
	unsigned int id;
	struct regmap *devctrl_regs;
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;

	devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
	if (IS_ERR(devctrl_regs))
		return PTR_ERR(devctrl_regs);

	ret = regmap_read(devctrl_regs, 0, &id);
	if (ret)
		return ret;

	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int __init ks_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	int ret;

	pp->bridge->ops = &ks_pcie_ops;
	if (!ks_pcie->is_am6)
		pp->bridge->child_ops = &ks_child_pcie_ops;

	ret = ks_pcie_config_legacy_irq(ks_pcie);
	if (ret)
		return ret;

	ret = ks_pcie_config_msi_irq(ks_pcie);
	if (ret)
		return ret;

	ks_pcie_stop_link(pci);
	ks_pcie_setup_rc_app_regs(ks_pcie);
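	/* Advertise 32-bit addressing in the bridge I/O base/limit registers */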
	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
	       pci->dbi_base + PCI_IO_BASE);

	ret = ks_pcie_init_id(ks_pcie);
	if (ret < 0)
		return ret;

#ifdef CONFIG_ARM
	/*
	 * PCIe access errors that result in OCP errors are caught by ARM as
	 * "External aborts"
	 */
	hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
			"Asynchronous external abort");
#endif

	return 0;
}

static const struct dw_pcie_host_ops ks_pcie_host_ops = {
	.host_init = ks_pcie_host_init,
	.msi_host_init = ks_pcie_msi_host_init,
};

static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
	.host_init = ks_pcie_host_init,
};

static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
{
	struct keystone_pcie *ks_pcie = priv;

	return ks_pcie_handle_error_irq(ks_pcie);
}

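/*
 * The shadow (BAR mask) registers are overlaid on the DBI space and are
 * only visible while DBI_CS2 is set, so dbi2 writes have to be bracketed
 * by the set/clear DBI mode helpers above.
 */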
static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
				     u32 reg, size_t size, u32 val)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	ks_pcie_set_dbi_mode(ks_pcie);
	dw_pcie_write(base + reg, size, val);
	ks_pcie_clear_dbi_mode(ks_pcie);
}

static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
	.start_link = ks_pcie_start_link,
	.stop_link = ks_pcie_stop_link,
	.link_up = ks_pcie_link_up,
	.write_dbi2 = ks_pcie_am654_write_dbi2,
};

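/*
 * AM654 EP init: BAR0 appears to be sized for the 16K application address
 * space (the dbi2 write programs its mask register) and is flagged as
 * 32-bit memory; page_size matches the 64K outbound window alignment.
 */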
static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	int flags;

	ep->page_size = AM654_WIN_SIZE;
	flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
	dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
}

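/*
 * Raise a legacy (INTx) interrupt from EP mode: enable the interrupt for
 * the pin advertised in PCI_INTERRUPT_PIN, pulse PCIE_EP_IRQ_SET for about
 * a millisecond, then clear and disable it again, which effectively
 * produces an assert/deassert pair on the link.
 */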
static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
{
	struct dw_pcie *pci = ks_pcie->pci;
	u8 int_pin;

	int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
	if (int_pin == 0 || int_pin > 4)
		return;

	ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
			   INT_ENABLE);
	ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
	mdelay(1);
	ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
	ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
			   INT_ENABLE);
}

static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				   enum pci_epc_irq_type type,
				   u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		ks_pcie_am654_raise_legacy_irq(ks_pcie);
		break;
	case PCI_EPC_IRQ_MSI:
		dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
		break;
	case PCI_EPC_IRQ_MSIX:
		dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
		return -EINVAL;
	}

	return 0;
}

static const struct pci_epc_features ks_pcie_am654_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.reserved_bar = 1 << BAR_0 | 1 << BAR_1,
	.bar_fixed_64bit = 1 << BAR_0,
	.bar_fixed_size[2] = SZ_1M,
	.bar_fixed_size[3] = SZ_64K,
	.bar_fixed_size[4] = 256,
	.bar_fixed_size[5] = SZ_1M,
	.align = SZ_1M,
};

static const struct pci_epc_features*
ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
{
	return &ks_pcie_am654_epc_features;
}

static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
	.ep_init = ks_pcie_am654_ep_init,
	.raise_irq = ks_pcie_am654_raise_irq,
	.get_features = &ks_pcie_am654_get_features,
};

static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
{
	int num_lanes = ks_pcie->num_lanes;

	while (num_lanes--) {
		phy_power_off(ks_pcie->phy[num_lanes]);
		phy_exit(ks_pcie->phy[num_lanes]);
	}
}

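/*
 * Bring up each lane's PHY in turn (reset, init, power on), unwinding any
 * lanes that were already enabled if one of the steps fails.
 */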
static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
{
	int i;
	int ret;
	int num_lanes = ks_pcie->num_lanes;

	for (i = 0; i < num_lanes; i++) {
		ret = phy_reset(ks_pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_init(ks_pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(ks_pcie->phy[i]);
		if (ret < 0) {
			phy_exit(ks_pcie->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(ks_pcie->phy[i]);
		phy_exit(ks_pcie->phy[i]);
	}

	return ret;
}

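/*
 * Keystone 2 mode selection: the DEVTYPE field (bits 2:1) of the
 * "ti,syscon-pcie-mode" syscon register selects EP/legacy-EP/RC operation
 * and SYSCLOCKOUTEN gates the reference clock output; only RC mode is
 * programmed here. If the phandle is absent the mode is treated as
 * already configured.
 */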
static int ks_pcie_set_mode(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct regmap *syscon;
	u32 val;
	u32 mask;
	int ret = 0;

	syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
	if (IS_ERR(syscon))
		return 0;

	mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
	val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;

	ret = regmap_update_bits(syscon, 0, mask, val);
	if (ret) {
		dev_err(dev, "failed to set pcie mode\n");
		return ret;
	}

	return 0;
}

static int ks_pcie_am654_set_mode(struct device *dev,
				  enum dw_pcie_device_mode mode)
{
	struct device_node *np = dev->of_node;
	struct regmap *syscon;
	u32 val;
	u32 mask;
	int ret = 0;

	syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
	if (IS_ERR(syscon))
		return 0;

	mask = AM654_PCIE_DEV_TYPE_MASK;

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		val = RC;
		break;
	case DW_PCIE_EP_TYPE:
		val = EP;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
		return -EINVAL;
	}

	ret = regmap_update_bits(syscon, 0, mask, val);
	if (ret) {
		dev_err(dev, "failed to set pcie mode\n");
		return ret;
	}

	return 0;
}

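/*
 * The DesignWare core version is encoded as hex digits: 0x365A is the
 * 3.65a core used on Keystone 2, 0x490A the 4.90a core used on AM654.
 */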
static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
	.host_ops = &ks_pcie_host_ops,
	.version = 0x365A,
};

static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
	.host_ops = &ks_pcie_am654_host_ops,
	.mode = DW_PCIE_RC_TYPE,
	.version = 0x490A,
};

static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
	.ep_ops = &ks_pcie_am654_ep_ops,
	.mode = DW_PCIE_EP_TYPE,
	.version = 0x490A,
};

static const struct of_device_id ks_pcie_of_match[] = {
	{
		.type = "pci",
		.data = &ks_pcie_rc_of_data,
		.compatible = "ti,keystone-pcie",
	},
	{
		.data = &ks_pcie_am654_rc_of_data,
		.compatible = "ti,am654-pcie-rc",
	},
	{
		.data = &ks_pcie_am654_ep_of_data,
		.compatible = "ti,am654-pcie-ep",
	},
	{ },
};

static int __init ks_pcie_probe(struct platform_device *pdev)
{
	const struct dw_pcie_host_ops *host_ops;
	const struct dw_pcie_ep_ops *ep_ops;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct ks_pcie_of_data *data;
	const struct of_device_id *match;
	enum dw_pcie_device_mode mode;
	struct dw_pcie *pci;
	struct keystone_pcie *ks_pcie;
	struct device_link **link;
	struct gpio_desc *gpiod;
	struct resource *res;
	unsigned int version;
	void __iomem *base;
	u32 num_viewport;
	struct phy **phy;
	u32 num_lanes;
	char name[10];
	int ret;
	int irq;
	int i;

	match = of_match_device(of_match_ptr(ks_pcie_of_match), dev);
	data = (struct ks_pcie_of_data *)match->data;
	if (!data)
		return -EINVAL;

	version = data->version;
	host_ops = data->host_ops;
	ep_ops = data->ep_ops;
	mode = data->mode;

	ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
	if (!ks_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

	ks_pcie->app = *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
	base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
		ks_pcie->is_am6 = true;

	pci->dbi_base = base;
	pci->dbi_base2 = base;
	pci->dev = dev;
	pci->ops = &ks_pcie_dw_pcie_ops;
	pci->version = version;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
			  "ks-pcie-error-irq", ks_pcie);
	if (ret < 0) {
		dev_err(dev, "failed to request error IRQ %d\n",
			irq);
		return ret;
	}

	ret = of_property_read_u32(np, "num-lanes", &num_lanes);
	if (ret)
		num_lanes = 1;

	phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < num_lanes; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_optional_get(dev, name);
		if (IS_ERR(phy[i])) {
			ret = PTR_ERR(phy[i]);
			goto err_link;
		}

		if (!phy[i])
			continue;

		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			ret = -EINVAL;
			goto err_link;
		}
	}

	ks_pcie->np = np;
	ks_pcie->pci = pci;
	ks_pcie->link = link;
	ks_pcie->num_lanes = num_lanes;
	ks_pcie->phy = phy;

	gpiod = devm_gpiod_get_optional(dev, "reset",
					GPIOD_OUT_LOW);
	if (IS_ERR(gpiod)) {
		ret = PTR_ERR(gpiod);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get reset GPIO\n");
		goto err_link;
	}

	ret = ks_pcie_enable_phy(ks_pcie);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		goto err_link;
	}

	platform_set_drvdata(pdev, ks_pcie);
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

	if (pci->version >= 0x480A)
		ret = ks_pcie_am654_set_mode(dev, mode);
	else
		ret = ks_pcie_set_mode(dev);
	if (ret < 0)
		goto err_get_sync;

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
			ret = -ENODEV;
			goto err_get_sync;
		}

		ret = of_property_read_u32(np, "num-viewport", &num_viewport);
		if (ret < 0) {
			dev_err(dev, "unable to read *num-viewport* property\n");
			goto err_get_sync;
		}

		/*
		 * The "Power Sequencing and Reset Signal Timings" table of
		 * the PCI Express Card Electromechanical Specification,
		 * Rev. 2.0, indicates PERST# should be deasserted after a
		 * minimum of 100 us once REFCLK is stable. The REFCLK to the
		 * connector in RC mode is selected while enabling the PHY,
		 * so deassert PERST# after 100 us.
		 */
		if (gpiod) {
			usleep_range(100, 200);
			gpiod_set_value_cansleep(gpiod, 1);
		}

		ks_pcie->num_viewport = num_viewport;
		pci->pp.ops = host_ops;
		ret = dw_pcie_host_init(&pci->pp);
		if (ret < 0)
			goto err_get_sync;
		break;
	case DW_PCIE_EP_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
			ret = -ENODEV;
			goto err_get_sync;
		}

		pci->ep.ops = ep_ops;
		ret = dw_pcie_ep_init(&pci->ep);
		if (ret < 0)
			goto err_get_sync;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
	}

	ks_pcie_enable_error_irq(ks_pcie);

	return 0;

err_get_sync:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	ks_pcie_disable_phy(ks_pcie);

err_link:
	while (--i >= 0 && link[i])
		device_link_del(link[i]);

	return ret;
}

static int __exit ks_pcie_remove(struct platform_device *pdev)
{
	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
	struct device_link **link = ks_pcie->link;
	int num_lanes = ks_pcie->num_lanes;
	struct device *dev = &pdev->dev;

	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	ks_pcie_disable_phy(ks_pcie);
	while (num_lanes--)
		device_link_del(link[num_lanes]);

	return 0;
}

static struct platform_driver ks_pcie_driver __refdata = {
	.probe  = ks_pcie_probe,
	.remove = __exit_p(ks_pcie_remove),
	.driver = {
		.name	= "keystone-pcie",
		.of_match_table = of_match_ptr(ks_pcie_of_match),
	},
};
builtin_platform_driver(ks_pcie_driver);