1// SPDX-License-Identifier: GPL-2.0
2/*
3 * PCIe host controller driver for Texas Instruments Keystone SoCs
4 *
5 * Copyright (C) 2013-2014 Texas Instruments., Ltd.
6 * https://www.ti.com
7 *
8 * Author: Murali Karicheri <m-karicheri2@ti.com>
9 * Implementation based on pci-exynos.c and pcie-designware.c
10 */
11
12#include <linux/clk.h>
13#include <linux/delay.h>
14#include <linux/gpio/consumer.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/irqchip/chained_irq.h>
18#include <linux/irqdomain.h>
19#include <linux/mfd/syscon.h>
20#include <linux/msi.h>
21#include <linux/of.h>
22#include <linux/of_irq.h>
23#include <linux/of_pci.h>
24#include <linux/phy/phy.h>
25#include <linux/platform_device.h>
26#include <linux/regmap.h>
27#include <linux/resource.h>
28#include <linux/signal.h>
29
30#include "../../pci.h"
31#include "pcie-designware.h"
32
33#define PCIE_VENDORID_MASK 0xffff
34#define PCIE_DEVICEID_SHIFT 16
35
36/* Application registers */
37#define PID 0x000
38#define RTL GENMASK(15, 11)
39#define RTL_SHIFT 11
40#define AM6_PCI_PG1_RTL_VER 0x15
41
42#define CMD_STATUS 0x004
43#define LTSSM_EN_VAL BIT(0)
44#define OB_XLAT_EN_VAL BIT(1)
45#define DBI_CS2 BIT(5)
46
47#define CFG_SETUP 0x008
48#define CFG_BUS(x) (((x) & 0xff) << 16)
49#define CFG_DEVICE(x) (((x) & 0x1f) << 8)
50#define CFG_FUNC(x) ((x) & 0x7)
51#define CFG_TYPE1 BIT(24)
52
53#define OB_SIZE 0x030
54#define OB_OFFSET_INDEX(n) (0x200 + (8 * (n)))
55#define OB_OFFSET_HI(n) (0x204 + (8 * (n)))
56#define OB_ENABLEN BIT(0)
57#define OB_WIN_SIZE 8 /* 8MB */
58
59#define PCIE_LEGACY_IRQ_ENABLE_SET(n) (0x188 + (0x10 * ((n) - 1)))
60#define PCIE_LEGACY_IRQ_ENABLE_CLR(n) (0x18c + (0x10 * ((n) - 1)))
61#define PCIE_EP_IRQ_SET 0x64
62#define PCIE_EP_IRQ_CLR 0x68
63#define INT_ENABLE BIT(0)
64
65/* IRQ register defines */
66#define IRQ_EOI 0x050
67
68#define MSI_IRQ 0x054
69#define MSI_IRQ_STATUS(n) (0x104 + ((n) << 4))
70#define MSI_IRQ_ENABLE_SET(n) (0x108 + ((n) << 4))
71#define MSI_IRQ_ENABLE_CLR(n) (0x10c + ((n) << 4))
72#define MSI_IRQ_OFFSET 4
73
74#define IRQ_STATUS(n) (0x184 + ((n) << 4))
75#define IRQ_ENABLE_SET(n) (0x188 + ((n) << 4))
76#define INTx_EN BIT(0)
77
78#define ERR_IRQ_STATUS 0x1c4
79#define ERR_IRQ_ENABLE_SET 0x1c8
80#define ERR_AER BIT(5) /* ECRC error */
81#define AM6_ERR_AER BIT(4) /* AM6 ECRC error */
82#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
83#define ERR_CORR BIT(3) /* Correctable error */
84#define ERR_NONFATAL BIT(2) /* Non-fatal error */
85#define ERR_FATAL BIT(1) /* Fatal error */
86#define ERR_SYS BIT(0) /* System error */
87#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
88 ERR_NONFATAL | ERR_FATAL | ERR_SYS)
89
90/* PCIE controller device IDs */
91#define PCIE_RC_K2HK 0xb008
92#define PCIE_RC_K2E 0xb009
93#define PCIE_RC_K2L 0xb00a
94#define PCIE_RC_K2G 0xb00b
95
96#define KS_PCIE_DEV_TYPE_MASK (0x3 << 1)
97#define KS_PCIE_DEV_TYPE(mode) ((mode) << 1)
98
99#define EP 0x0
100#define LEG_EP 0x1
101#define RC 0x2
102
103#define KS_PCIE_SYSCLOCKOUTEN BIT(0)
104
105#define AM654_PCIE_DEV_TYPE_MASK 0x3
106#define AM654_WIN_SIZE SZ_64K
107
108#define APP_ADDR_SPACE_0 (16 * SZ_1K)
109
110#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
111
112#define PCI_DEVICE_ID_TI_AM654X 0xb00c
113
114struct ks_pcie_of_data {
115 enum dw_pcie_device_mode mode;
116 const struct dw_pcie_host_ops *host_ops;
117 const struct dw_pcie_ep_ops *ep_ops;
118 u32 version;
119};
120
121struct keystone_pcie {
122 struct dw_pcie *pci;
123 /* PCI Device ID */
124 u32 device_id;
125 int intx_host_irqs[PCI_NUM_INTX];
126
127 int msi_host_irq;
128 int num_lanes;
129 u32 num_viewport;
130 struct phy **phy;
131 struct device_link **link;
132 struct device_node *msi_intc_np;
133 struct irq_domain *intx_irq_domain;
134 struct device_node *np;
135
136 /* Application register space */
137 void __iomem *va_app_base; /* DT 1st resource */
138 struct resource app;
139 bool is_am6;
140};
141
142static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
143{
144 return readl(ks_pcie->va_app_base + offset);
145}
146
147static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
148 u32 val)
149{
150 writel(val, ks_pcie->va_app_base + offset);
151}
152
153static void ks_pcie_msi_irq_ack(struct irq_data *data)
154{
155 struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
156 struct keystone_pcie *ks_pcie;
157 u32 irq = data->hwirq;
158 struct dw_pcie *pci;
159 u32 reg_offset;
160 u32 bit_pos;
161
162 pci = to_dw_pcie_from_pp(pp);
163 ks_pcie = to_keystone_pcie(pci);
164
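	/*
	 * MSI hwirq N is reported in register bank MSI_IRQ_STATUS(N % 8),
	 * bit N / 8, so split the hwirq into a bank offset and a bit position.
	 */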
165 reg_offset = irq % 8;
166 bit_pos = irq >> 3;
167
168 ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
169 BIT(bit_pos));
170 ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
171}
172
173static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
174{
175 struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
176 struct keystone_pcie *ks_pcie;
177 struct dw_pcie *pci;
178 u64 msi_target;
179
180 pci = to_dw_pcie_from_pp(pp);
181 ks_pcie = to_keystone_pcie(pci);
182
183 msi_target = ks_pcie->app.start + MSI_IRQ;
184 msg->address_lo = lower_32_bits(msi_target);
185 msg->address_hi = upper_32_bits(msi_target);
186 msg->data = data->hwirq;
187
188 dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
189 (int)data->hwirq, msg->address_hi, msg->address_lo);
190}
191
192static void ks_pcie_msi_mask(struct irq_data *data)
193{
194 struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
195 struct keystone_pcie *ks_pcie;
196 u32 irq = data->hwirq;
197 struct dw_pcie *pci;
198 unsigned long flags;
199 u32 reg_offset;
200 u32 bit_pos;
201
202 raw_spin_lock_irqsave(&pp->lock, flags);
203
204 pci = to_dw_pcie_from_pp(pp);
205 ks_pcie = to_keystone_pcie(pci);
206
207 reg_offset = irq % 8;
208 bit_pos = irq >> 3;
209
210 ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
211 BIT(bit_pos));
212
213 raw_spin_unlock_irqrestore(&pp->lock, flags);
214}
215
216static void ks_pcie_msi_unmask(struct irq_data *data)
217{
218 struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
219 struct keystone_pcie *ks_pcie;
220 u32 irq = data->hwirq;
221 struct dw_pcie *pci;
222 unsigned long flags;
223 u32 reg_offset;
224 u32 bit_pos;
225
226 raw_spin_lock_irqsave(&pp->lock, flags);
227
228 pci = to_dw_pcie_from_pp(pp);
229 ks_pcie = to_keystone_pcie(pci);
230
231 reg_offset = irq % 8;
232 bit_pos = irq >> 3;
233
234 ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
235 BIT(bit_pos));
236
237 raw_spin_unlock_irqrestore(&pp->lock, flags);
238}
239
240static struct irq_chip ks_pcie_msi_irq_chip = {
241 .name = "KEYSTONE-PCI-MSI",
242 .irq_ack = ks_pcie_msi_irq_ack,
243 .irq_compose_msi_msg = ks_pcie_compose_msi_msg,
244 .irq_mask = ks_pcie_msi_mask,
245 .irq_unmask = ks_pcie_msi_unmask,
246};
247
248/**
249 * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
250 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
251 * PCIe host controller driver information.
252 *
253 * Since modification of dbi_cs2 involves a different clock domain, read the
254 * status back to ensure the transition is complete.
255 */
256static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
257{
258 u32 val;
259
260 val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
261 val |= DBI_CS2;
262 ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
263
264 do {
265 val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
266 } while (!(val & DBI_CS2));
267}
268
269/**
270 * ks_pcie_clear_dbi_mode() - Disable DBI mode
271 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
272 * PCIe host controller driver information.
273 *
274 * Since modification of dbi_cs2 involves a different clock domain, read the
275 * status back to ensure the transition is complete.
276 */
277static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
278{
279 u32 val;
280
281 val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
282 val &= ~DBI_CS2;
283 ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
284
285 do {
286 val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
287 } while (val & DBI_CS2);
288}
289
290static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
291{
292 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
293 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
294
295 /* Configure and set up BAR0 */
296 ks_pcie_set_dbi_mode(ks_pcie);
297
298 /* Enable BAR0 */
299 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
300 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
301
302 ks_pcie_clear_dbi_mode(ks_pcie);
303
304 /*
305 * For BAR0, just setting bus address for inbound writes (MSI) should
306 * be sufficient. Use physical address to avoid any conflicts.
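 * The address written here matches the MSI target programmed in
 * ks_pcie_compose_msi_msg() (app.start + MSI_IRQ), so inbound MSI writes
 * are claimed by BAR0.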
307 */
308 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
309
310 pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
311 return dw_pcie_allocate_domains(pp);
312}
313
314static void ks_pcie_handle_intx_irq(struct keystone_pcie *ks_pcie,
315 int offset)
316{
317 struct dw_pcie *pci = ks_pcie->pci;
318 struct device *dev = pci->dev;
319 u32 pending;
320
321 pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
322
323 if (BIT(0) & pending) {
324 dev_dbg(dev, ": irq: irq_offset %d", offset);
325 generic_handle_domain_irq(ks_pcie->intx_irq_domain, offset);
326 }
327
328 /* EOI the INTx interrupt */
329 ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
330}
331
332static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
333{
334 ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
335}
336
337static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
338{
339 u32 reg;
340 struct device *dev = ks_pcie->pci->dev;
341
342 reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
343 if (!reg)
344 return IRQ_NONE;
345
346 if (reg & ERR_SYS)
347 dev_err(dev, "System Error\n");
348
349 if (reg & ERR_FATAL)
350 dev_err(dev, "Fatal Error\n");
351
352 if (reg & ERR_NONFATAL)
353 dev_dbg(dev, "Non Fatal Error\n");
354
355 if (reg & ERR_CORR)
356 dev_dbg(dev, "Correctable Error\n");
357
358 if (!ks_pcie->is_am6 && (reg & ERR_AXI))
359 dev_err(dev, "AXI tag lookup fatal Error\n");
360
361 if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
362 dev_err(dev, "ECRC Error\n");
363
364 ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);
365
366 return IRQ_HANDLED;
367}
368
369static void ks_pcie_ack_intx_irq(struct irq_data *d)
370{
371}
372
373static void ks_pcie_mask_intx_irq(struct irq_data *d)
374{
375}
376
377static void ks_pcie_unmask_intx_irq(struct irq_data *d)
378{
379}
380
381static struct irq_chip ks_pcie_intx_irq_chip = {
382 .name = "Keystone-PCI-INTX-IRQ",
383 .irq_ack = ks_pcie_ack_intx_irq,
384 .irq_mask = ks_pcie_mask_intx_irq,
385 .irq_unmask = ks_pcie_unmask_intx_irq,
386};
387
388static int ks_pcie_init_intx_irq_map(struct irq_domain *d,
389 unsigned int irq, irq_hw_number_t hw_irq)
390{
391 irq_set_chip_and_handler(irq, &ks_pcie_intx_irq_chip,
392 handle_level_irq);
393 irq_set_chip_data(irq, d->host_data);
394
395 return 0;
396}
397
398static const struct irq_domain_ops ks_pcie_intx_irq_domain_ops = {
399 .map = ks_pcie_init_intx_irq_map,
400 .xlate = irq_domain_xlate_onetwocell,
401};
402
403static int ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
404{
405 u32 val;
406 u32 num_viewport = ks_pcie->num_viewport;
407 struct dw_pcie *pci = ks_pcie->pci;
408 struct dw_pcie_rp *pp = &pci->pp;
409 struct resource_entry *entry;
410 struct resource *mem;
411 u64 start, end;
412 int i;
413
414 entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
415 if (!entry)
416 return -ENODEV;
417
418 mem = entry->res;
419 start = mem->start;
420 end = mem->end;
421
422 /* Disable BARs for inbound access */
423 ks_pcie_set_dbi_mode(ks_pcie);
424 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
425 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
426 ks_pcie_clear_dbi_mode(ks_pcie);
427
428 if (ks_pcie->is_am6)
429 return 0;
430
431 val = ilog2(OB_WIN_SIZE);
432 ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
433
434 /* Using Direct 1:1 mapping of RC <-> PCI memory space */
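	/* Each viewport maps one OB_WIN_SIZE (8 MB) slice of the bridge MEM window */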
435 for (i = 0; i < num_viewport && (start < end); i++) {
436 ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
437 lower_32_bits(start) | OB_ENABLEN);
438 ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
439 upper_32_bits(start));
440 start += OB_WIN_SIZE * SZ_1M;
441 }
442
443 val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
444 val |= OB_XLAT_EN_VAL;
445 ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
446
447 return 0;
448}
449
450static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
451 unsigned int devfn, int where)
452{
453 struct dw_pcie_rp *pp = bus->sysdata;
454 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
455 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
456 u32 reg;
457
458 /*
459 * Checking whether the link is up here is a last line of defense
460 * against platforms that forward errors on the system bus as
461 * SError upon PCI configuration transactions issued when the link
462 * is down. This check is racy by definition and does not stop
463 * the system from triggering an SError if the link goes down
464 * after this check is performed.
465 */
466 if (!dw_pcie_link_up(pci))
467 return NULL;
468
469 reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
470 CFG_FUNC(PCI_FUNC(devfn));
471 if (!pci_is_root_bus(bus->parent))
472 reg |= CFG_TYPE1;
473 ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
474
475 return pp->va_cfg0_base + where;
476}
477
478static struct pci_ops ks_child_pcie_ops = {
479 .map_bus = ks_pcie_other_map_bus,
480 .read = pci_generic_config_read,
481 .write = pci_generic_config_write,
482};
483
484static struct pci_ops ks_pcie_ops = {
485 .map_bus = dw_pcie_own_conf_map_bus,
486 .read = pci_generic_config_read,
487 .write = pci_generic_config_write,
488};
489
490/**
491 * ks_pcie_link_up() - Check if link up
492 * @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
493 * controller driver information.
494 */
495static int ks_pcie_link_up(struct dw_pcie *pci)
496{
497 u32 val;
498
499 val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
500 val &= PORT_LOGIC_LTSSM_STATE_MASK;
501 return (val == PORT_LOGIC_LTSSM_STATE_L0);
502}
503
504static void ks_pcie_stop_link(struct dw_pcie *pci)
505{
506 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
507 u32 val;
508
509 /* Disable Link training */
510 val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
511 val &= ~LTSSM_EN_VAL;
512 ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
513}
514
515static int ks_pcie_start_link(struct dw_pcie *pci)
516{
517 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
518 u32 val;
519
520 /* Initiate Link Training */
521 val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
522 ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
523
524 return 0;
525}
526
527static void ks_pcie_quirk(struct pci_dev *dev)
528{
529 struct pci_bus *bus = dev->bus;
530 struct keystone_pcie *ks_pcie;
531 struct device *bridge_dev;
532 struct pci_dev *bridge;
533 u32 val;
534
535 static const struct pci_device_id rc_pci_devids[] = {
536 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
537 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
538 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
539 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
540 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
541 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
542 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
543 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
544 { 0, },
545 };
546 static const struct pci_device_id am6_pci_devids[] = {
547 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
548 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
549 { 0, },
550 };
551
552 if (pci_is_root_bus(bus))
553 bridge = dev;
554
555 /* look for the host bridge */
556 while (!pci_is_root_bus(bus)) {
557 bridge = bus->self;
558 bus = bus->parent;
559 }
560
561 if (!bridge)
562 return;
563
564 /*
565 * Keystone PCI controller has a h/w limitation of
566 * 256 bytes maximum read request size. It can't handle
567 * anything higher than this. So force this limit on
568 * all downstream devices.
569 */
570 if (pci_match_id(rc_pci_devids, bridge)) {
571 if (pcie_get_readrq(dev) > 256) {
572 dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
573 pcie_set_readrq(dev, 256);
574 }
575 }
576
577 /*
578 * Memory transactions fail with PCI controller in AM654 PG1.0
579 * when MRRS is set to more than 128 bytes. Force the MRRS to
580 * 128 bytes in all downstream devices.
581 */
582 if (pci_match_id(am6_pci_devids, bridge)) {
583 bridge_dev = pci_get_host_bridge_device(dev);
584 if (!bridge_dev || !bridge_dev->parent)
585 return;
586
587 ks_pcie = dev_get_drvdata(bridge_dev->parent);
588 if (!ks_pcie)
589 return;
590
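	/* Only AM654 PG1.0 silicon (RTL revision 0x15) needs the 128-byte MRRS cap */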
591 val = ks_pcie_app_readl(ks_pcie, PID);
592 val &= RTL;
593 val >>= RTL_SHIFT;
594 if (val != AM6_PCI_PG1_RTL_VER)
595 return;
596
597 if (pcie_get_readrq(dev) > 128) {
598 dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
599 pcie_set_readrq(dev, 128);
600 }
601 }
602}
603DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
604
605static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
606{
607 unsigned int irq = desc->irq_data.hwirq;
608 struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
609 u32 offset = irq - ks_pcie->msi_host_irq;
610 struct dw_pcie *pci = ks_pcie->pci;
611 struct dw_pcie_rp *pp = &pci->pp;
612 struct device *dev = pci->dev;
613 struct irq_chip *chip = irq_desc_get_chip(desc);
614 u32 vector, reg, pos;
615
616 dev_dbg(dev, "%s, irq %d\n", __func__, irq);
617
618 /*
619 * Installing the chained IRQ handler replaced the normal interrupt
620 * handler, so we need to take care of the mask/unmask and ack
621 * operations here.
622 */
623 chained_irq_enter(chip, desc);
624
625 reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
626 /*
627 * MSI0 status bits 0-3 report vectors 0, 8, 16, 24; MSI1 status bits
628 * report vectors 1, 9, 17, 25; and so forth.
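 * For example, MSI_IRQ_STATUS(2) bit 1 pending maps to vector 2 + (1 << 3) = 10.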
629 */
630 for (pos = 0; pos < 4; pos++) {
631 if (!(reg & BIT(pos)))
632 continue;
633
634 vector = offset + (pos << 3);
635 dev_dbg(dev, "irq: bit %d, vector %d\n", pos, vector);
636 generic_handle_domain_irq(pp->irq_domain, vector);
637 }
638
639 chained_irq_exit(chip, desc);
640}
641
642/**
643 * ks_pcie_intx_irq_handler() - Handle INTX interrupt
644 * @desc: Pointer to irq descriptor
645 *
646 * Traverse through pending INTX interrupts and invoke handler for each. Also
647 * takes care of interrupt controller level mask/ack operation.
648 */
649static void ks_pcie_intx_irq_handler(struct irq_desc *desc)
650{
651 unsigned int irq = irq_desc_get_irq(desc);
652 struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
653 struct dw_pcie *pci = ks_pcie->pci;
654 struct device *dev = pci->dev;
655 u32 irq_offset = irq - ks_pcie->intx_host_irqs[0];
656 struct irq_chip *chip = irq_desc_get_chip(desc);
657
658 dev_dbg(dev, ": Handling INTX irq %d\n", irq);
659
660 /*
661 * Installing the chained IRQ handler replaced the normal interrupt
662 * handler, so we need to take care of the mask/unmask and ack
663 * operations here.
664 */
665 chained_irq_enter(chip, desc);
666 ks_pcie_handle_intx_irq(ks_pcie, irq_offset);
667 chained_irq_exit(chip, desc);
668}
669
670static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
671{
672 struct device *dev = ks_pcie->pci->dev;
673 struct device_node *np = ks_pcie->np;
674 struct device_node *intc_np;
675 struct irq_data *irq_data;
676 int irq_count, irq, ret, i;
677
678 if (!IS_ENABLED(CONFIG_PCI_MSI))
679 return 0;
680
681 intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
682 if (!intc_np) {
683 if (ks_pcie->is_am6)
684 return 0;
685 dev_warn(dev, "msi-interrupt-controller node is absent\n");
686 return -EINVAL;
687 }
688
689 irq_count = of_irq_count(intc_np);
690 if (!irq_count) {
691 dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
692 ret = -EINVAL;
693 goto err;
694 }
695
696 for (i = 0; i < irq_count; i++) {
697 irq = irq_of_parse_and_map(intc_np, i);
698 if (!irq) {
699 ret = -EINVAL;
700 goto err;
701 }
702
703 if (!ks_pcie->msi_host_irq) {
704 irq_data = irq_get_irq_data(irq);
705 if (!irq_data) {
706 ret = -EINVAL;
707 goto err;
708 }
709 ks_pcie->msi_host_irq = irq_data->hwirq;
710 }
711
712 irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
713 ks_pcie);
714 }
715
716 of_node_put(intc_np);
717 return 0;
718
719err:
720 of_node_put(intc_np);
721 return ret;
722}
723
724static int ks_pcie_config_intx_irq(struct keystone_pcie *ks_pcie)
725{
726 struct device *dev = ks_pcie->pci->dev;
727 struct irq_domain *intx_irq_domain;
728 struct device_node *np = ks_pcie->np;
729 struct device_node *intc_np;
730 int irq_count, irq, ret = 0, i;
731
732 intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
733 if (!intc_np) {
734 /*
735 * Since INTX interrupts are modeled as edge interrupts in
736 * AM6, keep them disabled for now.
737 */
738 if (ks_pcie->is_am6)
739 return 0;
740 dev_warn(dev, "legacy-interrupt-controller node is absent\n");
741 return -EINVAL;
742 }
743
744 irq_count = of_irq_count(intc_np);
745 if (!irq_count) {
746 dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
747 ret = -EINVAL;
748 goto err;
749 }
750
751 for (i = 0; i < irq_count; i++) {
752 irq = irq_of_parse_and_map(intc_np, i);
753 if (!irq) {
754 ret = -EINVAL;
755 goto err;
756 }
757 ks_pcie->intx_host_irqs[i] = irq;
758
759 irq_set_chained_handler_and_data(irq,
760 ks_pcie_intx_irq_handler,
761 ks_pcie);
762 }
763
764 intx_irq_domain = irq_domain_add_linear(intc_np, PCI_NUM_INTX,
765 &ks_pcie_intx_irq_domain_ops, NULL);
766 if (!intx_irq_domain) {
767 dev_err(dev, "Failed to add irq domain for INTX irqs\n");
768 ret = -EINVAL;
769 goto err;
770 }
771 ks_pcie->intx_irq_domain = intx_irq_domain;
772
773 for (i = 0; i < PCI_NUM_INTX; i++)
774 ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
775
776err:
777 of_node_put(intc_np);
778 return ret;
779}
780
781#ifdef CONFIG_ARM
782/*
783 * When a PCI device does not exist during config cycles, keystone host
784 * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE).
785 * This handler always returns 0 for this kind of fault.
786 */
787static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
788 struct pt_regs *regs)
789{
790 unsigned long instr = *(unsigned long *) instruction_pointer(regs);
791
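	/*
	 * If the faulting instruction looks like a load, fake an all-ones
	 * (PCI_ERROR_RESPONSE) result in its destination register (bits 15:12)
	 * and step past the instruction.
	 */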
792 if ((instr & 0x0e100090) == 0x00100090) {
793 int reg = (instr >> 12) & 15;
794
795 regs->uregs[reg] = -1;
796 regs->ARM_pc += 4;
797 }
798
799 return 0;
800}
801#endif
802
803static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
804{
805 int ret;
806 unsigned int id;
807 struct regmap *devctrl_regs;
808 struct dw_pcie *pci = ks_pcie->pci;
809 struct device *dev = pci->dev;
810 struct device_node *np = dev->of_node;
811 struct of_phandle_args args;
812 unsigned int offset = 0;
813
814 devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
815 if (IS_ERR(devctrl_regs))
816 return PTR_ERR(devctrl_regs);
817
818 /* Do not error out to maintain old DT compatibility */
819 ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-id", 1, 0, &args);
820 if (!ret)
821 offset = args.args[0];
822
823 ret = regmap_read(devctrl_regs, offset, &id);
824 if (ret)
825 return ret;
826
827 dw_pcie_dbi_ro_wr_en(pci);
828 dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
829 dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
830 dw_pcie_dbi_ro_wr_dis(pci);
831
832 return 0;
833}
834
835static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
836{
837 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
838 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
839 int ret;
840
841 pp->bridge->ops = &ks_pcie_ops;
842 if (!ks_pcie->is_am6)
843 pp->bridge->child_ops = &ks_child_pcie_ops;
844
845 ret = ks_pcie_config_intx_irq(ks_pcie);
846 if (ret)
847 return ret;
848
849 ret = ks_pcie_config_msi_irq(ks_pcie);
850 if (ret)
851 return ret;
852
853 ks_pcie_stop_link(pci);
854 ret = ks_pcie_setup_rc_app_regs(ks_pcie);
855 if (ret)
856 return ret;
857
858 writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
859 pci->dbi_base + PCI_IO_BASE);
860
861 ret = ks_pcie_init_id(ks_pcie);
862 if (ret < 0)
863 return ret;
864
865#ifdef CONFIG_ARM
866 /*
867 * PCIe access errors that result in OCP errors are caught by ARM as
868 * "External aborts"
869 */
870 hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
871 "Asynchronous external abort");
872#endif
873
874 return 0;
875}
876
877static const struct dw_pcie_host_ops ks_pcie_host_ops = {
878 .init = ks_pcie_host_init,
879 .msi_init = ks_pcie_msi_host_init,
880};
881
882static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
883 .init = ks_pcie_host_init,
884};
885
886static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
887{
888 struct keystone_pcie *ks_pcie = priv;
889
890 return ks_pcie_handle_error_irq(ks_pcie);
891}
892
893static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
894 u32 reg, size_t size, u32 val)
895{
896 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
897
898 ks_pcie_set_dbi_mode(ks_pcie);
899 dw_pcie_write(base + reg, size, val);
900 ks_pcie_clear_dbi_mode(ks_pcie);
901}
902
903static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
904 .start_link = ks_pcie_start_link,
905 .stop_link = ks_pcie_stop_link,
906 .link_up = ks_pcie_link_up,
907 .write_dbi2 = ks_pcie_am654_write_dbi2,
908};
909
910static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
911{
912 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
913 int flags;
914
915 ep->page_size = AM654_WIN_SIZE;
916 flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
917 dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
918 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
919}
920
921static void ks_pcie_am654_raise_intx_irq(struct keystone_pcie *ks_pcie)
922{
923 struct dw_pcie *pci = ks_pcie->pci;
924 u8 int_pin;
925
926 int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
927 if (int_pin == 0 || int_pin > 4)
928 return;
929
930 ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
931 INT_ENABLE);
932 ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
933 mdelay(1);
934 ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
935 ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
936 INT_ENABLE);
937}
938
939static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
940 unsigned int type, u16 interrupt_num)
941{
942 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
943 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
944
945 switch (type) {
946 case PCI_IRQ_INTX:
947 ks_pcie_am654_raise_intx_irq(ks_pcie);
948 break;
949 case PCI_IRQ_MSI:
950 dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
951 break;
952 case PCI_IRQ_MSIX:
953 dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
954 break;
955 default:
956 dev_err(pci->dev, "UNKNOWN IRQ type\n");
957 return -EINVAL;
958 }
959
960 return 0;
961}
962
963static const struct pci_epc_features ks_pcie_am654_epc_features = {
964 .linkup_notifier = false,
965 .msi_capable = true,
966 .msix_capable = true,
967 .bar[BAR_0] = { .type = BAR_RESERVED, },
968 .bar[BAR_1] = { .type = BAR_RESERVED, },
969 .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
970 .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
971 .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256, },
972 .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
973 .align = SZ_1M,
974};
975
976static const struct pci_epc_features*
977ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
978{
979 return &ks_pcie_am654_epc_features;
980}
981
982static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
983 .init = ks_pcie_am654_ep_init,
984 .raise_irq = ks_pcie_am654_raise_irq,
985 .get_features = &ks_pcie_am654_get_features,
986};
987
988static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
989{
990 int num_lanes = ks_pcie->num_lanes;
991
992 while (num_lanes--) {
993 phy_power_off(ks_pcie->phy[num_lanes]);
994 phy_exit(ks_pcie->phy[num_lanes]);
995 }
996}
997
998static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
999{
1000 int i;
1001 int ret;
1002 int num_lanes = ks_pcie->num_lanes;
1003
1004 for (i = 0; i < num_lanes; i++) {
1005 ret = phy_reset(ks_pcie->phy[i]);
1006 if (ret < 0)
1007 goto err_phy;
1008
1009 ret = phy_init(ks_pcie->phy[i]);
1010 if (ret < 0)
1011 goto err_phy;
1012
1013 ret = phy_power_on(ks_pcie->phy[i]);
1014 if (ret < 0) {
1015 phy_exit(ks_pcie->phy[i]);
1016 goto err_phy;
1017 }
1018 }
1019
1020 return 0;
1021
1022err_phy:
1023 while (--i >= 0) {
1024 phy_power_off(ks_pcie->phy[i]);
1025 phy_exit(ks_pcie->phy[i]);
1026 }
1027
1028 return ret;
1029}
1030
1031static int ks_pcie_set_mode(struct device *dev)
1032{
1033 struct device_node *np = dev->of_node;
1034 struct of_phandle_args args;
1035 unsigned int offset = 0;
1036 struct regmap *syscon;
1037 u32 val;
1038 u32 mask;
1039 int ret = 0;
1040
1041 syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
1042 if (IS_ERR(syscon))
1043 return 0;
1044
1045 /* Do not error out to maintain old DT compatibility */
1046 ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
1047 if (!ret)
1048 offset = args.args[0];
1049
1050 mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
1051 val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;
1052
1053 ret = regmap_update_bits(syscon, offset, mask, val);
1054 if (ret) {
1055 dev_err(dev, "failed to set pcie mode\n");
1056 return ret;
1057 }
1058
1059 return 0;
1060}
1061
1062static int ks_pcie_am654_set_mode(struct device *dev,
1063 enum dw_pcie_device_mode mode)
1064{
1065 struct device_node *np = dev->of_node;
1066 struct of_phandle_args args;
1067 unsigned int offset = 0;
1068 struct regmap *syscon;
1069 u32 val;
1070 u32 mask;
1071 int ret = 0;
1072
1073 syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
1074 if (IS_ERR(syscon))
1075 return 0;
1076
1077 /* Do not error out to maintain old DT compatibility */
1078 ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
1079 if (!ret)
1080 offset = args.args[0];
1081
1082 mask = AM654_PCIE_DEV_TYPE_MASK;
1083
1084 switch (mode) {
1085 case DW_PCIE_RC_TYPE:
1086 val = RC;
1087 break;
1088 case DW_PCIE_EP_TYPE:
1089 val = EP;
1090 break;
1091 default:
1092 dev_err(dev, "INVALID device type %d\n", mode);
1093 return -EINVAL;
1094 }
1095
1096 ret = regmap_update_bits(syscon, offset, mask, val);
1097 if (ret) {
1098 dev_err(dev, "failed to set pcie mode\n");
1099 return ret;
1100 }
1101
1102 return 0;
1103}
1104
1105static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
1106 .host_ops = &ks_pcie_host_ops,
1107 .mode = DW_PCIE_RC_TYPE,
1108 .version = DW_PCIE_VER_365A,
1109};
1110
1111static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
1112 .host_ops = &ks_pcie_am654_host_ops,
1113 .mode = DW_PCIE_RC_TYPE,
1114 .version = DW_PCIE_VER_490A,
1115};
1116
1117static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
1118 .ep_ops = &ks_pcie_am654_ep_ops,
1119 .mode = DW_PCIE_EP_TYPE,
1120 .version = DW_PCIE_VER_490A,
1121};
1122
1123static const struct of_device_id ks_pcie_of_match[] = {
1124 {
1125 .type = "pci",
1126 .data = &ks_pcie_rc_of_data,
1127 .compatible = "ti,keystone-pcie",
1128 },
1129 {
1130 .data = &ks_pcie_am654_rc_of_data,
1131 .compatible = "ti,am654-pcie-rc",
1132 },
1133 {
1134 .data = &ks_pcie_am654_ep_of_data,
1135 .compatible = "ti,am654-pcie-ep",
1136 },
1137 { },
1138};
1139
1140static int ks_pcie_probe(struct platform_device *pdev)
1141{
1142 const struct dw_pcie_host_ops *host_ops;
1143 const struct dw_pcie_ep_ops *ep_ops;
1144 struct device *dev = &pdev->dev;
1145 struct device_node *np = dev->of_node;
1146 const struct ks_pcie_of_data *data;
1147 enum dw_pcie_device_mode mode;
1148 struct dw_pcie *pci;
1149 struct keystone_pcie *ks_pcie;
1150 struct device_link **link;
1151 struct gpio_desc *gpiod;
1152 struct resource *res;
1153 void __iomem *base;
1154 u32 num_viewport;
1155 struct phy **phy;
1156 u32 num_lanes;
1157 char name[10];
1158 u32 version;
1159 int ret;
1160 int irq;
1161 int i;
1162
1163 data = of_device_get_match_data(dev);
1164 if (!data)
1165 return -EINVAL;
1166
1167 version = data->version;
1168 host_ops = data->host_ops;
1169 ep_ops = data->ep_ops;
1170 mode = data->mode;
1171
1172 ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
1173 if (!ks_pcie)
1174 return -ENOMEM;
1175
1176 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
1177 if (!pci)
1178 return -ENOMEM;
1179
1180 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
1181 ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
1182 if (IS_ERR(ks_pcie->va_app_base))
1183 return PTR_ERR(ks_pcie->va_app_base);
1184
1185 ks_pcie->app = *res;
1186
1187 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
1188 base = devm_pci_remap_cfg_resource(dev, res);
1189 if (IS_ERR(base))
1190 return PTR_ERR(base);
1191
1192 if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
1193 ks_pcie->is_am6 = true;
1194
1195 pci->dbi_base = base;
1196 pci->dbi_base2 = base;
1197 pci->dev = dev;
1198 pci->ops = &ks_pcie_dw_pcie_ops;
1199 pci->version = version;
1200
1201 irq = platform_get_irq(pdev, 0);
1202 if (irq < 0)
1203 return irq;
1204
1205 ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
1206 "ks-pcie-error-irq", ks_pcie);
1207 if (ret < 0) {
1208 dev_err(dev, "failed to request error IRQ %d\n",
1209 irq);
1210 return ret;
1211 }
1212
1213 ret = of_property_read_u32(np, "num-lanes", &num_lanes);
1214 if (ret)
1215 num_lanes = 1;
1216
1217 phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
1218 if (!phy)
1219 return -ENOMEM;
1220
1221 link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
1222 if (!link)
1223 return -ENOMEM;
1224
1225 for (i = 0; i < num_lanes; i++) {
1226 snprintf(name, sizeof(name), "pcie-phy%d", i);
1227 phy[i] = devm_phy_optional_get(dev, name);
1228 if (IS_ERR(phy[i])) {
1229 ret = PTR_ERR(phy[i]);
1230 goto err_link;
1231 }
1232
1233 if (!phy[i])
1234 continue;
1235
1236 link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
1237 if (!link[i]) {
1238 ret = -EINVAL;
1239 goto err_link;
1240 }
1241 }
1242
1243 ks_pcie->np = np;
1244 ks_pcie->pci = pci;
1245 ks_pcie->link = link;
1246 ks_pcie->num_lanes = num_lanes;
1247 ks_pcie->phy = phy;
1248
1249 gpiod = devm_gpiod_get_optional(dev, "reset",
1250 GPIOD_OUT_LOW);
1251 if (IS_ERR(gpiod)) {
1252 ret = PTR_ERR(gpiod);
1253 if (ret != -EPROBE_DEFER)
1254 dev_err(dev, "Failed to get reset GPIO\n");
1255 goto err_link;
1256 }
1257
1258 /* Obtain references to the PHYs */
1259 for (i = 0; i < num_lanes; i++)
1260 phy_pm_runtime_get_sync(ks_pcie->phy[i]);
1261
1262 ret = ks_pcie_enable_phy(ks_pcie);
1263
1264 /* Release references to the PHYs */
1265 for (i = 0; i < num_lanes; i++)
1266 phy_pm_runtime_put_sync(ks_pcie->phy[i]);
1267
1268 if (ret) {
1269 dev_err(dev, "failed to enable phy\n");
1270 goto err_link;
1271 }
1272
1273 platform_set_drvdata(pdev, ks_pcie);
1274 pm_runtime_enable(dev);
1275 ret = pm_runtime_get_sync(dev);
1276 if (ret < 0) {
1277 dev_err(dev, "pm_runtime_get_sync failed\n");
1278 goto err_get_sync;
1279 }
1280
1281 if (dw_pcie_ver_is_ge(pci, 480A))
1282 ret = ks_pcie_am654_set_mode(dev, mode);
1283 else
1284 ret = ks_pcie_set_mode(dev);
1285 if (ret < 0)
1286 goto err_get_sync;
1287
1288 switch (mode) {
1289 case DW_PCIE_RC_TYPE:
1290 if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
1291 ret = -ENODEV;
1292 goto err_get_sync;
1293 }
1294
1295 ret = of_property_read_u32(np, "num-viewport", &num_viewport);
1296 if (ret < 0) {
1297 dev_err(dev, "unable to read *num-viewport* property\n");
1298 goto err_get_sync;
1299 }
1300
1301 /*
1302 * "Power Sequencing and Reset Signal Timings" table in
1303 * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
1304 * indicates PERST# should be deasserted after minimum of 100us
1305 * once REFCLK is stable. The REFCLK to the connector in RC
1306 * mode is selected while enabling the PHY. So deassert PERST#
1307 * after 100 us.
1308 */
1309 if (gpiod) {
1310 usleep_range(100, 200);
1311 gpiod_set_value_cansleep(gpiod, 1);
1312 }
1313
1314 ks_pcie->num_viewport = num_viewport;
1315 pci->pp.ops = host_ops;
1316 ret = dw_pcie_host_init(&pci->pp);
1317 if (ret < 0)
1318 goto err_get_sync;
1319 break;
1320 case DW_PCIE_EP_TYPE:
1321 if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
1322 ret = -ENODEV;
1323 goto err_get_sync;
1324 }
1325
1326 pci->ep.ops = ep_ops;
1327 ret = dw_pcie_ep_init(&pci->ep);
1328 if (ret < 0)
1329 goto err_get_sync;
1330
1331 ret = dw_pcie_ep_init_registers(&pci->ep);
1332 if (ret) {
1333 dev_err(dev, "Failed to initialize DWC endpoint registers\n");
1334 goto err_ep_init;
1335 }
1336
1337 pci_epc_init_notify(pci->ep.epc);
1338
1339 break;
1340 default:
1341 dev_err(dev, "INVALID device type %d\n", mode);
1342 }
1343
1344 ks_pcie_enable_error_irq(ks_pcie);
1345
1346 return 0;
1347
1348err_ep_init:
1349 dw_pcie_ep_deinit(&pci->ep);
1350err_get_sync:
1351 pm_runtime_put(dev);
1352 pm_runtime_disable(dev);
1353 ks_pcie_disable_phy(ks_pcie);
1354
1355err_link:
1356 while (--i >= 0 && link[i])
1357 device_link_del(link[i]);
1358
1359 return ret;
1360}
1361
1362static void ks_pcie_remove(struct platform_device *pdev)
1363{
1364 struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
1365 struct device_link **link = ks_pcie->link;
1366 int num_lanes = ks_pcie->num_lanes;
1367 struct device *dev = &pdev->dev;
1368
1369 pm_runtime_put(dev);
1370 pm_runtime_disable(dev);
1371 ks_pcie_disable_phy(ks_pcie);
1372 while (num_lanes--)
1373 device_link_del(link[num_lanes]);
1374}
1375
1376static struct platform_driver ks_pcie_driver = {
1377 .probe = ks_pcie_probe,
1378 .remove = ks_pcie_remove,
1379 .driver = {
1380 .name = "keystone-pcie",
1381 .of_match_table = ks_pcie_of_match,
1382 },
1383};
1384builtin_platform_driver(ks_pcie_driver);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * PCIe host controller driver for Texas Instruments Keystone SoCs
4 *
5 * Copyright (C) 2013-2014 Texas Instruments., Ltd.
6 * https://www.ti.com
7 *
8 * Author: Murali Karicheri <m-karicheri2@ti.com>
9 * Implementation based on pci-exynos.c and pcie-designware.c
10 */
11
12#include <linux/clk.h>
13#include <linux/delay.h>
14#include <linux/gpio/consumer.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/irqchip/chained_irq.h>
18#include <linux/irqdomain.h>
19#include <linux/mfd/syscon.h>
20#include <linux/msi.h>
21#include <linux/of.h>
22#include <linux/of_device.h>
23#include <linux/of_irq.h>
24#include <linux/of_pci.h>
25#include <linux/phy/phy.h>
26#include <linux/platform_device.h>
27#include <linux/regmap.h>
28#include <linux/resource.h>
29#include <linux/signal.h>
30
31#include "../../pci.h"
32#include "pcie-designware.h"
33
34#define PCIE_VENDORID_MASK 0xffff
35#define PCIE_DEVICEID_SHIFT 16
36
37/* Application registers */
38#define CMD_STATUS 0x004
39#define LTSSM_EN_VAL BIT(0)
40#define OB_XLAT_EN_VAL BIT(1)
41#define DBI_CS2 BIT(5)
42
43#define CFG_SETUP 0x008
44#define CFG_BUS(x) (((x) & 0xff) << 16)
45#define CFG_DEVICE(x) (((x) & 0x1f) << 8)
46#define CFG_FUNC(x) ((x) & 0x7)
47#define CFG_TYPE1 BIT(24)
48
49#define OB_SIZE 0x030
50#define OB_OFFSET_INDEX(n) (0x200 + (8 * (n)))
51#define OB_OFFSET_HI(n) (0x204 + (8 * (n)))
52#define OB_ENABLEN BIT(0)
53#define OB_WIN_SIZE 8 /* 8MB */
54
55#define PCIE_LEGACY_IRQ_ENABLE_SET(n) (0x188 + (0x10 * ((n) - 1)))
56#define PCIE_LEGACY_IRQ_ENABLE_CLR(n) (0x18c + (0x10 * ((n) - 1)))
57#define PCIE_EP_IRQ_SET 0x64
58#define PCIE_EP_IRQ_CLR 0x68
59#define INT_ENABLE BIT(0)
60
61/* IRQ register defines */
62#define IRQ_EOI 0x050
63
64#define MSI_IRQ 0x054
65#define MSI_IRQ_STATUS(n) (0x104 + ((n) << 4))
66#define MSI_IRQ_ENABLE_SET(n) (0x108 + ((n) << 4))
67#define MSI_IRQ_ENABLE_CLR(n) (0x10c + ((n) << 4))
68#define MSI_IRQ_OFFSET 4
69
70#define IRQ_STATUS(n) (0x184 + ((n) << 4))
71#define IRQ_ENABLE_SET(n) (0x188 + ((n) << 4))
72#define INTx_EN BIT(0)
73
74#define ERR_IRQ_STATUS 0x1c4
75#define ERR_IRQ_ENABLE_SET 0x1c8
76#define ERR_AER BIT(5) /* ECRC error */
77#define AM6_ERR_AER BIT(4) /* AM6 ECRC error */
78#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
79#define ERR_CORR BIT(3) /* Correctable error */
80#define ERR_NONFATAL BIT(2) /* Non-fatal error */
81#define ERR_FATAL BIT(1) /* Fatal error */
82#define ERR_SYS BIT(0) /* System error */
83#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
84 ERR_NONFATAL | ERR_FATAL | ERR_SYS)
85
86/* PCIE controller device IDs */
87#define PCIE_RC_K2HK 0xb008
88#define PCIE_RC_K2E 0xb009
89#define PCIE_RC_K2L 0xb00a
90#define PCIE_RC_K2G 0xb00b
91
92#define KS_PCIE_DEV_TYPE_MASK (0x3 << 1)
93#define KS_PCIE_DEV_TYPE(mode) ((mode) << 1)
94
95#define EP 0x0
96#define LEG_EP 0x1
97#define RC 0x2
98
99#define KS_PCIE_SYSCLOCKOUTEN BIT(0)
100
101#define AM654_PCIE_DEV_TYPE_MASK 0x3
102#define AM654_WIN_SIZE SZ_64K
103
104#define APP_ADDR_SPACE_0 (16 * SZ_1K)
105
106#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
107
108struct ks_pcie_of_data {
109 enum dw_pcie_device_mode mode;
110 const struct dw_pcie_host_ops *host_ops;
111 const struct dw_pcie_ep_ops *ep_ops;
112 unsigned int version;
113};
114
115struct keystone_pcie {
116 struct dw_pcie *pci;
117 /* PCI Device ID */
118 u32 device_id;
119 int legacy_host_irqs[PCI_NUM_INTX];
120 struct device_node *legacy_intc_np;
121
122 int msi_host_irq;
123 int num_lanes;
124 u32 num_viewport;
125 struct phy **phy;
126 struct device_link **link;
127 struct device_node *msi_intc_np;
128 struct irq_domain *legacy_irq_domain;
129 struct device_node *np;
130
131 /* Application register space */
132 void __iomem *va_app_base; /* DT 1st resource */
133 struct resource app;
134 bool is_am6;
135};
136
137static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
138{
139 return readl(ks_pcie->va_app_base + offset);
140}
141
142static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
143 u32 val)
144{
145 writel(val, ks_pcie->va_app_base + offset);
146}
147
148static void ks_pcie_msi_irq_ack(struct irq_data *data)
149{
150 struct pcie_port *pp = irq_data_get_irq_chip_data(data);
151 struct keystone_pcie *ks_pcie;
152 u32 irq = data->hwirq;
153 struct dw_pcie *pci;
154 u32 reg_offset;
155 u32 bit_pos;
156
157 pci = to_dw_pcie_from_pp(pp);
158 ks_pcie = to_keystone_pcie(pci);
159
160 reg_offset = irq % 8;
161 bit_pos = irq >> 3;
162
163 ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
164 BIT(bit_pos));
165 ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
166}
167
168static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
169{
170 struct pcie_port *pp = irq_data_get_irq_chip_data(data);
171 struct keystone_pcie *ks_pcie;
172 struct dw_pcie *pci;
173 u64 msi_target;
174
175 pci = to_dw_pcie_from_pp(pp);
176 ks_pcie = to_keystone_pcie(pci);
177
178 msi_target = ks_pcie->app.start + MSI_IRQ;
179 msg->address_lo = lower_32_bits(msi_target);
180 msg->address_hi = upper_32_bits(msi_target);
181 msg->data = data->hwirq;
182
183 dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
184 (int)data->hwirq, msg->address_hi, msg->address_lo);
185}
186
187static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
188 const struct cpumask *mask, bool force)
189{
190 return -EINVAL;
191}
192
193static void ks_pcie_msi_mask(struct irq_data *data)
194{
195 struct pcie_port *pp = irq_data_get_irq_chip_data(data);
196 struct keystone_pcie *ks_pcie;
197 u32 irq = data->hwirq;
198 struct dw_pcie *pci;
199 unsigned long flags;
200 u32 reg_offset;
201 u32 bit_pos;
202
203 raw_spin_lock_irqsave(&pp->lock, flags);
204
205 pci = to_dw_pcie_from_pp(pp);
206 ks_pcie = to_keystone_pcie(pci);
207
208 reg_offset = irq % 8;
209 bit_pos = irq >> 3;
210
211 ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
212 BIT(bit_pos));
213
214 raw_spin_unlock_irqrestore(&pp->lock, flags);
215}
216
217static void ks_pcie_msi_unmask(struct irq_data *data)
218{
219 struct pcie_port *pp = irq_data_get_irq_chip_data(data);
220 struct keystone_pcie *ks_pcie;
221 u32 irq = data->hwirq;
222 struct dw_pcie *pci;
223 unsigned long flags;
224 u32 reg_offset;
225 u32 bit_pos;
226
227 raw_spin_lock_irqsave(&pp->lock, flags);
228
229 pci = to_dw_pcie_from_pp(pp);
230 ks_pcie = to_keystone_pcie(pci);
231
232 reg_offset = irq % 8;
233 bit_pos = irq >> 3;
234
235 ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
236 BIT(bit_pos));
237
238 raw_spin_unlock_irqrestore(&pp->lock, flags);
239}
240
241static struct irq_chip ks_pcie_msi_irq_chip = {
242 .name = "KEYSTONE-PCI-MSI",
243 .irq_ack = ks_pcie_msi_irq_ack,
244 .irq_compose_msi_msg = ks_pcie_compose_msi_msg,
245 .irq_set_affinity = ks_pcie_msi_set_affinity,
246 .irq_mask = ks_pcie_msi_mask,
247 .irq_unmask = ks_pcie_msi_unmask,
248};
249
250static int ks_pcie_msi_host_init(struct pcie_port *pp)
251{
252 pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
253 return dw_pcie_allocate_domains(pp);
254}
255
256static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
257 int offset)
258{
259 struct dw_pcie *pci = ks_pcie->pci;
260 struct device *dev = pci->dev;
261 u32 pending;
262 int virq;
263
264 pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
265
266 if (BIT(0) & pending) {
267 virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
268 dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
269 generic_handle_irq(virq);
270 }
271
272 /* EOI the INTx interrupt */
273 ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
274}
275
276static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
277{
278 ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
279}
280
281static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
282{
283 u32 reg;
284 struct device *dev = ks_pcie->pci->dev;
285
286 reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
287 if (!reg)
288 return IRQ_NONE;
289
290 if (reg & ERR_SYS)
291 dev_err(dev, "System Error\n");
292
293 if (reg & ERR_FATAL)
294 dev_err(dev, "Fatal Error\n");
295
296 if (reg & ERR_NONFATAL)
297 dev_dbg(dev, "Non Fatal Error\n");
298
299 if (reg & ERR_CORR)
300 dev_dbg(dev, "Correctable Error\n");
301
302 if (!ks_pcie->is_am6 && (reg & ERR_AXI))
303 dev_err(dev, "AXI tag lookup fatal Error\n");
304
305 if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
306 dev_err(dev, "ECRC Error\n");
307
308 ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);
309
310 return IRQ_HANDLED;
311}
312
313static void ks_pcie_ack_legacy_irq(struct irq_data *d)
314{
315}
316
317static void ks_pcie_mask_legacy_irq(struct irq_data *d)
318{
319}
320
321static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
322{
323}
324
325static struct irq_chip ks_pcie_legacy_irq_chip = {
326 .name = "Keystone-PCI-Legacy-IRQ",
327 .irq_ack = ks_pcie_ack_legacy_irq,
328 .irq_mask = ks_pcie_mask_legacy_irq,
329 .irq_unmask = ks_pcie_unmask_legacy_irq,
330};
331
332static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
333 unsigned int irq,
334 irq_hw_number_t hw_irq)
335{
336 irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
337 handle_level_irq);
338 irq_set_chip_data(irq, d->host_data);
339
340 return 0;
341}
342
343static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
344 .map = ks_pcie_init_legacy_irq_map,
345 .xlate = irq_domain_xlate_onetwocell,
346};
347
348/**
349 * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
350 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
351 * PCIe host controller driver information.
352 *
353 * Since modification of dbi_cs2 involves different clock domain, read the
354 * status back to ensure the transition is complete.
355 */
356static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
357{
358 u32 val;
359
360 val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
361 val |= DBI_CS2;
362 ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
363
364 do {
365 val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
366 } while (!(val & DBI_CS2));
367}
368
369/**
370 * ks_pcie_clear_dbi_mode() - Disable DBI mode
371 * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
372 * PCIe host controller driver information.
373 *
374 * Since modification of dbi_cs2 involves different clock domain, read the
375 * status back to ensure the transition is complete.
376 */
377static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
378{
379 u32 val;
380
381 val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
382 val &= ~DBI_CS2;
383 ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
384
385 do {
386 val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
387 } while (val & DBI_CS2);
388}
389
390static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
391{
392 u32 val;
393 u32 num_viewport = ks_pcie->num_viewport;
394 struct dw_pcie *pci = ks_pcie->pci;
395 struct pcie_port *pp = &pci->pp;
396 u64 start, end;
397 struct resource *mem;
398 int i;
399
400 mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
401 start = mem->start;
402 end = mem->end;
403
404 /* Disable BARs for inbound access */
405 ks_pcie_set_dbi_mode(ks_pcie);
406 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
407 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
408 ks_pcie_clear_dbi_mode(ks_pcie);
409
410 if (ks_pcie->is_am6)
411 return;
412
413 val = ilog2(OB_WIN_SIZE);
414 ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
415
416 /* Using Direct 1:1 mapping of RC <-> PCI memory space */
417 for (i = 0; i < num_viewport && (start < end); i++) {
418 ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
419 lower_32_bits(start) | OB_ENABLEN);
420 ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
421 upper_32_bits(start));
422 start += OB_WIN_SIZE * SZ_1M;
423 }
424
425 val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
426 val |= OB_XLAT_EN_VAL;
427 ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
428}
429
430static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
431 unsigned int devfn, int where)
432{
433 struct pcie_port *pp = bus->sysdata;
434 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
435 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
436 u32 reg;
437
438 reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
439 CFG_FUNC(PCI_FUNC(devfn));
440 if (!pci_is_root_bus(bus->parent))
441 reg |= CFG_TYPE1;
442 ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
443
444 return pp->va_cfg0_base + where;
445}
446
447static struct pci_ops ks_child_pcie_ops = {
448 .map_bus = ks_pcie_other_map_bus,
449 .read = pci_generic_config_read,
450 .write = pci_generic_config_write,
451};
452
453/**
454 * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
455 * @bus: A pointer to the PCI bus structure.
456 *
457 * This sets BAR0 to enable inbound access for MSI_IRQ register
458 */
459static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
460{
461 struct pcie_port *pp = bus->sysdata;
462 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
463 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
464
465 if (!pci_is_root_bus(bus))
466 return 0;
467
468 /* Configure and set up BAR0 */
469 ks_pcie_set_dbi_mode(ks_pcie);
470
471 /* Enable BAR0 */
472 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
473 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
474
475 ks_pcie_clear_dbi_mode(ks_pcie);
476
477 /*
478 * For BAR0, just setting bus address for inbound writes (MSI) should
479 * be sufficient. Use physical address to avoid any conflicts.
480 */
481 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
482
483 return 0;
484}
485
486static struct pci_ops ks_pcie_ops = {
487 .map_bus = dw_pcie_own_conf_map_bus,
488 .read = pci_generic_config_read,
489 .write = pci_generic_config_write,
490 .add_bus = ks_pcie_v3_65_add_bus,
491};
492
493/**
494 * ks_pcie_link_up() - Check if link up
495 * @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
496 * controller driver information.
497 */
498static int ks_pcie_link_up(struct dw_pcie *pci)
499{
500 u32 val;
501
502 val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
503 val &= PORT_LOGIC_LTSSM_STATE_MASK;
504 return (val == PORT_LOGIC_LTSSM_STATE_L0);
505}
506
507static void ks_pcie_stop_link(struct dw_pcie *pci)
508{
509 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
510 u32 val;
511
512 /* Disable Link training */
513 val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
514 val &= ~LTSSM_EN_VAL;
515 ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
516}
517
518static int ks_pcie_start_link(struct dw_pcie *pci)
519{
520 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
521 u32 val;
522
523 /* Initiate Link Training */
524 val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
525 ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
526
527 return 0;
528}
529
530static void ks_pcie_quirk(struct pci_dev *dev)
531{
532 struct pci_bus *bus = dev->bus;
533 struct pci_dev *bridge;
534 static const struct pci_device_id rc_pci_devids[] = {
535 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
536 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
537 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
538 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
539 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
540 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
541 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
542 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
543 { 0, },
544 };
545
546 if (pci_is_root_bus(bus))
547 bridge = dev;
548
549 /* look for the host bridge */
550 while (!pci_is_root_bus(bus)) {
551 bridge = bus->self;
552 bus = bus->parent;
553 }
554
555 if (!bridge)
556 return;
557
558 /*
559 * Keystone PCI controller has a h/w limitation of
560 * 256 bytes maximum read request size. It can't handle
561 * anything higher than this. So force this limit on
562 * all downstream devices.
563 */
564 if (pci_match_id(rc_pci_devids, bridge)) {
565 if (pcie_get_readrq(dev) > 256) {
566 dev_info(&dev->dev, "limiting MRRS to 256\n");
567 pcie_set_readrq(dev, 256);
568 }
569 }
570}
571DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
572
573static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
574{
575 unsigned int irq = desc->irq_data.hwirq;
576 struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
577 u32 offset = irq - ks_pcie->msi_host_irq;
578 struct dw_pcie *pci = ks_pcie->pci;
579 struct pcie_port *pp = &pci->pp;
580 struct device *dev = pci->dev;
581 struct irq_chip *chip = irq_desc_get_chip(desc);
582 u32 vector, virq, reg, pos;
583
584 dev_dbg(dev, "%s, irq %d\n", __func__, irq);
585
586 /*
587 * The chained irq handler installation would have replaced normal
588 * interrupt driver handler so we need to take care of mask/unmask and
589 * ack operation.
590 */
591 chained_irq_enter(chip, desc);
592
593 reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
594 /*
595 * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
596 * shows 1, 9, 17, 25 and so forth
597 */
598 for (pos = 0; pos < 4; pos++) {
599 if (!(reg & BIT(pos)))
600 continue;
601
602 vector = offset + (pos << 3);
603 virq = irq_linear_revmap(pp->irq_domain, vector);
604 dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector,
605 virq);
606 generic_handle_irq(virq);
607 }
608
609 chained_irq_exit(chip, desc);
610}
611
612/**
613 * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
614 * @desc: Pointer to irq descriptor
615 *
616 * Traverse through pending legacy interrupts and invoke handler for each. Also
617 * takes care of interrupt controller level mask/ack operation.
618 */
619static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
620{
621 unsigned int irq = irq_desc_get_irq(desc);
622 struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
623 struct dw_pcie *pci = ks_pcie->pci;
624 struct device *dev = pci->dev;
625 u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
626 struct irq_chip *chip = irq_desc_get_chip(desc);
627
628 dev_dbg(dev, ": Handling legacy irq %d\n", irq);
629
630 /*
631 * The chained irq handler installation would have replaced normal
632 * interrupt driver handler so we need to take care of mask/unmask and
633 * ack operation.
634 */
635 chained_irq_enter(chip, desc);
636 ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
637 chained_irq_exit(chip, desc);
638}

static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
{
	struct device *dev = ks_pcie->pci->dev;
	struct device_node *np = ks_pcie->np;
	struct device_node *intc_np;
	struct irq_data *irq_data;
	int irq_count, irq, ret, i;

	if (!IS_ENABLED(CONFIG_PCI_MSI))
		return 0;

	intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
	if (!intc_np) {
		if (ks_pcie->is_am6)
			return 0;
		dev_warn(dev, "msi-interrupt-controller node is absent\n");
		return -EINVAL;
	}

	irq_count = of_irq_count(intc_np);
	if (!irq_count) {
		dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
		ret = -EINVAL;
		goto err;
	}

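	/*
	 * Record the hwirq of the first host MSI interrupt: the chained
	 * handler uses it to work out which MSI_IRQ_STATUS bank a given
	 * parent interrupt belongs to.
	 */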
	for (i = 0; i < irq_count; i++) {
		irq = irq_of_parse_and_map(intc_np, i);
		if (!irq) {
			ret = -EINVAL;
			goto err;
		}

		if (!ks_pcie->msi_host_irq) {
			irq_data = irq_get_irq_data(irq);
			if (!irq_data) {
				ret = -EINVAL;
				goto err;
			}
			ks_pcie->msi_host_irq = irq_data->hwirq;
		}

		irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
						 ks_pcie);
	}

	of_node_put(intc_np);
	return 0;

err:
	of_node_put(intc_np);
	return ret;
}

static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
{
	struct device *dev = ks_pcie->pci->dev;
	struct irq_domain *legacy_irq_domain;
	struct device_node *np = ks_pcie->np;
	struct device_node *intc_np;
	int irq_count, irq, ret = 0, i;

	intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
	if (!intc_np) {
		/*
		 * Since legacy interrupts are modeled as edge interrupts on
		 * AM6, keep them disabled for now.
		 */
		if (ks_pcie->is_am6)
			return 0;
		dev_warn(dev, "legacy-interrupt-controller node is absent\n");
		return -EINVAL;
	}

	irq_count = of_irq_count(intc_np);
	if (!irq_count) {
		dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < irq_count; i++) {
		irq = irq_of_parse_and_map(intc_np, i);
		if (!irq) {
			ret = -EINVAL;
			goto err;
		}
		ks_pcie->legacy_host_irqs[i] = irq;

		irq_set_chained_handler_and_data(irq,
						 ks_pcie_legacy_irq_handler,
						 ks_pcie);
	}

	legacy_irq_domain =
		irq_domain_add_linear(intc_np, PCI_NUM_INTX,
				      &ks_pcie_legacy_irq_domain_ops, NULL);
	if (!legacy_irq_domain) {
		dev_err(dev, "Failed to add irq domain for legacy irqs\n");
		ret = -EINVAL;
		goto err;
	}
	ks_pcie->legacy_irq_domain = legacy_irq_domain;

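	/* Unmask all four INTx sources at the application register level. */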
	for (i = 0; i < PCI_NUM_INTX; i++)
		ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);

err:
	of_node_put(intc_np);
	return ret;
}

#ifdef CONFIG_ARM
/*
 * When a PCI device does not exist during config cycles, the Keystone host
 * gets a bus error instead of returning 0xffffffff. This handler always
 * returns 0 for such faults.
 */
static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
			 struct pt_regs *regs)
{
	unsigned long instr = *(unsigned long *) instruction_pointer(regs);

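	/*
	 * If the aborting instruction looks like a load, fake an all-ones
	 * result in its destination register (bits 15:12 of the encoding)
	 * and step the PC past the faulting instruction.
	 */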
	if ((instr & 0x0e100090) == 0x00100090) {
		int reg = (instr >> 12) & 15;

		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
	}

	return 0;
}
#endif

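/*
 * Program the RC's PCI Vendor and Device ID from the ID value exposed through
 * the "ti,syscon-pcie-id" syscon region; DBI read-only writes are enabled
 * around the config space update.
 */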
static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
{
	int ret;
	unsigned int id;
	struct regmap *devctrl_regs;
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;

	devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
	if (IS_ERR(devctrl_regs))
		return PTR_ERR(devctrl_regs);

	ret = regmap_read(devctrl_regs, 0, &id);
	if (ret)
		return ret;

	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int __init ks_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	int ret;

	pp->bridge->ops = &ks_pcie_ops;
	if (!ks_pcie->is_am6)
		pp->bridge->child_ops = &ks_child_pcie_ops;

	ret = ks_pcie_config_legacy_irq(ks_pcie);
	if (ret)
		return ret;

	ret = ks_pcie_config_msi_irq(ks_pcie);
	if (ret)
		return ret;

	ks_pcie_stop_link(pci);
	ks_pcie_setup_rc_app_regs(ks_pcie);
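	/* Indicate 32-bit I/O addressing in the bridge's I/O base/limit registers. */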
	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
			pci->dbi_base + PCI_IO_BASE);

	ret = ks_pcie_init_id(ks_pcie);
	if (ret < 0)
		return ret;

#ifdef CONFIG_ARM
	/*
	 * PCIe access errors that result in OCP errors are caught by the ARM
	 * core as "external aborts".
	 */
	hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
			"Asynchronous external abort");
#endif

	return 0;
}

static const struct dw_pcie_host_ops ks_pcie_host_ops = {
	.host_init = ks_pcie_host_init,
	.msi_host_init = ks_pcie_msi_host_init,
};

static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
	.host_init = ks_pcie_host_init,
};

static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
{
	struct keystone_pcie *ks_pcie = priv;

	return ks_pcie_handle_error_irq(ks_pcie);
}

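/*
 * The DBI2 (shadow) registers, such as the BAR masks, overlay the normal DBI
 * space on this controller, so writes to them are bracketed by setting and
 * clearing the controller's DBI_CS2 mode.
 */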
static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
				     u32 reg, size_t size, u32 val)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	ks_pcie_set_dbi_mode(ks_pcie);
	dw_pcie_write(base + reg, size, val);
	ks_pcie_clear_dbi_mode(ks_pcie);
}

static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
	.start_link = ks_pcie_start_link,
	.stop_link = ks_pcie_stop_link,
	.link_up = ks_pcie_link_up,
	.write_dbi2 = ks_pcie_am654_write_dbi2,
};

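/*
 * At EP init, fix up BAR0: the DBI2 write programs the BAR mask (size) to the
 * application register space size and the DBI write marks it as a 32-bit
 * memory BAR.
 */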
static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	int flags;

	ep->page_size = AM654_WIN_SIZE;
	flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
	dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
}

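/*
 * Generate a legacy (INTx) interrupt towards the RC by momentarily enabling
 * the interrupt pin advertised in PCI_INTERRUPT_PIN and pulsing the EP IRQ
 * set/clear registers.
 */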
static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
{
	struct dw_pcie *pci = ks_pcie->pci;
	u8 int_pin;

	int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
	if (int_pin == 0 || int_pin > 4)
		return;

	ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
			   INT_ENABLE);
	ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
	mdelay(1);
	ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
	ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
			   INT_ENABLE);
}

static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				   enum pci_epc_irq_type type,
				   u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		ks_pcie_am654_raise_legacy_irq(ks_pcie);
		break;
	case PCI_EPC_IRQ_MSI:
		dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
		break;
	case PCI_EPC_IRQ_MSIX:
		dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
		return -EINVAL;
	}

	return 0;
}

static const struct pci_epc_features ks_pcie_am654_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.reserved_bar = 1 << BAR_0 | 1 << BAR_1,
	.bar_fixed_64bit = 1 << BAR_0,
	.bar_fixed_size[2] = SZ_1M,
	.bar_fixed_size[3] = SZ_64K,
	.bar_fixed_size[4] = 256,
	.bar_fixed_size[5] = SZ_1M,
	.align = SZ_1M,
};

static const struct pci_epc_features*
ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
{
	return &ks_pcie_am654_epc_features;
}

static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
	.ep_init = ks_pcie_am654_ep_init,
	.raise_irq = ks_pcie_am654_raise_irq,
	.get_features = &ks_pcie_am654_get_features,
};

static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
{
	int num_lanes = ks_pcie->num_lanes;

	while (num_lanes--) {
		phy_power_off(ks_pcie->phy[num_lanes]);
		phy_exit(ks_pcie->phy[num_lanes]);
	}
}

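/*
 * Reset, initialize and power on the per-lane PHYs; on failure, power off and
 * exit any lanes that were already brought up.
 */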
static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
{
	int i;
	int ret;
	int num_lanes = ks_pcie->num_lanes;

	for (i = 0; i < num_lanes; i++) {
		ret = phy_reset(ks_pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_init(ks_pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(ks_pcie->phy[i]);
		if (ret < 0) {
			phy_exit(ks_pcie->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(ks_pcie->phy[i]);
		phy_exit(ks_pcie->phy[i]);
	}

	return ret;
}

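/*
 * Select RC mode (and enable the system clock output) through the
 * "ti,syscon-pcie-mode" syscon. A missing phandle is not treated as an error.
 */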
static int ks_pcie_set_mode(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct regmap *syscon;
	u32 val;
	u32 mask;
	int ret = 0;

	syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
	if (IS_ERR(syscon))
		return 0;

	mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
	val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;

	ret = regmap_update_bits(syscon, 0, mask, val);
	if (ret) {
		dev_err(dev, "failed to set pcie mode\n");
		return ret;
	}

	return 0;
}

static int ks_pcie_am654_set_mode(struct device *dev,
				  enum dw_pcie_device_mode mode)
{
	struct device_node *np = dev->of_node;
	struct regmap *syscon;
	u32 val;
	u32 mask;
	int ret = 0;

	syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
	if (IS_ERR(syscon))
		return 0;

	mask = AM654_PCIE_DEV_TYPE_MASK;

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		val = RC;
		break;
	case DW_PCIE_EP_TYPE:
		val = EP;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
		return -EINVAL;
	}

	ret = regmap_update_bits(syscon, 0, mask, val);
	if (ret) {
		dev_err(dev, "failed to set pcie mode\n");
		return ret;
	}

	return 0;
}

static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
	.host_ops = &ks_pcie_host_ops,
	.version = 0x365A,
};

static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
	.host_ops = &ks_pcie_am654_host_ops,
	.mode = DW_PCIE_RC_TYPE,
	.version = 0x490A,
};

static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
	.ep_ops = &ks_pcie_am654_ep_ops,
	.mode = DW_PCIE_EP_TYPE,
	.version = 0x490A,
};

static const struct of_device_id ks_pcie_of_match[] = {
	{
		.type = "pci",
		.data = &ks_pcie_rc_of_data,
		.compatible = "ti,keystone-pcie",
	},
	{
		.data = &ks_pcie_am654_rc_of_data,
		.compatible = "ti,am654-pcie-rc",
	},
	{
		.data = &ks_pcie_am654_ep_of_data,
		.compatible = "ti,am654-pcie-ep",
	},
	{ },
};

static int __init ks_pcie_probe(struct platform_device *pdev)
{
	const struct dw_pcie_host_ops *host_ops;
	const struct dw_pcie_ep_ops *ep_ops;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct ks_pcie_of_data *data;
	const struct of_device_id *match;
	enum dw_pcie_device_mode mode;
	struct dw_pcie *pci;
	struct keystone_pcie *ks_pcie;
	struct device_link **link;
	struct gpio_desc *gpiod;
	struct resource *res;
	unsigned int version;
	void __iomem *base;
	u32 num_viewport;
	struct phy **phy;
	u32 num_lanes;
	char name[10];
	int ret;
	int irq;
	int i;

	match = of_match_device(of_match_ptr(ks_pcie_of_match), dev);
	data = (struct ks_pcie_of_data *)match->data;
	if (!data)
		return -EINVAL;

	version = data->version;
	host_ops = data->host_ops;
	ep_ops = data->ep_ops;
	mode = data->mode;

	ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
	if (!ks_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

	ks_pcie->app = *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
	base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
		ks_pcie->is_am6 = true;

	pci->dbi_base = base;
	pci->dbi_base2 = base;
	pci->dev = dev;
	pci->ops = &ks_pcie_dw_pcie_ops;
	pci->version = version;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
			  "ks-pcie-error-irq", ks_pcie);
	if (ret < 0) {
		dev_err(dev, "failed to request error IRQ %d\n",
			irq);
		return ret;
	}

	ret = of_property_read_u32(np, "num-lanes", &num_lanes);
	if (ret)
		num_lanes = 1;

	phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < num_lanes; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_optional_get(dev, name);
		if (IS_ERR(phy[i])) {
			ret = PTR_ERR(phy[i]);
			goto err_link;
		}

		if (!phy[i])
			continue;

		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			ret = -EINVAL;
			goto err_link;
		}
	}

	ks_pcie->np = np;
	ks_pcie->pci = pci;
	ks_pcie->link = link;
	ks_pcie->num_lanes = num_lanes;
	ks_pcie->phy = phy;

	gpiod = devm_gpiod_get_optional(dev, "reset",
					GPIOD_OUT_LOW);
	if (IS_ERR(gpiod)) {
		ret = PTR_ERR(gpiod);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get reset GPIO\n");
		goto err_link;
	}

	ret = ks_pcie_enable_phy(ks_pcie);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		goto err_link;
	}

	platform_set_drvdata(pdev, ks_pcie);
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

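	/*
	 * Controller versions of 0x480A and above (AM654 uses 0x490A) have a
	 * different mode-selection syscon layout than the older Keystone 2
	 * parts (0x365A).
	 */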
	if (pci->version >= 0x480A)
		ret = ks_pcie_am654_set_mode(dev, mode);
	else
		ret = ks_pcie_set_mode(dev);
	if (ret < 0)
		goto err_get_sync;

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
			ret = -ENODEV;
			goto err_get_sync;
		}

		ret = of_property_read_u32(np, "num-viewport", &num_viewport);
		if (ret < 0) {
			dev_err(dev, "unable to read *num-viewport* property\n");
			goto err_get_sync;
		}

		/*
		 * The "Power Sequencing and Reset Signal Timings" table of the
		 * PCI Express Card Electromechanical Specification, Rev. 2.0,
		 * indicates PERST# should be deasserted a minimum of 100 us
		 * after REFCLK is stable. The REFCLK to the connector in RC
		 * mode is selected while enabling the PHY. So deassert PERST#
		 * after 100 us.
		 */
		if (gpiod) {
			usleep_range(100, 200);
			gpiod_set_value_cansleep(gpiod, 1);
		}

		ks_pcie->num_viewport = num_viewport;
		pci->pp.ops = host_ops;
		ret = dw_pcie_host_init(&pci->pp);
		if (ret < 0)
			goto err_get_sync;
		break;
	case DW_PCIE_EP_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
			ret = -ENODEV;
			goto err_get_sync;
		}

		pci->ep.ops = ep_ops;
		ret = dw_pcie_ep_init(&pci->ep);
		if (ret < 0)
			goto err_get_sync;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
	}

	ks_pcie_enable_error_irq(ks_pcie);

	return 0;

err_get_sync:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	ks_pcie_disable_phy(ks_pcie);

err_link:
	while (--i >= 0 && link[i])
		device_link_del(link[i]);

	return ret;
}

static int __exit ks_pcie_remove(struct platform_device *pdev)
{
	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
	struct device_link **link = ks_pcie->link;
	int num_lanes = ks_pcie->num_lanes;
	struct device *dev = &pdev->dev;

	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	ks_pcie_disable_phy(ks_pcie);
	while (num_lanes--)
		device_link_del(link[num_lanes]);

	return 0;
}

static struct platform_driver ks_pcie_driver __refdata = {
	.probe  = ks_pcie_probe,
	.remove = __exit_p(ks_pcie_remove),
	.driver = {
		.name	= "keystone-pcie",
		.of_match_table = of_match_ptr(ks_pcie_of_match),
	},
};
builtin_platform_driver(ks_pcie_driver);