// SPDX-License-Identifier: GPL-2.0
/*
 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments Incorporated - https://www.ti.com
 *
 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/gpio/consumer.h>

#include "../../pci.h"
#include "pcie-designware.h"

/* PCIe controller wrapper DRA7XX configuration registers */

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN		0x0028
#define	ERR_SYS						BIT(0)
#define	ERR_FATAL					BIT(1)
#define	ERR_NONFATAL					BIT(2)
#define	ERR_COR						BIT(3)
#define	ERR_AXI						BIT(4)
#define	ERR_ECRC					BIT(5)
#define	PME_TURN_OFF					BIT(8)
#define	PME_TO_ACK					BIT(9)
#define	PM_PME						BIT(10)
#define	LINK_REQ_RST					BIT(11)
#define	LINK_UP_EVT					BIT(12)
#define	CFG_BME_EVT					BIT(13)
#define	CFG_MSE_EVT					BIT(14)
#define	INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
			LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI		0x0038
#define	INTA						BIT(0)
#define	INTB						BIT(1)
#define	INTC						BIT(2)
#define	INTD						BIT(3)
#define	MSI						BIT(4)
#define	LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

#define	PCIECTRL_TI_CONF_DEVICE_TYPE			0x0100
#define	DEVICE_TYPE_EP					0x0
#define	DEVICE_TYPE_LEG_EP				0x1
#define	DEVICE_TYPE_RC					0x4

#define	PCIECTRL_DRA7XX_CONF_DEVICE_CMD			0x0104
#define	LTSSM_EN					0x1

#define	PCIECTRL_DRA7XX_CONF_PHY_CS			0x010C
#define	LINK_UP						BIT(16)
#define	DRA7XX_CPU_TO_BUS_ADDR				0x0FFFFFFF

#define	EXP_CAP_ID_OFFSET				0x70

#define	PCIECTRL_TI_CONF_INTX_ASSERT			0x0124
#define	PCIECTRL_TI_CONF_INTX_DEASSERT			0x0128

#define	PCIECTRL_TI_CONF_MSI_XMT			0x012c
#define	MSI_REQ_GRANT					BIT(0)
#define	MSI_VECTOR_SHIFT				7

#define	PCIE_1LANE_2LANE_SELECTION			BIT(13)
#define	PCIE_B1C0_MODE_SEL				BIT(2)
#define	PCIE_B0_B1_TSYNCEN				BIT(0)
struct dra7xx_pcie {
	struct dw_pcie		*pci;
	void __iomem		*base;		/* DT ti_conf */
	int			phy_count;	/* DT phy-names count */
	struct phy		**phy;
	int			link_gen;
	struct irq_domain	*irq_domain;
	enum dw_pcie_device_mode mode;
};

struct dra7xx_pcie_of_data {
	enum dw_pcie_device_mode mode;
	u32 b1co_mode_sel_mask;
};

#define to_dra7xx_pcie(x)	dev_get_drvdata((x)->dev)

static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
	return readl(pcie->base + offset);
}

static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
				      u32 value)
{
	writel(value, pcie->base + offset);
}

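/*
 * Only the low 28 bits of a CPU address reach the PCIe bus on DRA7xx,
 * so outbound addresses are masked down before the DWC core programs
 * its ATU windows.
 */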
static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
}

static int dra7xx_pcie_link_up(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);

	return !!(reg & LINK_UP);
}

static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
}

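/*
 * If DT restricts the link to Gen1, clamp both the advertised link
 * capability (LNKCAP) and the target link speed (LNKCTL2) to 2.5 GT/s
 * before enabling the LTSSM.
 */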
static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device *dev = pci->dev;
	u32 reg;
	u32 exp_cap_off = EXP_CAP_ID_OFFSET;

	if (dw_pcie_link_up(pci)) {
		dev_err(dev, "link is already up\n");
		return 0;
	}

	if (dra7xx->link_gen == 1) {
		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
			     4, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCAP, 4, reg);
		}

		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
			     2, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCTL2, 2, reg);
		}
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg |= LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	return 0;
}

static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
			   LEG_EP_INTERRUPTS | MSI);

	dra7xx_pcie_writel(dra7xx,
			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
			   MSI | LEG_EP_INTERRUPTS);
}

static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
			   INTERRUPTS);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
			   INTERRUPTS);
}

static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
	dra7xx_pcie_enable_msi_interrupts(dra7xx);
}

static int dra7xx_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	dw_pcie_setup_rc(pp);

	dra7xx_pcie_establish_link(pci);
	dw_pcie_wait_for_link(pci);
	dw_pcie_msi_init(pp);
	dra7xx_pcie_enable_interrupts(dra7xx);

	return 0;
}

static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = dra7xx_pcie_intx_map,
	.xlate = pci_irqd_intx_xlate,
};

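/*
 * Scan one MSI controller's status register and dispatch every pending
 * vector.  Returns nonzero if anything was handled, so the caller
 * knows it must rescan.
 */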
static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned long val;
	int pos, irq;

	val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
				(index * MSI_REG_CTRL_BLOCK_SIZE));
	if (!val)
		return 0;

	pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
	while (pos != MAX_MSI_IRQS_PER_CTRL) {
		irq = irq_find_mapping(pp->irq_domain,
				       (index * MAX_MSI_IRQS_PER_CTRL) + pos);
		generic_handle_irq(irq);
		pos++;
		pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
	}

	return 1;
}

static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret, i, count, num_ctrls;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/*
	 * Need to make sure all MSI status bits read 0 before exiting.
	 * Else, new MSI IRQs are not registered by the wrapper. Have an
	 * upper bound for the loop and exit the IRQ in case of an IRQ
	 * flood to avoid locking up the system in interrupt context.
	 */
	count = 0;
	do {
		ret = 0;

		for (i = 0; i < num_ctrls; i++)
			ret |= dra7xx_pcie_handle_msi(pp, i);
		count++;
	} while (ret && count <= 1000);

	if (count > 1000)
		dev_warn_ratelimited(pci->dev,
				     "Too many MSI IRQs to handle\n");
}

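/*
 * Chained handler for the wrapper's muxed MSI/INTx interrupt line: ack
 * the wrapper status first, then fan out to either the MSI machinery
 * or the INTx IRQ domain.
 */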
static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct dra7xx_pcie *dra7xx;
	struct dw_pcie *pci;
	struct pcie_port *pp;
	unsigned long reg;
	u32 virq, bit;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	pci = to_dw_pcie_from_pp(pp);
	dra7xx = to_dra7xx_pcie(pci);

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

	switch (reg) {
	case MSI:
		dra7xx_pcie_handle_msi_irq(pp);
		break;
	case INTA:
	case INTB:
	case INTC:
	case INTD:
		for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
			virq = irq_find_mapping(dra7xx->irq_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
		break;
	}

	chained_irq_exit(chip, desc);
}

static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct device *dev = pci->dev;
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

	if (reg & ERR_SYS)
		dev_dbg(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_dbg(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_COR)
		dev_dbg(dev, "Correctable Error\n");

	if (reg & ERR_AXI)
		dev_dbg(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_ECRC)
		dev_dbg(dev, "ECRC Error\n");

	if (reg & PME_TURN_OFF)
		dev_dbg(dev,
			"Power Management Event Turn-Off message received\n");

	if (reg & PME_TO_ACK)
		dev_dbg(dev,
			"Power Management Turn-Off Ack message received\n");

	if (reg & PM_PME)
		dev_dbg(dev, "PM Power Management Event message received\n");

	if (reg & LINK_REQ_RST)
		dev_dbg(dev, "Link Request Reset\n");

	if (reg & LINK_UP_EVT) {
		if (dra7xx->mode == DW_PCIE_EP_TYPE)
			dw_pcie_ep_linkup(ep);
		dev_dbg(dev, "Link-up state change\n");
	}

	if (reg & CFG_BME_EVT)
		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

	if (reg & CFG_MSE_EVT)
		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

	return IRQ_HANDLED;
}

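/*
 * The INTx IRQ domain hangs off the controller's first DT child node
 * (the interrupt-controller child in the binding), with one hwirq per
 * legacy interrupt.
 */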
static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node = of_get_next_child(node, NULL);

	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
					 pp);
	dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						   &intx_domain_ops, pp);
	of_node_put(pcie_intc_node);
	if (!dra7xx->irq_domain) {
		dev_err(dev, "Failed to get a INTx IRQ domain\n");
		return -ENODEV;
	}

	return 0;
}

static void dra7xx_pcie_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

static int dra7xx_pcie_msi_set_affinity(struct irq_data *d,
					const struct cpumask *mask,
					bool force)
{
	return -EINVAL;
}

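/*
 * Mask/unmask flip the per-vector bit in the DWC PCIE_MSI_INTR0_MASK
 * register; pp->lock serializes these read-modify-write updates.
 */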
static void dra7xx_pcie_bottom_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
			   pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dra7xx_pcie_bottom_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
			   pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dra7xx_pcie_bottom_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dra7xx_pci_msi_bottom_irq_chip = {
	.name = "DRA7XX-PCI-MSI",
	.irq_ack = dra7xx_pcie_bottom_ack,
	.irq_compose_msi_msg = dra7xx_pcie_setup_msi_msg,
	.irq_set_affinity = dra7xx_pcie_msi_set_affinity,
	.irq_mask = dra7xx_pcie_bottom_mask,
	.irq_unmask = dra7xx_pcie_bottom_unmask,
};

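/*
 * The wrapper muxes MSI and INTx onto one interrupt line, so the
 * default DWC MSI setup is bypassed: initialize the per-controller
 * mask/enable registers here and let the chained handler above do the
 * actual dispatch.
 */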
static int dra7xx_pcie_msi_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u32 ctrl, num_ctrls;

	pp->msi_irq_chip = &dra7xx_pci_msi_bottom_irq_chip;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
	/* Initialize IRQ Status array */
	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		pp->irq_mask[ctrl] = ~0;
		dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
				   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				   pp->irq_mask[ctrl]);
		dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
				   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				   ~0);
	}

	return dw_pcie_allocate_domains(pp);
}

static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
	.host_init = dra7xx_pcie_host_init,
	.msi_host_init = dra7xx_pcie_msi_host_init,
};

static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	enum pci_barno bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
		dw_pcie_ep_reset_bar(pci, bar);

	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}

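/*
 * Generate a legacy interrupt by pulsing the wrapper's INTX assert/
 * deassert registers; the 1 ms delay keeps the assertion active long
 * enough to be observed before it is cleared.
 */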
static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
	mdelay(1);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
}

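/*
 * MSI transmission is a register handshake: write the zero-based
 * vector into the MSI_VECTOR field and set MSI_REQ_GRANT to send the
 * message.
 */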
static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
				      u8 interrupt_num)
{
	u32 reg;

	reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
	reg |= MSI_REQ_GRANT;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
}

static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				 enum pci_epc_irq_type type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		dra7xx_pcie_raise_legacy_irq(dra7xx);
		break;
	case PCI_EPC_IRQ_MSI:
		dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
	}

	return 0;
}

static const struct pci_epc_features dra7xx_pcie_epc_features = {
	.linkup_notifier = true,
	.msi_capable = true,
	.msix_capable = false,
};

static const struct pci_epc_features*
dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
{
	return &dra7xx_pcie_epc_features;
}

static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = dra7xx_pcie_ep_init,
	.raise_irq = dra7xx_pcie_raise_irq,
	.get_features = dra7xx_pcie_get_features,
};

static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
				     struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = dra7xx->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "ep_dbics");
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	pci->dbi_base2 =
		devm_platform_ioremap_resource_byname(pdev, "ep_dbics2");
	if (IS_ERR(pci->dbi_base2))
		return PTR_ERR(pci->dbi_base2);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	return 0;
}

static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
				       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;

	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0)
		return pp->irq;

	ret = dra7xx_pcie_init_irq_domain(pp);
	if (ret < 0)
		return ret;

	pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc_dbics");
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	pp->ops = &dra7xx_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
	.start_link = dra7xx_pcie_establish_link,
	.stop_link = dra7xx_pcie_stop_link,
	.link_up = dra7xx_pcie_link_up,
};

static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;

	while (phy_count--) {
		phy_power_off(dra7xx->phy[phy_count]);
		phy_exit(dra7xx->phy[phy_count]);
	}
}

static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;
	int ret;
	int i;

	for (i = 0; i < phy_count; i++) {
		ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE);
		if (ret < 0)
			goto err_phy;

		ret = phy_init(dra7xx->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(dra7xx->phy[i]);
		if (ret < 0) {
			phy_exit(dra7xx->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(dra7xx->phy[i]);
		phy_exit(dra7xx->phy[i]);
	}

	return ret;
}

static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = {
	.b1co_mode_sel_mask = BIT(2),
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = {
	.b1co_mode_sel_mask = GENMASK(3, 2),
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = {
	.b1co_mode_sel_mask = BIT(2),
	.mode = DW_PCIE_EP_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = {
	.b1co_mode_sel_mask = GENMASK(3, 2),
	.mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id of_dra7xx_pcie_match[] = {
	{
		.compatible = "ti,dra7-pcie",
		.data = &dra7xx_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra7-pcie-ep",
		.data = &dra7xx_pcie_ep_of_data,
	},
	{
		.compatible = "ti,dra746-pcie-rc",
		.data = &dra746_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra726-pcie-rc",
		.data = &dra726_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra746-pcie-ep",
		.data = &dra746_pcie_ep_of_data,
	},
	{
		.compatible = "ti,dra726-pcie-ep",
		.data = &dra726_pcie_ep_of_data,
	},
	{},
};

/*
 * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
 * @dev: device for the dra7xx instance where the workaround should be applied
 *
 * Accesses to the PCIe slave port that are not 32-bit aligned result in
 * an incorrect mapping to the TLP Address and Byte enable fields.
 * Byte and half-word accesses to byte offsets 0x1, 0x2, and 0x3 are
 * therefore not possible.
 *
 * To avoid this issue, set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
 */
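/*
 * The syscon hookup comes from DT; the property carries a phandle plus
 * <register offset, enable mask> cells (values illustrative):
 *
 *	ti,syscon-unaligned-access = <&scm_conf1 0x14 0x2>;
 */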
static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
	int ret;
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	struct regmap *regmap;

	regmap = syscon_regmap_lookup_by_phandle(np,
						 "ti,syscon-unaligned-access");
	if (IS_ERR(regmap)) {
		dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
		return -EINVAL;
	}

	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
					       2, 0, &args);
	if (ret) {
		dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
		return ret;
	}

	ret = regmap_update_bits(regmap, args.args[0], args.args[1],
				 args.args[1]);
	if (ret)
		dev_err(dev, "failed to enable unaligned access\n");

	of_node_put(args.np);

	return ret;
}

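/*
 * For a two-lane instance, both PHYs must be bound to the single
 * controller through the control-module lane selection register.  DT
 * points at that register with a phandle plus offset (value
 * illustrative):
 *
 *	ti,syscon-lane-sel = <&scm_conf_pcie 0x18>;
 */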
static int dra7xx_pcie_configure_two_lane(struct device *dev,
					  u32 b1co_mode_sel_mask)
{
	struct device_node *np = dev->of_node;
	struct regmap *pcie_syscon;
	unsigned int pcie_reg;
	u32 mask;
	u32 val;

	pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
	if (IS_ERR(pcie_syscon)) {
		dev_err(dev, "unable to get ti,syscon-lane-sel\n");
		return -EINVAL;
	}

	if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
				       &pcie_reg)) {
		dev_err(dev, "couldn't get lane selection reg offset\n");
		return -EINVAL;
	}

	mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
	val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
	regmap_update_bits(pcie_syscon, pcie_reg, mask, val);

	return 0;
}

static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
	u32 reg;
	int ret;
	int irq;
	int i;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	void __iomem *base;
	struct dw_pcie *pci;
	struct dra7xx_pcie *dra7xx;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	char name[10];
	struct gpio_desc *reset;
	const struct of_device_id *match;
	const struct dra7xx_pcie_of_data *data;
	enum dw_pcie_device_mode mode;
	u32 b1co_mode_sel_mask;

	match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
	if (!match)
		return -EINVAL;

	data = (struct dra7xx_pcie_of_data *)match->data;
	mode = (enum dw_pcie_device_mode)data->mode;
	b1co_mode_sel_mask = data->b1co_mode_sel_mask;

	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
	if (!dra7xx)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	base = devm_platform_ioremap_resource_byname(pdev, "ti_conf");
	if (IS_ERR(base))
		return PTR_ERR(base);

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 0) {
		dev_err(dev, "unable to count phy-names strings\n");
		return phy_count;
	}

	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < phy_count; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i]))
			return PTR_ERR(phy[i]);

		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			ret = -EINVAL;
			goto err_link;
		}
	}

	dra7xx->base = base;
	dra7xx->phy = phy;
	dra7xx->pci = pci;
	dra7xx->phy_count = phy_count;

	if (phy_count == 2) {
		ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask);
		if (ret < 0)
			dra7xx->phy_count = 1; /* Fall back to x1 lane mode */
	}

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	platform_set_drvdata(pdev, dra7xx);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

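	/*
	 * Optional, unnamed reset GPIO: requesting it as GPIOD_OUT_HIGH
	 * releases the device from reset (PERST# deassert on typical
	 * board designs).
	 */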
	reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
	if (IS_ERR(reset)) {
		ret = PTR_ERR(reset);
		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
		goto err_gpio;
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	dra7xx->link_gen = of_pci_get_max_link_speed(np);
	if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
		dra7xx->link_gen = 2;

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
			ret = -ENODEV;
			goto err_gpio;
		}

		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_RC);

		ret = dra7xx_pcie_unaligned_memaccess(dev);
		if (ret)
			dev_err(dev, "WA for Errata i870 not applied\n");

		ret = dra7xx_add_pcie_port(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	case DW_PCIE_EP_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
			ret = -ENODEV;
			goto err_gpio;
		}

		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_EP);

		ret = dra7xx_pcie_unaligned_memaccess(dev);
		if (ret)
			goto err_gpio;

		ret = dra7xx_add_pcie_ep(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
	}
	dra7xx->mode = mode;

	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto err_gpio;
	}

	return 0;

err_gpio:
err_get_sync:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);

err_link:
	while (--i >= 0)
		device_link_del(link[i]);

	return ret;
}

#ifdef CONFIG_PM_SLEEP
static int dra7xx_pcie_suspend(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* clear MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= ~PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

static int dra7xx_pcie_resume(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* set MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val |= PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

	dra7xx_pcie_disable_phy(dra7xx);

	return 0;
}

static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	return 0;
}
#endif

static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	dra7xx_pcie_stop_link(dra7xx->pci);

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);
}

static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
				      dra7xx_pcie_resume_noirq)
};

static struct platform_driver dra7xx_pcie_driver = {
	.driver = {
		.name	= "dra7-pcie",
		.of_match_table = of_dra7xx_pcie_match,
		.suppress_bind_attrs = true,
		.pm	= &dra7xx_pcie_pm_ops,
	},
	.shutdown = dra7xx_pcie_shutdown,
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);