Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * PCIe driver for Marvell Armada 370 and Armada XP SoCs
4 *
5 * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
6 */
7
8#include <linux/kernel.h>
9#include <linux/pci.h>
10#include <linux/clk.h>
11#include <linux/delay.h>
12#include <linux/gpio.h>
13#include <linux/init.h>
14#include <linux/mbus.h>
15#include <linux/msi.h>
16#include <linux/slab.h>
17#include <linux/platform_device.h>
18#include <linux/of_address.h>
19#include <linux/of_irq.h>
20#include <linux/of_gpio.h>
21#include <linux/of_pci.h>
22#include <linux/of_platform.h>
23
24#include "../pci.h"
25#include "../pci-bridge-emul.h"
26
27/*
28 * PCIe unit register offsets.
29 */
30#define PCIE_DEV_ID_OFF 0x0000
31#define PCIE_CMD_OFF 0x0004
32#define PCIE_DEV_REV_OFF 0x0008
33#define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3))
34#define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3))
35#define PCIE_CAP_PCIEXP 0x0060
36#define PCIE_HEADER_LOG_4_OFF 0x0128
37#define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4))
38#define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4))
39#define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4))
40#define PCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4))
41#define PCIE_WIN5_CTRL_OFF 0x1880
42#define PCIE_WIN5_BASE_OFF 0x1884
43#define PCIE_WIN5_REMAP_OFF 0x188c
44#define PCIE_CONF_ADDR_OFF 0x18f8
45#define PCIE_CONF_ADDR_EN 0x80000000
46#define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc))
47#define PCIE_CONF_BUS(b) (((b) & 0xff) << 16)
48#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11)
49#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8)
50#define PCIE_CONF_ADDR(bus, devfn, where) \
51 (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
52 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
53 PCIE_CONF_ADDR_EN)
54#define PCIE_CONF_DATA_OFF 0x18fc
55#define PCIE_MASK_OFF 0x1910
56#define PCIE_MASK_ENABLE_INTS 0x0f000000
57#define PCIE_CTRL_OFF 0x1a00
58#define PCIE_CTRL_X1_MODE 0x0001
59#define PCIE_STAT_OFF 0x1a04
60#define PCIE_STAT_BUS 0xff00
61#define PCIE_STAT_DEV 0x1f0000
62#define PCIE_STAT_LINK_DOWN BIT(0)
63#define PCIE_RC_RTSTA 0x1a14
64#define PCIE_DEBUG_CTRL 0x1a60
65#define PCIE_DEBUG_SOFT_RESET BIT(20)
66
struct mvebu_pcie_port;

/* Structure representing all PCIe interfaces */
struct mvebu_pcie {
	struct platform_device *pdev;
	struct mvebu_pcie_port *ports;	/* array of nports entries */
	struct msi_controller *msi;
	struct resource io;		/* raw PCIe I/O aperture from mbus */
	struct resource realio;		/* I/O range actually exposed to PCI core */
	struct resource mem;		/* PCIe memory aperture from mbus */
	struct resource busn;		/* bus-range parsed from DT */
	int nports;
};
80
/* One programmed MBus address-decode window (CPU base, bus remap, length) */
struct mvebu_pcie_window {
	phys_addr_t base;
	phys_addr_t remap;
	size_t size;		/* 0 means the window is not set up */
};
86
/* Structure representing one PCIe interface */
struct mvebu_pcie_port {
	char *name;
	void __iomem *base;		/* mapped port registers, NULL if unusable */
	u32 port;			/* marvell,pcie-port DT property */
	u32 lane;			/* marvell,pcie-lane DT property (0 if absent) */
	int devfn;			/* devfn of this port on bus 0 */
	unsigned int mem_target;	/* MBus target/attr for the MEM window */
	unsigned int mem_attr;
	unsigned int io_target;		/* MBus target/attr for the I/O window, -1 if none */
	unsigned int io_attr;
	struct clk *clk;
	struct gpio_desc *reset_gpio;	/* optional PERST# line, NULL if absent */
	char *reset_name;
	struct pci_bridge_emul bridge;	/* emulated PCI-to-PCI bridge config space */
	struct device_node *dn;
	struct mvebu_pcie *pcie;
	struct mvebu_pcie_window memwin;	/* currently programmed MEM window */
	struct mvebu_pcie_window iowin;		/* currently programmed I/O window */
	u32 saved_pcie_stat;		/* PCIE_STAT_OFF saved across suspend */
	struct resource regs;
};
109
110static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
111{
112 writel(val, port->base + reg);
113}
114
115static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
116{
117 return readl(port->base + reg);
118}
119
120static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
121{
122 return port->io_target != -1 && port->io_attr != -1;
123}
124
125static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
126{
127 return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
128}
129
130static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
131{
132 u32 stat;
133
134 stat = mvebu_readl(port, PCIE_STAT_OFF);
135 stat &= ~PCIE_STAT_BUS;
136 stat |= nr << 8;
137 mvebu_writel(port, stat, PCIE_STAT_OFF);
138}
139
140static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
141{
142 u32 stat;
143
144 stat = mvebu_readl(port, PCIE_STAT_OFF);
145 stat &= ~PCIE_STAT_DEV;
146 stat |= nr << 16;
147 mvebu_writel(port, stat, PCIE_STAT_OFF);
148}
149
150/*
151 * Setup PCIE BARs and Address Decode Wins:
152 * BAR[0] -> internal registers (needed for MSI)
153 * BAR[1] -> covers all DRAM banks
154 * BAR[2] -> Disabled
155 * WIN[0-3] -> DRAM bank[0-3]
156 */
157static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
158{
159 const struct mbus_dram_target_info *dram;
160 u32 size;
161 int i;
162
163 dram = mv_mbus_dram_info();
164
165 /* First, disable and clear BARs and windows. */
166 for (i = 1; i < 3; i++) {
167 mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
168 mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
169 mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
170 }
171
172 for (i = 0; i < 5; i++) {
173 mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
174 mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
175 mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
176 }
177
178 mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
179 mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
180 mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
181
182 /* Setup windows for DDR banks. Count total DDR size on the fly. */
183 size = 0;
184 for (i = 0; i < dram->num_cs; i++) {
185 const struct mbus_dram_window *cs = dram->cs + i;
186
187 mvebu_writel(port, cs->base & 0xffff0000,
188 PCIE_WIN04_BASE_OFF(i));
189 mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
190 mvebu_writel(port,
191 ((cs->size - 1) & 0xffff0000) |
192 (cs->mbus_attr << 8) |
193 (dram->mbus_dram_target_id << 4) | 1,
194 PCIE_WIN04_CTRL_OFF(i));
195
196 size += cs->size;
197 }
198
199 /* Round up 'size' to the nearest power of two. */
200 if ((size & (size - 1)) != 0)
201 size = 1 << fls(size);
202
203 /* Setup BAR[1] to all DRAM banks. */
204 mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
205 mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
206 mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
207 PCIE_BAR_CTRL_OFF(1));
208
209 /*
210 * Point BAR[0] to the device's internal registers.
211 */
212 mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0));
213 mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
214}
215
216static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
217{
218 u32 cmd, mask;
219
220 /* Point PCIe unit MBUS decode windows to DRAM space. */
221 mvebu_pcie_setup_wins(port);
222
223 /* Master + slave enable. */
224 cmd = mvebu_readl(port, PCIE_CMD_OFF);
225 cmd |= PCI_COMMAND_IO;
226 cmd |= PCI_COMMAND_MEMORY;
227 cmd |= PCI_COMMAND_MASTER;
228 mvebu_writel(port, cmd, PCIE_CMD_OFF);
229
230 /* Enable interrupt lines A-D. */
231 mask = mvebu_readl(port, PCIE_MASK_OFF);
232 mask |= PCIE_MASK_ENABLE_INTS;
233 mvebu_writel(port, mask, PCIE_MASK_OFF);
234}
235
236static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
237 struct pci_bus *bus,
238 u32 devfn, int where, int size, u32 *val)
239{
240 void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;
241
242 mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
243 PCIE_CONF_ADDR_OFF);
244
245 switch (size) {
246 case 1:
247 *val = readb_relaxed(conf_data + (where & 3));
248 break;
249 case 2:
250 *val = readw_relaxed(conf_data + (where & 2));
251 break;
252 case 4:
253 *val = readl_relaxed(conf_data);
254 break;
255 }
256
257 return PCIBIOS_SUCCESSFUL;
258}
259
/*
 * Write to the configuration space of a device behind this port through
 * the indirect PCIE_CONF_ADDR/PCIE_CONF_DATA register pair.
 *
 * Returns PCIBIOS_SUCCESSFUL, or PCIBIOS_BAD_REGISTER_NUMBER for an
 * unsupported access size.
 */
static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
				 struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;

	/* Latch bus/devfn/register into the indirect address register. */
	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 1:
		writeb(val, conf_data + (where & 3));
		break;
	case 2:
		writew(val, conf_data + (where & 2));
		break;
	case 4:
		writel(val, conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
285
286/*
287 * Remove windows, starting from the largest ones to the smallest
288 * ones.
289 */
290static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
291 phys_addr_t base, size_t size)
292{
293 while (size) {
294 size_t sz = 1 << (fls(size) - 1);
295
296 mvebu_mbus_del_window(base, sz);
297 base += sz;
298 size -= sz;
299 }
300}
301
302/*
303 * MBus windows can only have a power of two size, but PCI BARs do not
304 * have this constraint. Therefore, we have to split the PCI BAR into
305 * areas each having a power of two size. We start from the largest
306 * one (i.e highest order bit set in the size).
307 */
308static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
309 unsigned int target, unsigned int attribute,
310 phys_addr_t base, size_t size,
311 phys_addr_t remap)
312{
313 size_t size_mapped = 0;
314
315 while (size) {
316 size_t sz = 1 << (fls(size) - 1);
317 int ret;
318
319 ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
320 sz, remap);
321 if (ret) {
322 phys_addr_t end = base + sz - 1;
323
324 dev_err(&port->pcie->pdev->dev,
325 "Could not create MBus window at [mem %pa-%pa]: %d\n",
326 &base, &end, ret);
327 mvebu_pcie_del_windows(port, base - size_mapped,
328 size_mapped);
329 return;
330 }
331
332 size -= sz;
333 size_mapped += sz;
334 base += sz;
335 if (remap != MVEBU_MBUS_NO_REMAP)
336 remap += sz;
337 }
338}
339
/*
 * Reconcile the currently programmed window (*cur) with the *desired
 * one: tear down the old window if it differs, then create the new one
 * (a desired size of 0 means "no window"). *cur is updated to reflect
 * what is now programmed.
 */
static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
				  unsigned int target, unsigned int attribute,
				  const struct mvebu_pcie_window *desired,
				  struct mvebu_pcie_window *cur)
{
	/* Nothing to do if the window is already exactly as requested. */
	if (desired->base == cur->base && desired->remap == cur->remap &&
	    desired->size == cur->size)
		return;

	if (cur->size != 0) {
		mvebu_pcie_del_windows(port, cur->base, cur->size);
		cur->size = 0;
		cur->base = 0;

		/*
		 * If something tries to change the window while it is enabled
		 * the change will not be done atomically. That would be
		 * difficult to do in the general case.
		 */
	}

	if (desired->size == 0)
		return;

	mvebu_pcie_add_windows(port, target, attribute, desired->base,
			       desired->size, desired->remap);
	*cur = *desired;
}
368
/*
 * Re-program the port's I/O MBus window from the emulated bridge's
 * iobase/iolimit registers. An invalid or disabled range tears the
 * window down (desired = {} means size 0).
 */
static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new iobase/iolimit values invalid? */
	if (conf->iolimit < conf->iobase ||
	    conf->iolimitupper < conf->iobaseupper ||
	    !(conf->command & PCI_COMMAND_IO)) {
		mvebu_pcie_set_window(port, port->io_target, port->io_attr,
				      &desired, &port->iowin);
		return;
	}

	if (!mvebu_has_ioport(port)) {
		dev_WARN(&port->pcie->pdev->dev,
			 "Attempt to set IO when IO is disabled\n");
		return;
	}

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications. iobase is the bus address, port->iowin_base
	 * is the CPU address.
	 */
	/* Bus address: iobase[7:4] -> bits [15:12], iobaseupper -> [31:16]. */
	desired.remap = ((conf->iobase & 0xF0) << 8) |
			(conf->iobaseupper << 16);
	desired.base = port->pcie->io.start + desired.remap;
	/* Limit is inclusive and 4 KB granular, hence the 0xFFF fill. */
	desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
			 (conf->iolimitupper << 16)) -
			desired.remap) +
		       1;

	mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
			      &port->iowin);
}
407
/*
 * Re-program the port's memory MBus window from the emulated bridge's
 * membase/memlimit registers. An invalid or disabled range tears the
 * window down. Memory windows are identity-mapped (no remap).
 */
static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new membase/memlimit values invalid? */
	if (conf->memlimit < conf->membase ||
	    !(conf->command & PCI_COMMAND_MEMORY)) {
		mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
				      &desired, &port->memwin);
		return;
	}

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications.
	 */
	/* membase[15:4] -> address bits [31:20]; limit is 1 MB granular. */
	desired.base = ((conf->membase & 0xFFF0) << 16);
	desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) -
		       desired.base + 1;

	mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
			      &port->memwin);
}
434
/*
 * Emulated-bridge read hook for the PCIe capability registers: serve
 * reads from the port's real capability block at PCIE_CAP_PCIEXP,
 * filtering bits the emulated root port must not expose.
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCAP:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
		break;

	case PCI_EXP_DEVCTL:
		/* Hide the error-reporting enables (see the write hook). */
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) &
				 ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
				   PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
		break;

	case PCI_EXP_LNKCAP:
		/*
		 * PCIe requires the clock power management capability to be
		 * hard-wired to zero for downstream ports
		 */
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
			 ~PCI_EXP_LNKCAP_CLKPM;
		break;

	case PCI_EXP_LNKCTL:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCI_EXP_SLTCTL:
		/* Report "presence detect state" in the SLTSTA half. */
		*value = PCI_EXP_SLTSTA_PDS << 16;
		break;

	case PCI_EXP_RTSTA:
		*value = mvebu_readl(port, PCIE_RC_RTSTA);
		break;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
479
/*
 * Emulated-bridge write hook for the base (type 1 header) registers:
 * propagate relevant changes to the hardware windows and local bus
 * number. @old/@new are the register values before/after the write.
 */
static void
mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;
	struct pci_bridge_emul_conf *conf = &bridge->conf;

	switch (reg) {
	case PCI_COMMAND:
	{
		/* I/O enable cannot be set on ports without an I/O window. */
		if (!mvebu_has_ioport(port))
			conf->command &= ~PCI_COMMAND_IO;

		/* Only react to enables that actually toggled. */
		if ((old ^ new) & PCI_COMMAND_IO)
			mvebu_pcie_handle_iobase_change(port);
		if ((old ^ new) & PCI_COMMAND_MEMORY)
			mvebu_pcie_handle_membase_change(port);

		break;
	}

	case PCI_IO_BASE:
		/*
		 * We keep bit 1 set, it is a read-only bit that
		 * indicates we support 32 bits addressing for the
		 * I/O
		 */
		conf->iobase |= PCI_IO_RANGE_TYPE_32;
		conf->iolimit |= PCI_IO_RANGE_TYPE_32;
		mvebu_pcie_handle_iobase_change(port);
		break;

	case PCI_MEMORY_BASE:
		mvebu_pcie_handle_membase_change(port);
		break;

	case PCI_IO_BASE_UPPER16:
		mvebu_pcie_handle_iobase_change(port);
		break;

	case PCI_PRIMARY_BUS:
		/* The secondary bus number becomes the port's local bus. */
		mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
		break;

	default:
		break;
	}
}
528
/*
 * Emulated-bridge write hook for the PCIe capability registers:
 * forward writes to the port's real capability block, masking bits
 * that must stay clear in root complex mode.
 */
static void
mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCTL:
		/*
		 * Armada370 data says these bits must always
		 * be zero when in root complex mode.
		 */
		new &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
			 PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);

		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCTL:
		/*
		 * If we don't support CLKREQ, we must ensure that the
		 * CLKREQ enable bit always reads zero.  Since we haven't
		 * had this capability, and it's dependent on board wiring,
		 * disable it for the time being.
		 */
		new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;

		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCI_EXP_RTSTA:
		mvebu_writel(port, new, PCIE_RC_RTSTA);
		break;
	}
}
564
/* Hooks wiring the generic bridge emulation to this port's hardware. */
static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
	.write_base = mvebu_pci_bridge_emul_base_conf_write,
	.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
	.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
};
570
571/*
572 * Initialize the configuration space of the PCI-to-PCI bridge
573 * associated with the given PCIe interface.
574 */
575static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
576{
577 struct pci_bridge_emul *bridge = &port->bridge;
578
579 bridge->conf.vendor = PCI_VENDOR_ID_MARVELL;
580 bridge->conf.device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
581 bridge->conf.class_revision =
582 mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
583
584 if (mvebu_has_ioport(port)) {
585 /* We support 32 bits I/O addressing */
586 bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
587 bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
588 }
589
590 bridge->has_pcie = true;
591 bridge->data = port;
592 bridge->ops = &mvebu_pci_bridge_emul_ops;
593
594 pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR);
595}
596
/* Recover the driver state stashed in the legacy ARM pci_sys_data. */
static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}
601
602static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
603 struct pci_bus *bus,
604 int devfn)
605{
606 int i;
607
608 for (i = 0; i < pcie->nports; i++) {
609 struct mvebu_pcie_port *port = &pcie->ports[i];
610
611 if (bus->number == 0 && port->devfn == devfn)
612 return port;
613 if (bus->number != 0 &&
614 bus->number >= port->bridge.conf.secondary_bus &&
615 bus->number <= port->bridge.conf.subordinate_bus)
616 return port;
617 }
618
619 return NULL;
620}
621
622/* PCI configuration space write function */
623static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
624 int where, int size, u32 val)
625{
626 struct mvebu_pcie *pcie = bus->sysdata;
627 struct mvebu_pcie_port *port;
628 int ret;
629
630 port = mvebu_pcie_find_port(pcie, bus, devfn);
631 if (!port)
632 return PCIBIOS_DEVICE_NOT_FOUND;
633
634 /* Access the emulated PCI-to-PCI bridge */
635 if (bus->number == 0)
636 return pci_bridge_emul_conf_write(&port->bridge, where,
637 size, val);
638
639 if (!mvebu_pcie_link_up(port))
640 return PCIBIOS_DEVICE_NOT_FOUND;
641
642 /* Access the real PCIe interface */
643 ret = mvebu_pcie_hw_wr_conf(port, bus, devfn,
644 where, size, val);
645
646 return ret;
647}
648
649/* PCI configuration space read function */
650static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
651 int size, u32 *val)
652{
653 struct mvebu_pcie *pcie = bus->sysdata;
654 struct mvebu_pcie_port *port;
655 int ret;
656
657 port = mvebu_pcie_find_port(pcie, bus, devfn);
658 if (!port) {
659 *val = 0xffffffff;
660 return PCIBIOS_DEVICE_NOT_FOUND;
661 }
662
663 /* Access the emulated PCI-to-PCI bridge */
664 if (bus->number == 0)
665 return pci_bridge_emul_conf_read(&port->bridge, where,
666 size, val);
667
668 if (!mvebu_pcie_link_up(port)) {
669 *val = 0xffffffff;
670 return PCIBIOS_DEVICE_NOT_FOUND;
671 }
672
673 /* Access the real PCIe interface */
674 ret = mvebu_pcie_hw_rd_conf(port, bus, devfn,
675 where, size, val);
676
677 return ret;
678}
679
/* Config accessors handed to the PCI core via bridge->ops. */
static struct pci_ops mvebu_pcie_ops = {
	.read = mvebu_pcie_rd_conf,
	.write = mvebu_pcie_wr_conf,
};
684
/*
 * Resource alignment callback for the host bridge: enforce the MBus
 * window constraints on resources assigned to the bus-0 bridges.
 * Devices on other buses keep the core's proposed start address.
 */
static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
						 const struct resource *res,
						 resource_size_t start,
						 resource_size_t size,
						 resource_size_t align)
{
	if (dev->bus->number != 0)
		return start;

	/*
	 * On the PCI-to-PCI bridge side, the I/O windows must have at
	 * least a 64 KB size and the memory windows must have at
	 * least a 1 MB size. Moreover, MBus windows need to have a
	 * base address aligned on their size, and their size must be
	 * a power of two. This means that if the BAR doesn't have a
	 * power of two size, several MBus windows will actually be
	 * created. We need to ensure that the biggest MBus window
	 * (which will be the first one) is aligned on its size, which
	 * explains the rounddown_pow_of_two() being done here.
	 */
	if (res->flags & IORESOURCE_IO)
		return round_up(start, max_t(resource_size_t, SZ_64K,
					     rounddown_pow_of_two(size)));
	else if (res->flags & IORESOURCE_MEM)
		return round_up(start, max_t(resource_size_t, SZ_1M,
					     rounddown_pow_of_two(size)));
	else
		return start;
}
714
715static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
716 struct device_node *np,
717 struct mvebu_pcie_port *port)
718{
719 int ret = 0;
720
721 ret = of_address_to_resource(np, 0, &port->regs);
722 if (ret)
723 return (void __iomem *)ERR_PTR(ret);
724
725 return devm_ioremap_resource(&pdev->dev, &port->regs);
726}
727
/* Decode helpers for the controller's "ranges" entries: the flags cell
 * carries the space type, the CPU address cell packs MBus target/attr
 * in its top two bytes. */
#define DT_FLAGS_TO_TYPE(flags)       (((flags) >> 24) & 0x03)
#define    DT_TYPE_IO                 0x1
#define    DT_TYPE_MEM32              0x2
#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
#define DT_CPUADDR_TO_ATTR(cpuaddr)   (((cpuaddr) >> 48) & 0xFF)
733
/*
 * Scan the controller node's "ranges" property for the entry matching
 * this port's slot and resource @type, and extract the MBus target and
 * attribute encoded in its CPU address. *tgt/*attr are set to -1 when
 * no entry matches.
 *
 * Returns 0 on success, -EINVAL if there is no "ranges" property,
 * -ENOENT if no matching entry was found.
 */
static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
			      unsigned long type,
			      unsigned int *tgt,
			      unsigned int *attr)
{
	/* PCI child address: 3 cells; child size: 2 cells (assumed fixed). */
	const int na = 3, ns = 2;
	const __be32 *range;
	int rlen, nranges, rangesz, pna, i;

	*tgt = -1;
	*attr = -1;

	range = of_get_property(np, "ranges", &rlen);
	if (!range)
		return -EINVAL;

	pna = of_n_addr_cells(np);
	rangesz = pna + na + ns;
	nranges = rlen / sizeof(__be32) / rangesz;

	for (i = 0; i < nranges; i++, range += rangesz) {
		u32 flags = of_read_number(range, 1);
		u32 slot = of_read_number(range + 1, 1);
		u64 cpuaddr = of_read_number(range + na, pna);
		unsigned long rtype;

		if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
			rtype = IORESOURCE_IO;
		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
			rtype = IORESOURCE_MEM;
		else
			continue;

		if (slot == PCI_SLOT(devfn) && type == rtype) {
			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
			return 0;
		}
	}

	return -ENOENT;
}
776
777#ifdef CONFIG_PM_SLEEP
778static int mvebu_pcie_suspend(struct device *dev)
779{
780 struct mvebu_pcie *pcie;
781 int i;
782
783 pcie = dev_get_drvdata(dev);
784 for (i = 0; i < pcie->nports; i++) {
785 struct mvebu_pcie_port *port = pcie->ports + i;
786 port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
787 }
788
789 return 0;
790}
791
792static int mvebu_pcie_resume(struct device *dev)
793{
794 struct mvebu_pcie *pcie;
795 int i;
796
797 pcie = dev_get_drvdata(dev);
798 for (i = 0; i < pcie->nports; i++) {
799 struct mvebu_pcie_port *port = pcie->ports + i;
800 mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
801 mvebu_pcie_setup_hw(port);
802 }
803
804 return 0;
805}
806#endif
807
808static void mvebu_pcie_port_clk_put(void *data)
809{
810 struct mvebu_pcie_port *port = data;
811
812 clk_put(port->clk);
813}
814
/*
 * Parse one port child node: name, devfn, MBus target/attr, optional
 * reset GPIO and clock.
 *
 * Returns 1 when the port is usable, 0 when it should be skipped
 * (non-fatal problem; the port's name buffers are freed), or a
 * negative errno on fatal error (-ENOMEM, -EPROBE_DEFER).
 */
static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
	struct mvebu_pcie_port *port, struct device_node *child)
{
	struct device *dev = &pcie->pdev->dev;
	enum of_gpio_flags flags;
	int reset_gpio, ret;

	port->pcie = pcie;

	if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
		dev_warn(dev, "ignoring %pOF, missing pcie-port property\n",
			 child);
		goto skip;
	}

	/* Lane defaults to 0 when the property is absent. */
	if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
		port->lane = 0;

	port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
				    port->lane);
	if (!port->name) {
		ret = -ENOMEM;
		goto err;
	}

	port->devfn = of_pci_get_devfn(child);
	if (port->devfn < 0)
		goto skip;

	ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
				 &port->mem_target, &port->mem_attr);
	if (ret < 0) {
		dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
			port->name);
		goto skip;
	}

	/* I/O window is optional: -1 marks it absent (mvebu_has_ioport()). */
	if (resource_size(&pcie->io) != 0) {
		mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
				   &port->io_target, &port->io_attr);
	} else {
		port->io_target = -1;
		port->io_attr = -1;
	}

	reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags);
	if (reset_gpio == -EPROBE_DEFER) {
		ret = reset_gpio;
		goto err;
	}

	if (gpio_is_valid(reset_gpio)) {
		unsigned long gpio_flags;

		port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
						  port->name);
		if (!port->reset_name) {
			ret = -ENOMEM;
			goto err;
		}

		if (flags & OF_GPIO_ACTIVE_LOW) {
			dev_info(dev, "%pOF: reset gpio is active low\n",
				 child);
			gpio_flags = GPIOF_ACTIVE_LOW |
				     GPIOF_OUT_INIT_LOW;
		} else {
			gpio_flags = GPIOF_OUT_INIT_HIGH;
		}

		/* Request asserted: the port starts held in reset. */
		ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags,
					    port->reset_name);
		if (ret) {
			if (ret == -EPROBE_DEFER)
				goto err;
			goto skip;
		}

		port->reset_gpio = gpio_to_desc(reset_gpio);
	}

	port->clk = of_clk_get_by_name(child, NULL);
	if (IS_ERR(port->clk)) {
		dev_err(dev, "%s: cannot get clock\n", port->name);
		goto skip;
	}

	ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
	if (ret < 0) {
		clk_put(port->clk);
		goto err;
	}

	return 1;

skip:
	/* Non-fatal: report 0 (falls through to err with ret == 0). */
	ret = 0;

	/* In the case of skipping, we need to free these */
	devm_kfree(dev, port->reset_name);
	port->reset_name = NULL;
	devm_kfree(dev, port->name);
	port->name = NULL;

err:
	return ret;
}
922
923/*
924 * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs
925 * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications
926 * of the PCI Express Card Electromechanical Specification, 1.1.
927 */
928static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
929{
930 int ret;
931
932 ret = clk_prepare_enable(port->clk);
933 if (ret < 0)
934 return ret;
935
936 if (port->reset_gpio) {
937 u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000;
938
939 of_property_read_u32(port->dn, "reset-delay-us",
940 &reset_udelay);
941
942 udelay(100);
943
944 gpiod_set_value_cansleep(port->reset_gpio, 0);
945 msleep(reset_udelay / 1000);
946 }
947
948 return 0;
949}
950
951/*
952 * Power down a PCIe port. Strictly, PCIe requires us to place the card
953 * in D3hot state before asserting PERST#.
954 */
955static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
956{
957 gpiod_set_value_cansleep(port->reset_gpio, 1);
958
959 clk_disable_unprepare(port->clk);
960}
961
962/*
963 * We can't use devm_of_pci_get_host_bridge_resources() because we
964 * need to parse our special DT properties encoding the MEM and IO
965 * apertures.
966 */
967static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
968{
969 struct device *dev = &pcie->pdev->dev;
970 struct device_node *np = dev->of_node;
971 struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
972 int ret;
973
974 /* Get the bus range */
975 ret = of_pci_parse_bus_range(np, &pcie->busn);
976 if (ret) {
977 dev_err(dev, "failed to parse bus-range property: %d\n", ret);
978 return ret;
979 }
980 pci_add_resource(&bridge->windows, &pcie->busn);
981
982 /* Get the PCIe memory aperture */
983 mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
984 if (resource_size(&pcie->mem) == 0) {
985 dev_err(dev, "invalid memory aperture size\n");
986 return -EINVAL;
987 }
988
989 pcie->mem.name = "PCI MEM";
990 pci_add_resource(&bridge->windows, &pcie->mem);
991
992 /* Get the PCIe IO aperture */
993 mvebu_mbus_get_pcie_io_aperture(&pcie->io);
994
995 if (resource_size(&pcie->io) != 0) {
996 pcie->realio.flags = pcie->io.flags;
997 pcie->realio.start = PCIBIOS_MIN_IO;
998 pcie->realio.end = min_t(resource_size_t,
999 IO_SPACE_LIMIT - SZ_64K,
1000 resource_size(&pcie->io) - 1);
1001 pcie->realio.name = "PCI I/O";
1002
1003 pci_add_resource(&bridge->windows, &pcie->realio);
1004 }
1005
1006 return devm_request_pci_bus_resources(dev, &bridge->windows);
1007}
1008
1009/*
1010 * This is a copy of pci_host_probe(), except that it does the I/O
1011 * remap as the last step, once we are sure we won't fail.
1012 *
1013 * It should be removed once the I/O remap error handling issue has
1014 * been sorted out.
1015 */
1016static int mvebu_pci_host_probe(struct pci_host_bridge *bridge)
1017{
1018 struct mvebu_pcie *pcie;
1019 struct pci_bus *bus, *child;
1020 int ret;
1021
1022 ret = pci_scan_root_bus_bridge(bridge);
1023 if (ret < 0) {
1024 dev_err(bridge->dev.parent, "Scanning root bridge failed");
1025 return ret;
1026 }
1027
1028 pcie = pci_host_bridge_priv(bridge);
1029 if (resource_size(&pcie->io) != 0) {
1030 unsigned int i;
1031
1032 for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
1033 pci_ioremap_io(i, pcie->io.start + i);
1034 }
1035
1036 bus = bridge->bus;
1037
1038 /*
1039 * We insert PCI resources into the iomem_resource and
1040 * ioport_resource trees in either pci_bus_claim_resources()
1041 * or pci_bus_assign_resources().
1042 */
1043 if (pci_has_flag(PCI_PROBE_ONLY)) {
1044 pci_bus_claim_resources(bus);
1045 } else {
1046 pci_bus_size_bridges(bus);
1047 pci_bus_assign_resources(bus);
1048
1049 list_for_each_entry(child, &bus->children, node)
1050 pcie_bus_configure_settings(child);
1051 }
1052
1053 pci_bus_add_devices(bus);
1054 return 0;
1055}
1056
/*
 * Platform probe: parse the host resources and each port child node,
 * power up and map every usable port, then register the host bridge.
 * Ports that fail non-fatally are skipped rather than failing probe.
 */
static int mvebu_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mvebu_pcie *pcie;
	struct pci_host_bridge *bridge;
	struct device_node *np = dev->of_node;
	struct device_node *child;
	int num, i, ret;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	ret = mvebu_pcie_parse_request_resources(pcie);
	if (ret)
		return ret;

	num = of_get_available_child_count(np);

	pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
	if (!pcie->ports)
		return -ENOMEM;

	/* First pass: parse child nodes; keep only parseable ports. */
	i = 0;
	for_each_available_child_of_node(np, child) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		ret = mvebu_pcie_parse_port(pcie, port, child);
		if (ret < 0) {
			of_node_put(child);
			return ret;
		} else if (ret == 0) {
			/* Non-fatal parse problem: skip this port. */
			continue;
		}

		port->dn = child;
		i++;
	}
	pcie->nports = i;

	/* Second pass: power up and initialize each parsed port. */
	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		child = port->dn;
		if (!child)
			continue;

		ret = mvebu_pcie_powerup(port);
		if (ret < 0)
			continue;

		port->base = mvebu_pcie_map_registers(pdev, child, port);
		if (IS_ERR(port->base)) {
			dev_err(dev, "%s: cannot map registers\n", port->name);
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		mvebu_pcie_setup_hw(port);
		mvebu_pcie_set_local_dev_nr(port, 1);
		mvebu_pci_bridge_emul_init(port);
	}

	pcie->nports = i;

	bridge->sysdata = pcie;
	bridge->ops = &mvebu_pcie_ops;
	bridge->align_resource = mvebu_pcie_align_resource;
	bridge->msi = pcie->msi;

	return mvebu_pci_host_probe(bridge);
}
1134
/* SoC families whose PCIe controllers this driver binds to. */
static const struct of_device_id mvebu_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-xp-pcie", },
	{ .compatible = "marvell,armada-370-pcie", },
	{ .compatible = "marvell,dove-pcie", },
	{ .compatible = "marvell,kirkwood-pcie", },
	{},
};
1142
/* Suspend/resume run in the noirq phase (SET_NOIRQ_SYSTEM_SLEEP_PM_OPS). */
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};

static struct platform_driver mvebu_pcie_driver = {
	.driver = {
		.name = "mvebu-pcie",
		.of_match_table = mvebu_pcie_of_match_table,
		/* driver unloading/unbinding currently not supported */
		.suppress_bind_attrs = true,
		.pm = &mvebu_pcie_pm_ops,
	},
	.probe = mvebu_pcie_probe,
};
builtin_platform_driver(mvebu_pcie_driver);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * PCIe driver for Marvell Armada 370 and Armada XP SoCs
4 *
5 * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
6 */
7
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/pci.h>
11#include <linux/bitfield.h>
12#include <linux/clk.h>
13#include <linux/delay.h>
14#include <linux/gpio/consumer.h>
15#include <linux/init.h>
16#include <linux/irqchip/chained_irq.h>
17#include <linux/irqdomain.h>
18#include <linux/mbus.h>
19#include <linux/slab.h>
20#include <linux/platform_device.h>
21#include <linux/of_address.h>
22#include <linux/of_irq.h>
23#include <linux/of_pci.h>
24#include <linux/of_platform.h>
25
26#include "../pci.h"
27#include "../pci-bridge-emul.h"
28
/*
 * PCIe unit register offsets.
 *
 * Offsets below 0x100 alias the root port's own config header; 0x18xx
 * holds the address-decode windows; 0x1axx holds control/status.
 */
#define PCIE_DEV_ID_OFF		0x0000
#define PCIE_CMD_OFF		0x0004
#define PCIE_DEV_REV_OFF	0x0008
#define PCIE_BAR_LO_OFF(n)	(0x0010 + ((n) << 3))
#define PCIE_BAR_HI_OFF(n)	(0x0014 + ((n) << 3))
#define PCIE_SSDEV_ID_OFF	0x002c
#define PCIE_CAP_PCIEXP		0x0060
#define PCIE_CAP_PCIERR_OFF	0x0100
#define PCIE_BAR_CTRL_OFF(n)	(0x1804 + (((n) - 1) * 4))
#define PCIE_WIN04_CTRL_OFF(n)	(0x1820 + ((n) << 4))
#define PCIE_WIN04_BASE_OFF(n)	(0x1824 + ((n) << 4))
#define PCIE_WIN04_REMAP_OFF(n)	(0x182c + ((n) << 4))
#define PCIE_WIN5_CTRL_OFF	0x1880
#define PCIE_WIN5_BASE_OFF	0x1884
#define PCIE_WIN5_REMAP_OFF	0x188c
#define PCIE_CONF_ADDR_OFF	0x18f8
#define PCIE_CONF_ADDR_EN		0x80000000
#define PCIE_CONF_REG(r)		((((r) & 0xf00) << 16) | ((r) & 0xfc))
#define PCIE_CONF_BUS(b)		(((b) & 0xff) << 16)
#define PCIE_CONF_DEV(d)		(((d) & 0x1f) << 11)
#define PCIE_CONF_FUNC(f)		(((f) & 0x7) << 8)
#define PCIE_CONF_ADDR(bus, devfn, where) \
	(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn))    | \
	 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
	 PCIE_CONF_ADDR_EN)
#define PCIE_CONF_DATA_OFF	0x18fc
#define PCIE_INT_CAUSE_OFF	0x1900
#define PCIE_INT_UNMASK_OFF	0x1910
/* Parenthesize the macro argument so expressions expand correctly. */
#define PCIE_INT_INTX(i)	BIT(24 + (i))
#define PCIE_INT_PM_PME		BIT(28)
#define PCIE_INT_ALL_MASK	GENMASK(31, 0)
#define PCIE_CTRL_OFF		0x1a00
#define PCIE_CTRL_X1_MODE	0x0001
#define PCIE_CTRL_RC_MODE	BIT(1)
#define PCIE_CTRL_MASTER_HOT_RESET	BIT(24)
#define PCIE_STAT_OFF		0x1a04
#define PCIE_STAT_BUS		0xff00
#define PCIE_STAT_DEV		0x1f0000
#define PCIE_STAT_LINK_DOWN	BIT(0)
#define PCIE_SSPL_OFF		0x1a0c
#define PCIE_SSPL_VALUE_SHIFT	0
#define PCIE_SSPL_VALUE_MASK	GENMASK(7, 0)
#define PCIE_SSPL_SCALE_SHIFT	8
#define PCIE_SSPL_SCALE_MASK	GENMASK(9, 8)
#define PCIE_SSPL_ENABLE	BIT(16)
#define PCIE_RC_RTSTA		0x1a14
#define PCIE_DEBUG_CTRL		0x1a60
#define PCIE_DEBUG_SOFT_RESET	BIT(20)
struct mvebu_pcie_port;

/* Structure representing all PCIe interfaces */
struct mvebu_pcie {
	struct platform_device *pdev;
	struct mvebu_pcie_port *ports;	/* array of nports entries */
	struct resource io;		/* I/O aperture; io.start offsets the HW I/O window */
	struct resource realio;		/* NOTE(review): CPU-visible I/O view presumed — confirm against probe */
	struct resource mem;
	struct resource busn;
	int nports;			/* number of valid entries in ports[] */
};
93
/*
 * One address-decode window: CPU base address, remap target passed to
 * the MBus layer (or MVEBU_MBUS_NO_REMAP), and size. size == 0 means
 * the window is disabled.
 */
struct mvebu_pcie_window {
	phys_addr_t base;
	phys_addr_t remap;
	size_t size;
};
99
/* Structure representing one PCIe interface */
struct mvebu_pcie_port {
	char *name;
	void __iomem *base;	/* mapped unit registers; NULL marks the port unusable */
	u32 port;		/* HW port index; physical slot number is port + 1 */
	u32 lane;
	bool is_x4;		/* program x4 instead of x1 max link width */
	int devfn;		/* devfn this root port answers to on bus 0 */
	unsigned int mem_target;	/* MBus target/attribute for the memory window */
	unsigned int mem_attr;
	unsigned int io_target;		/* -1 when no I/O window (see mvebu_has_ioport) */
	unsigned int io_attr;
	struct clk *clk;
	struct gpio_desc *reset_gpio;
	char *reset_name;
	struct pci_bridge_emul bridge;	/* emulated type-1 config space of this port */
	struct device_node *dn;
	struct mvebu_pcie *pcie;
	struct mvebu_pcie_window memwin;	/* currently programmed memory window */
	struct mvebu_pcie_window iowin;		/* currently programmed I/O window */
	u32 saved_pcie_stat;	/* NOTE(review): presumably saved across suspend — confirm */
	struct resource regs;
	u8 slot_power_limit_value;	/* 0 => no slot power limit specified in DT */
	u8 slot_power_limit_scale;
	struct irq_domain *intx_irq_domain;
	raw_spinlock_t irq_lock;	/* guards RMW of PCIE_INT_UNMASK_OFF */
	int intx_irq;			/* "intx" IRQ from DT; <= 0 when absent */
};
128
/* Write a 32-bit value to a per-port PCIe unit register. */
static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
{
	writel(val, port->base + reg);
}
133
/* Read a 32-bit value from a per-port PCIe unit register. */
static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
{
	return readl(port->base + reg);
}
138
/* True when this port has an I/O decode window configured. */
static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
{
	/*
	 * io_target/io_attr are unsigned; comparing against -1 relies on the
	 * usual conversions (-1 becomes UINT_MAX), matching the "no I/O"
	 * sentinel presumably stored at parse time — TODO confirm against
	 * mvebu_pcie_parse_port.
	 */
	return port->io_target != -1 && port->io_attr != -1;
}
143
/* Link is up when the LINK_DOWN bit in the port status register is clear. */
static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
{
	return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
}
148
149static u8 mvebu_pcie_get_local_bus_nr(struct mvebu_pcie_port *port)
150{
151 return (mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_BUS) >> 8;
152}
153
154static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
155{
156 u32 stat;
157
158 stat = mvebu_readl(port, PCIE_STAT_OFF);
159 stat &= ~PCIE_STAT_BUS;
160 stat |= nr << 8;
161 mvebu_writel(port, stat, PCIE_STAT_OFF);
162}
163
164static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
165{
166 u32 stat;
167
168 stat = mvebu_readl(port, PCIE_STAT_OFF);
169 stat &= ~PCIE_STAT_DEV;
170 stat |= nr << 16;
171 mvebu_writel(port, stat, PCIE_STAT_OFF);
172}
173
/*
 * Clear BAR0's address, disable BARs 1-2 and zero out all six
 * address-decode windows (control, base and remap registers).
 */
static void mvebu_pcie_disable_wins(struct mvebu_pcie_port *port)
{
	int i;

	mvebu_writel(port, 0, PCIE_BAR_LO_OFF(0));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));

	/* BAR control registers exist only for BARs 1 and 2. */
	for (i = 1; i < 3; i++) {
		mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
	}

	/* Windows 0-4 are regular triplets; window 5 has dedicated offsets. */
	for (i = 0; i < 5; i++) {
		mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
	}

	mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
}
197
198/*
199 * Setup PCIE BARs and Address Decode Wins:
200 * BAR[0] -> internal registers (needed for MSI)
201 * BAR[1] -> covers all DRAM banks
202 * BAR[2] -> Disabled
203 * WIN[0-3] -> DRAM bank[0-3]
204 */
static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
{
	const struct mbus_dram_target_info *dram;
	u32 size;
	int i;

	dram = mv_mbus_dram_info();

	/* First, disable and clear BARs and windows. */
	mvebu_pcie_disable_wins(port);

	/* Setup windows for DDR banks. Count total DDR size on the fly. */
	size = 0;
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		/* Base/size are 64 KiB aligned; low bits carry attr/target/enable. */
		mvebu_writel(port, cs->base & 0xffff0000,
			     PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
		mvebu_writel(port,
			     ((cs->size - 1) & 0xffff0000) |
			     (cs->mbus_attr << 8) |
			     (dram->mbus_dram_target_id << 4) | 1,
			     PCIE_WIN04_CTRL_OFF(i));

		size += cs->size;
	}

	/* Round up 'size' to the nearest power of two. */
	/* NOTE(review): 1 << fls(size) overflows if bit 31 is set — assumes
	 * total DRAM below 2 GiB; confirm for the supported SoCs. */
	if ((size & (size - 1)) != 0)
		size = 1 << fls(size);

	/* Setup BAR[1] to all DRAM banks. */
	mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
	mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
		     PCIE_BAR_CTRL_OFF(1));

	/*
	 * Point BAR[0] to the device's internal registers.
	 */
	mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
}
249
/*
 * One-time hardware setup of a root port: switch to Root Complex mode,
 * program link width, disable forwarding, set the class code, program
 * the MBus windows, the slot power limit and the interrupt masks.
 */
static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
{
	u32 ctrl, lnkcap, cmd, dev_rev, unmask, sspl;

	/* Setup PCIe controller to Root Complex mode. */
	ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
	ctrl |= PCIE_CTRL_RC_MODE;
	mvebu_writel(port, ctrl, PCIE_CTRL_OFF);

	/*
	 * Set Maximum Link Width to X1 or X4 in Root Port's PCIe Link
	 * Capability register. This register is defined by PCIe specification
	 * as read-only but this mvebu controller has it as read-write and must
	 * be set to number of SerDes PCIe lanes (1 or 4). If this register is
	 * not set correctly then link with endpoint card is not established.
	 */
	lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
	lnkcap &= ~PCI_EXP_LNKCAP_MLW;
	/* Link-width field starts at bit 4 of LNKCAP. */
	lnkcap |= (port->is_x4 ? 4 : 1) << 4;
	mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);

	/* Disable Root Bridge I/O space, memory space and bus mastering. */
	cmd = mvebu_readl(port, PCIE_CMD_OFF);
	cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	mvebu_writel(port, cmd, PCIE_CMD_OFF);

	/*
	 * Change Class Code of PCI Bridge device to PCI Bridge (0x6004)
	 * because default value is Memory controller (0x5080).
	 *
	 * Note that this mvebu PCI Bridge does not have compliant Type 1
	 * Configuration Space. Header Type is reported as Type 0 and it
	 * has format of Type 0 config space.
	 *
	 * Moreover Type 0 BAR registers (ranges 0x10 - 0x28 and 0x30 - 0x34)
	 * have the same format in Marvell's specification as in PCIe
	 * specification, but their meaning is totally different and they do
	 * different things: they are aliased into internal mvebu registers
	 * (e.g. PCIE_BAR_LO_OFF) and these should not be changed or
	 * reconfigured by pci device drivers.
	 *
	 * Therefore driver uses emulation of PCI Bridge which emulates
	 * access to configuration space via internal mvebu registers or
	 * emulated configuration buffer. Driver access these PCI Bridge
	 * directly for simplification, but these registers can be accessed
	 * also via standard mvebu way for accessing PCI config space.
	 */
	dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
	dev_rev &= ~0xffffff00;
	dev_rev |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
	mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF);

	/* Point PCIe unit MBUS decode windows to DRAM space. */
	mvebu_pcie_setup_wins(port);

	/*
	 * Program Root Port to automatically send Set_Slot_Power_Limit
	 * PCIe Message when changing status from Dl_Down to Dl_Up and valid
	 * slot power limit was specified.
	 */
	sspl = mvebu_readl(port, PCIE_SSPL_OFF);
	sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
	if (port->slot_power_limit_value) {
		sspl |= port->slot_power_limit_value << PCIE_SSPL_VALUE_SHIFT;
		sspl |= port->slot_power_limit_scale << PCIE_SSPL_SCALE_SHIFT;
		sspl |= PCIE_SSPL_ENABLE;
	}
	mvebu_writel(port, sspl, PCIE_SSPL_OFF);

	/* Mask all interrupt sources. */
	mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);

	/* Clear all interrupt causes. */
	mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);

	/* Check if "intx" interrupt was specified in DT. */
	if (port->intx_irq > 0)
		return;

	/*
	 * Fallback code when "intx" interrupt was not specified in DT:
	 * Unmask all legacy INTx interrupts as driver does not provide a way
	 * for masking and unmasking of individual legacy INTx interrupts.
	 * Legacy INTx are reported via one shared GIC source and therefore
	 * kernel cannot distinguish which individual legacy INTx was triggered.
	 * These interrupts are shared, so it should not cause any issue. Just
	 * performance penalty as every PCIe interrupt handler needs to be
	 * called when some interrupt is triggered.
	 */
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	unmask |= PCIE_INT_INTX(0) | PCIE_INT_INTX(1) |
		  PCIE_INT_INTX(2) | PCIE_INT_INTX(3);
	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
}
344
/* Forward declaration: used by the child config-space accessors below. */
static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
						    struct pci_bus *bus,
						    int devfn);
348
/*
 * Read config space of a device behind a root port through that port's
 * indirect CONF_ADDR/CONF_DATA register pair.
 * NOTE(review): the addr-write/data-read pair is not locked here —
 * presumably serialized by the PCI core's config lock; confirm.
 */
static int mvebu_pcie_child_rd_conf(struct pci_bus *bus, u32 devfn, int where,
				    int size, u32 *val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;
	void __iomem *conf_data;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* No access while the link is down. */
	if (!mvebu_pcie_link_up(port))
		return PCIBIOS_DEVICE_NOT_FOUND;

	conf_data = port->base + PCIE_CONF_DATA_OFF;

	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	/* Sub-word reads select the byte/halfword lane inside the dword. */
	switch (size) {
	case 1:
		*val = readb_relaxed(conf_data + (where & 3));
		break;
	case 2:
		*val = readw_relaxed(conf_data + (where & 2));
		break;
	case 4:
		*val = readl_relaxed(conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
384
/*
 * Write config space of a device behind a root port through that port's
 * indirect CONF_ADDR/CONF_DATA register pair.
 */
static int mvebu_pcie_child_wr_conf(struct pci_bus *bus, u32 devfn,
				    int where, int size, u32 val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;
	void __iomem *conf_data;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* No access while the link is down. */
	if (!mvebu_pcie_link_up(port))
		return PCIBIOS_DEVICE_NOT_FOUND;

	conf_data = port->base + PCIE_CONF_DATA_OFF;

	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	/* Sub-word writes select the byte/halfword lane inside the dword. */
	switch (size) {
	case 1:
		writeb(val, conf_data + (where & 3));
		break;
	case 2:
		writew(val, conf_data + (where & 2));
		break;
	case 4:
		writel(val, conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
420
/* Config accessors for buses below the root (real devices, not emulated). */
static struct pci_ops mvebu_pcie_child_ops = {
	.read = mvebu_pcie_child_rd_conf,
	.write = mvebu_pcie_child_wr_conf,
};
425
426/*
427 * Remove windows, starting from the largest ones to the smallest
428 * ones.
429 */
430static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
431 phys_addr_t base, size_t size)
432{
433 while (size) {
434 size_t sz = 1 << (fls(size) - 1);
435
436 mvebu_mbus_del_window(base, sz);
437 base += sz;
438 size -= sz;
439 }
440}
441
442/*
443 * MBus windows can only have a power of two size, but PCI BARs do not
444 * have this constraint. Therefore, we have to split the PCI BAR into
445 * areas each having a power of two size. We start from the largest
446 * one (i.e highest order bit set in the size).
447 */
static int mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
				  unsigned int target, unsigned int attribute,
				  phys_addr_t base, size_t size,
				  phys_addr_t remap)
{
	size_t size_mapped = 0;

	while (size) {
		/* Largest power-of-two chunk that still fits in 'size'. */
		size_t sz = 1 << (fls(size) - 1);
		int ret;

		ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
							sz, remap);
		if (ret) {
			phys_addr_t end = base + sz - 1;

			dev_err(&port->pcie->pdev->dev,
				"Could not create MBus window at [mem %pa-%pa]: %d\n",
				&base, &end, ret);
			/*
			 * Roll back the chunks added so far: 'base' has
			 * already advanced by size_mapped.
			 */
			mvebu_pcie_del_windows(port, base - size_mapped,
					       size_mapped);
			return ret;
		}

		size -= sz;
		size_mapped += sz;
		base += sz;
		if (remap != MVEBU_MBUS_NO_REMAP)
			remap += sz;
	}

	return 0;
}
481
/*
 * Reprogram a port's MBus window so it matches 'desired': tear down the
 * current window first, then create the new one, recording the resulting
 * geometry in '*cur'. On failure '*cur' is left disabled (size == 0).
 */
static int mvebu_pcie_set_window(struct mvebu_pcie_port *port,
				 unsigned int target, unsigned int attribute,
				 const struct mvebu_pcie_window *desired,
				 struct mvebu_pcie_window *cur)
{
	int ret;

	/* Nothing to do when the window is already programmed as requested. */
	if (desired->base == cur->base && desired->remap == cur->remap &&
	    desired->size == cur->size)
		return 0;

	if (cur->size != 0) {
		mvebu_pcie_del_windows(port, cur->base, cur->size);
		cur->size = 0;
		cur->base = 0;

		/*
		 * If something tries to change the window while it is enabled
		 * the change will not be done atomically. That would be
		 * difficult to do in the general case.
		 */
	}

	/* desired->size == 0 means "disable": deletion above is enough. */
	if (desired->size == 0)
		return 0;

	ret = mvebu_pcie_add_windows(port, target, attribute, desired->base,
				     desired->size, desired->remap);
	if (ret) {
		cur->size = 0;
		cur->base = 0;
		return ret;
	}

	*cur = *desired;
	return 0;
}
519
/*
 * Apply the I/O base/limit programmed into the emulated bridge to the
 * hardware I/O decode window. A disabled/invalid range (limit < base)
 * tears the window down.
 */
static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new iobase/iolimit values invalid? */
	if (conf->iolimit < conf->iobase ||
	    le16_to_cpu(conf->iolimitupper) < le16_to_cpu(conf->iobaseupper))
		return mvebu_pcie_set_window(port, port->io_target, port->io_attr,
					     &desired, &port->iowin);

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications. iobase is the bus address, port->iowin_base
	 * is the CPU address.
	 */
	desired.remap = ((conf->iobase & 0xF0) << 8) |
			(le16_to_cpu(conf->iobaseupper) << 16);
	desired.base = port->pcie->io.start + desired.remap;
	/* Limit's low 12 bits are implicitly all-ones per the PCI spec. */
	desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
			 (le16_to_cpu(conf->iolimitupper) << 16)) -
			desired.remap) +
		       1;

	return mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
				     &port->iowin);
}
549
/*
 * Apply the memory base/limit programmed into the emulated bridge to the
 * hardware memory decode window (no remap). A disabled/invalid range
 * (limit < base) tears the window down.
 */
static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new membase/memlimit values invalid? */
	if (le16_to_cpu(conf->memlimit) < le16_to_cpu(conf->membase))
		return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
					     &desired, &port->memwin);

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications.
	 */
	desired.base = ((le16_to_cpu(conf->membase) & 0xFFF0) << 16);
	/* Limit's low 20 bits are implicitly all-ones per the PCI spec. */
	desired.size = (((le16_to_cpu(conf->memlimit) & 0xFFF0) << 16) | 0xFFFFF) -
		       desired.base + 1;

	return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
				     &port->memwin);
}
573
/*
 * Emulated-bridge read hook for the standard config header: the few
 * registers listed here are backed by real hardware; everything else
 * falls through to the emulated config buffer.
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_COMMAND:
		*value = mvebu_readl(port, PCIE_CMD_OFF);
		break;

	case PCI_PRIMARY_BUS: {
		/*
		 * From the whole 32bit register we support reading from HW only
		 * secondary bus number which is mvebu local bus number.
		 * Other bits are retrieved only from emulated config buffer.
		 */
		__le32 *cfgspace = (__le32 *)&bridge->conf;
		u32 val = le32_to_cpu(cfgspace[PCI_PRIMARY_BUS / 4]);
		val &= ~0xff00;
		val |= mvebu_pcie_get_local_bus_nr(port) << 8;
		*value = val;
		break;
	}

	case PCI_INTERRUPT_LINE: {
		/*
		 * From the whole 32bit register we support reading from HW only
		 * one bit: PCI_BRIDGE_CTL_BUS_RESET.
		 * Other bits are retrieved only from emulated config buffer.
		 */
		__le32 *cfgspace = (__le32 *)&bridge->conf;
		u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
		if (mvebu_readl(port, PCIE_CTRL_OFF) & PCIE_CTRL_MASTER_HOT_RESET)
			val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
		else
			val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
		*value = val;
		break;
	}

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
621
/*
 * Emulated-bridge read hook for the PCIe capability: most registers map
 * 1:1 onto the hardware capability at PCIE_CAP_PCIEXP, with a few
 * spec-compliance fixups applied on the way out.
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCAP:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
		break;

	case PCI_EXP_DEVCTL:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCAP:
		/*
		 * PCIe requires that the Clock Power Management capability bit
		 * is hard-wired to zero for downstream ports but HW returns 1.
		 * Additionally enable Data Link Layer Link Active Reporting
		 * Capable bit as DL_Active indication is provided too.
		 */
		*value = (mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
			  ~PCI_EXP_LNKCAP_CLKPM) | PCI_EXP_LNKCAP_DLLLARC;
		break;

	case PCI_EXP_LNKCTL:
		/* DL_Active indication is provided via PCIE_STAT_OFF */
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL) |
			 (mvebu_pcie_link_up(port) ?
			  (PCI_EXP_LNKSTA_DLLLA << 16) : 0);
		break;

	case PCI_EXP_SLTCTL: {
		u16 slotctl = le16_to_cpu(bridge->pcie_conf.slotctl);
		u16 slotsta = le16_to_cpu(bridge->pcie_conf.slotsta);
		u32 val = 0;
		/*
		 * When slot power limit was not specified in DT then
		 * ASPL_DISABLE bit is stored only in emulated config space.
		 * Otherwise reflect status of PCIE_SSPL_ENABLE bit in HW.
		 */
		if (!port->slot_power_limit_value)
			val |= slotctl & PCI_EXP_SLTCTL_ASPL_DISABLE;
		else if (!(mvebu_readl(port, PCIE_SSPL_OFF) & PCIE_SSPL_ENABLE))
			val |= PCI_EXP_SLTCTL_ASPL_DISABLE;
		/* This callback is 32-bit and in high bits is slot status. */
		val |= slotsta << 16;
		*value = val;
		break;
	}

	case PCI_EXP_RTSTA:
		*value = mvebu_readl(port, PCIE_RC_RTSTA);
		break;

	case PCI_EXP_DEVCAP2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP2);
		break;

	case PCI_EXP_DEVCTL2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
		break;

	case PCI_EXP_LNKCTL2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
		break;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
696
/*
 * Emulated-bridge read hook for extended config space: forward the AER
 * registers to the hardware block at PCIE_CAP_PCIERR_OFF.
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
				    int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	/* reg == 0 is the AER capability header itself. */
	case 0:
	case PCI_ERR_UNCOR_STATUS:
	case PCI_ERR_UNCOR_MASK:
	case PCI_ERR_UNCOR_SEVER:
	case PCI_ERR_COR_STATUS:
	case PCI_ERR_COR_MASK:
	case PCI_ERR_CAP:
	case PCI_ERR_HEADER_LOG+0:
	case PCI_ERR_HEADER_LOG+4:
	case PCI_ERR_HEADER_LOG+8:
	case PCI_ERR_HEADER_LOG+12:
	case PCI_ERR_ROOT_COMMAND:
	case PCI_ERR_ROOT_STATUS:
	case PCI_ERR_ROOT_ERR_SRC:
		*value = mvebu_readl(port, PCIE_CAP_PCIERR_OFF + reg);
		break;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
727
/*
 * Emulated-bridge write hook for the standard config header: propagate
 * writes that have a hardware backing (command, windows, secondary bus,
 * bus reset) to the PCIe unit registers.
 */
static void
mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;
	struct pci_bridge_emul_conf *conf = &bridge->conf;

	switch (reg) {
	case PCI_COMMAND:
		mvebu_writel(port, new, PCIE_CMD_OFF);
		break;

	case PCI_IO_BASE:
		if ((mask & 0xffff) && mvebu_has_ioport(port) &&
		    mvebu_pcie_handle_iobase_change(port)) {
			/* On error disable IO range */
			/* base nibble > limit nibble marks the range disabled */
			conf->iobase &= ~0xf0;
			conf->iolimit &= ~0xf0;
			conf->iobase |= 0xf0;
			conf->iobaseupper = cpu_to_le16(0x0000);
			conf->iolimitupper = cpu_to_le16(0x0000);
		}
		break;

	case PCI_MEMORY_BASE:
		if (mvebu_pcie_handle_membase_change(port)) {
			/* On error disable mem range */
			conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) & ~0xfff0);
			conf->memlimit = cpu_to_le16(le16_to_cpu(conf->memlimit) & ~0xfff0);
			conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) | 0xfff0);
		}
		break;

	case PCI_IO_BASE_UPPER16:
		if (mvebu_has_ioport(port) &&
		    mvebu_pcie_handle_iobase_change(port)) {
			/* On error disable IO range */
			conf->iobase &= ~0xf0;
			conf->iolimit &= ~0xf0;
			conf->iobase |= 0xf0;
			conf->iobaseupper = cpu_to_le16(0x0000);
			conf->iolimitupper = cpu_to_le16(0x0000);
		}
		break;

	case PCI_PRIMARY_BUS:
		/* Only the secondary bus number (bits 15:8) is HW backed. */
		if (mask & 0xff00)
			mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
		break;

	case PCI_INTERRUPT_LINE:
		/* Bridge control lives in the upper half of this dword. */
		if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
			u32 ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
			if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
				ctrl |= PCIE_CTRL_MASTER_HOT_RESET;
			else
				ctrl &= ~PCIE_CTRL_MASTER_HOT_RESET;
			mvebu_writel(port, ctrl, PCIE_CTRL_OFF);
		}
		break;

	default:
		break;
	}
}
793
/*
 * Emulated-bridge write hook for the PCIe capability: forward writes to
 * the hardware capability at PCIE_CAP_PCIEXP, with spec fixups.
 */
static void
mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCTL:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCTL:
		/*
		 * PCIe requires that the Enable Clock Power Management bit
		 * is hard-wired to zero for downstream ports but HW allows
		 * to change it.
		 */
		new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;

		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCI_EXP_SLTCTL:
		/*
		 * Allow to change PCIE_SSPL_ENABLE bit only when slot power
		 * limit was specified in DT and configured into HW.
		 */
		if ((mask & PCI_EXP_SLTCTL_ASPL_DISABLE) &&
		    port->slot_power_limit_value) {
			u32 sspl = mvebu_readl(port, PCIE_SSPL_OFF);
			if (new & PCI_EXP_SLTCTL_ASPL_DISABLE)
				sspl &= ~PCIE_SSPL_ENABLE;
			else
				sspl |= PCIE_SSPL_ENABLE;
			mvebu_writel(port, sspl, PCIE_SSPL_OFF);
		}
		break;

	case PCI_EXP_RTSTA:
		/*
		 * PME Status bit in Root Status Register (PCIE_RC_RTSTA)
		 * is read-only and can be cleared only by writing 0b to the
		 * Interrupt Cause RW0C register (PCIE_INT_CAUSE_OFF). So
		 * clear PME via Interrupt Cause.
		 */
		if (new & PCI_EXP_RTSTA_PME)
			mvebu_writel(port, ~PCIE_INT_PM_PME, PCIE_INT_CAUSE_OFF);
		break;

	case PCI_EXP_DEVCTL2:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
		break;

	case PCI_EXP_LNKCTL2:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
		break;

	default:
		break;
	}
}
855
/*
 * Emulated-bridge write hook for extended config space: forward AER
 * register writes to the hardware block at PCIE_CAP_PCIERR_OFF.
 */
static void
mvebu_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
				     int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	/* These are W1C registers, so clear other bits */
	case PCI_ERR_UNCOR_STATUS:
	case PCI_ERR_COR_STATUS:
	case PCI_ERR_ROOT_STATUS:
		new &= mask;
		fallthrough;

	case PCI_ERR_UNCOR_MASK:
	case PCI_ERR_UNCOR_SEVER:
	case PCI_ERR_COR_MASK:
	case PCI_ERR_CAP:
	case PCI_ERR_HEADER_LOG+0:
	case PCI_ERR_HEADER_LOG+4:
	case PCI_ERR_HEADER_LOG+8:
	case PCI_ERR_HEADER_LOG+12:
	case PCI_ERR_ROOT_COMMAND:
	case PCI_ERR_ROOT_ERR_SRC:
		mvebu_writel(port, new, PCIE_CAP_PCIERR_OFF + reg);
		break;

	default:
		break;
	}
}
887
/* Callback table binding the emulated bridge to the hardware hooks above. */
static const struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
	.read_base = mvebu_pci_bridge_emul_base_conf_read,
	.write_base = mvebu_pci_bridge_emul_base_conf_write,
	.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
	.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
	.read_ext = mvebu_pci_bridge_emul_ext_conf_read,
	.write_ext = mvebu_pci_bridge_emul_ext_conf_write,
};
896
897/*
898 * Initialize the configuration space of the PCI-to-PCI bridge
899 * associated with the given PCIe interface.
900 */
static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
{
	unsigned int bridge_flags = PCI_BRIDGE_EMUL_NO_PREFMEM_FORWARD;
	struct pci_bridge_emul *bridge = &port->bridge;
	u32 dev_id = mvebu_readl(port, PCIE_DEV_ID_OFF);
	u32 dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
	u32 ssdev_id = mvebu_readl(port, PCIE_SSDEV_ID_OFF);
	u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP);
	u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS);

	bridge->conf.vendor = cpu_to_le16(dev_id & 0xffff);
	bridge->conf.device = cpu_to_le16(dev_id >> 16);
	/* Keep only the revision byte; the class code is HW/emul managed. */
	bridge->conf.class_revision = cpu_to_le32(dev_rev & 0xff);

	if (mvebu_has_ioport(port)) {
		/* We support 32 bits I/O addressing */
		bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
		bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
	} else {
		bridge_flags |= PCI_BRIDGE_EMUL_NO_IO_FORWARD;
	}

	/*
	 * Older mvebu hardware provides PCIe Capability structure only in
	 * version 1. New hardware provides it in version 2.
	 * Enable slot support which is emulated.
	 */
	bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver | PCI_EXP_FLAGS_SLOT);

	/*
	 * Set Presence Detect State bit permanently as there is no support for
	 * unplugging PCIe card from the slot. Assume that PCIe card is always
	 * connected in slot.
	 *
	 * Set physical slot number to port+1 as mvebu ports are indexed from
	 * zero and zero value is reserved for ports within the same silicon
	 * as Root Port which is not mvebu case.
	 *
	 * Also set correct slot power limit.
	 */
	bridge->pcie_conf.slotcap = cpu_to_le32(
		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, port->slot_power_limit_value) |
		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, port->slot_power_limit_scale) |
		FIELD_PREP(PCI_EXP_SLTCAP_PSN, port->port+1));
	bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);

	bridge->subsystem_vendor_id = ssdev_id & 0xffff;
	bridge->subsystem_id = ssdev_id >> 16;
	bridge->has_pcie = true;
	bridge->pcie_start = PCIE_CAP_PCIEXP;
	bridge->data = port;
	bridge->ops = &mvebu_pci_bridge_emul_ops;

	return pci_bridge_emul_init(bridge, bridge_flags);
}
956
/*
 * Retrieve driver state from ARM pci_sys_data private data.
 * NOTE(review): no caller visible in this part of the file — possibly a
 * leftover from the legacy ARM PCI setup path; verify before relying on it.
 */
static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}
961
962static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
963 struct pci_bus *bus,
964 int devfn)
965{
966 int i;
967
968 for (i = 0; i < pcie->nports; i++) {
969 struct mvebu_pcie_port *port = &pcie->ports[i];
970
971 if (!port->base)
972 continue;
973
974 if (bus->number == 0 && port->devfn == devfn)
975 return port;
976 if (bus->number != 0 &&
977 bus->number >= port->bridge.conf.secondary_bus &&
978 bus->number <= port->bridge.conf.subordinate_bus)
979 return port;
980 }
981
982 return NULL;
983}
984
985/* PCI configuration space write function */
986static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
987 int where, int size, u32 val)
988{
989 struct mvebu_pcie *pcie = bus->sysdata;
990 struct mvebu_pcie_port *port;
991
992 port = mvebu_pcie_find_port(pcie, bus, devfn);
993 if (!port)
994 return PCIBIOS_DEVICE_NOT_FOUND;
995
996 return pci_bridge_emul_conf_write(&port->bridge, where, size, val);
997}
998
999/* PCI configuration space read function */
1000static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
1001 int size, u32 *val)
1002{
1003 struct mvebu_pcie *pcie = bus->sysdata;
1004 struct mvebu_pcie_port *port;
1005
1006 port = mvebu_pcie_find_port(pcie, bus, devfn);
1007 if (!port)
1008 return PCIBIOS_DEVICE_NOT_FOUND;
1009
1010 return pci_bridge_emul_conf_read(&port->bridge, where, size, val);
1011}
1012
/* Root-bus config accessors: served by the emulated bridge config space. */
static struct pci_ops mvebu_pcie_ops = {
	.read = mvebu_pcie_rd_conf,
	.write = mvebu_pcie_wr_conf,
};
1017
/* Mask one INTx source by clearing its bit in the shared unmask register. */
static void mvebu_pcie_intx_irq_mask(struct irq_data *d)
{
	struct mvebu_pcie_port *port = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 unmask;

	/* irq_lock serializes this read-modify-write with unmask(). */
	raw_spin_lock_irqsave(&port->irq_lock, flags);
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	unmask &= ~PCIE_INT_INTX(hwirq);
	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}
1031
/* Unmask one INTx source by setting its bit in the shared unmask register. */
static void mvebu_pcie_intx_irq_unmask(struct irq_data *d)
{
	struct mvebu_pcie_port *port = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 unmask;

	/* irq_lock serializes this read-modify-write with mask(). */
	raw_spin_lock_irqsave(&port->irq_lock, flags);
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	unmask |= PCIE_INT_INTX(hwirq);
	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}
1045
/*
 * irq_chip for the per-port INTx domain; mask/unmask toggle the
 * individual INTx bits in PCIE_INT_UNMASK_OFF.
 */
static struct irq_chip intx_irq_chip = {
	.name = "mvebu-INTx",
	.irq_mask = mvebu_pcie_intx_irq_mask,
	.irq_unmask = mvebu_pcie_intx_irq_unmask,
};
1051
1052static int mvebu_pcie_intx_irq_map(struct irq_domain *h,
1053 unsigned int virq, irq_hw_number_t hwirq)
1054{
1055 struct mvebu_pcie_port *port = h->host_data;
1056
1057 irq_set_status_flags(virq, IRQ_LEVEL);
1058 irq_set_chip_and_handler(virq, &intx_irq_chip, handle_level_irq);
1059 irq_set_chip_data(virq, port);
1060
1061 return 0;
1062}
1063
/* Domain ops for the 4-entry INTx domain; DT cells map one-to-one. */
static const struct irq_domain_ops mvebu_pcie_intx_irq_domain_ops = {
	.map = mvebu_pcie_intx_irq_map,
	.xlate = irq_domain_xlate_onecell,
};
1068
1069static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port)
1070{
1071 struct device *dev = &port->pcie->pdev->dev;
1072 struct device_node *pcie_intc_node;
1073
1074 raw_spin_lock_init(&port->irq_lock);
1075
1076 pcie_intc_node = of_get_next_child(port->dn, NULL);
1077 if (!pcie_intc_node) {
1078 dev_err(dev, "No PCIe Intc node found for %s\n", port->name);
1079 return -ENODEV;
1080 }
1081
1082 port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
1083 &mvebu_pcie_intx_irq_domain_ops,
1084 port);
1085 of_node_put(pcie_intc_node);
1086 if (!port->intx_irq_domain) {
1087 dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name);
1088 return -ENOMEM;
1089 }
1090
1091 return 0;
1092}
1093
/*
 * Chained handler for the port's summary interrupt: dispatch every
 * pending, unmasked INTx bit into the per-port INTx IRQ domain.
 */
static void mvebu_pcie_irq_handler(struct irq_desc *desc)
{
	struct mvebu_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = &port->pcie->pdev->dev;
	u32 cause, unmask, status;
	int i;

	chained_irq_enter(chip, desc);

	/* Only sources both raised (cause) and enabled (unmask) are serviced. */
	cause = mvebu_readl(port, PCIE_INT_CAUSE_OFF);
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	status = cause & unmask;

	/* Process legacy INTx interrupts */
	for (i = 0; i < PCI_NUM_INTX; i++) {
		if (!(status & PCIE_INT_INTX(i)))
			continue;

		/* -EINVAL means no virq is mapped for this hwirq. */
		if (generic_handle_domain_irq(port->intx_irq_domain, i) == -EINVAL)
			dev_err_ratelimited(dev, "unexpected INT%c IRQ\n", (char)i+'A');
	}

	chained_irq_exit(chip, desc);
}
1119
1120static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
1121{
1122 /* Interrupt support on mvebu emulated bridges is not implemented yet */
1123 if (dev->bus->number == 0)
1124 return 0; /* Proper return code 0 == NO_IRQ */
1125
1126 return of_irq_parse_and_map_pci(dev, slot, pin);
1127}
1128
1129static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
1130 const struct resource *res,
1131 resource_size_t start,
1132 resource_size_t size,
1133 resource_size_t align)
1134{
1135 if (dev->bus->number != 0)
1136 return start;
1137
1138 /*
1139 * On the PCI-to-PCI bridge side, the I/O windows must have at
1140 * least a 64 KB size and the memory windows must have at
1141 * least a 1 MB size. Moreover, MBus windows need to have a
1142 * base address aligned on their size, and their size must be
1143 * a power of two. This means that if the BAR doesn't have a
1144 * power of two size, several MBus windows will actually be
1145 * created. We need to ensure that the biggest MBus window
1146 * (which will be the first one) is aligned on its size, which
1147 * explains the rounddown_pow_of_two() being done here.
1148 */
1149 if (res->flags & IORESOURCE_IO)
1150 return round_up(start, max_t(resource_size_t, SZ_64K,
1151 rounddown_pow_of_two(size)));
1152 else if (res->flags & IORESOURCE_MEM)
1153 return round_up(start, max_t(resource_size_t, SZ_1M,
1154 rounddown_pow_of_two(size)));
1155 else
1156 return start;
1157}
1158
1159static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
1160 struct device_node *np,
1161 struct mvebu_pcie_port *port)
1162{
1163 int ret = 0;
1164
1165 ret = of_address_to_resource(np, 0, &port->regs);
1166 if (ret)
1167 return (void __iomem *)ERR_PTR(ret);
1168
1169 return devm_ioremap_resource(&pdev->dev, &port->regs);
1170}
1171
/*
 * Helpers for decoding PCI "ranges" entries: the first (flags) cell
 * encodes the address-space type, while the parent (CPU) address
 * carries the MBus target and attribute in its top two bytes.
 */
#define DT_FLAGS_TO_TYPE(flags)       (((flags) >> 24) & 0x03)
#define    DT_TYPE_IO                 0x1
#define    DT_TYPE_MEM32              0x2
#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
#define DT_CPUADDR_TO_ATTR(cpuaddr)   (((cpuaddr) >> 48) & 0xFF)
1177
/*
 * Find the MBus target/attribute pair that the "ranges" property of
 * @np assigns to PCI slot PCI_SLOT(@devfn) for the given window
 * @type (IORESOURCE_IO or IORESOURCE_MEM).
 *
 * Returns 0 and fills @tgt/@attr on success, -EINVAL when @np has no
 * "ranges" property, -ENOENT when no matching entry exists.
 */
static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
			      unsigned long type,
			      unsigned int *tgt,
			      unsigned int *attr)
{
	/* PCI child address cells (na) and size cells (ns). */
	const int na = 3, ns = 2;
	const __be32 *range;
	int rlen, nranges, rangesz, pna, i;

	*tgt = -1;
	*attr = -1;

	range = of_get_property(np, "ranges", &rlen);
	if (!range)
		return -EINVAL;

	/* Each entry: child addr (na) + parent addr (pna) + size (ns). */
	pna = of_n_addr_cells(np);
	rangesz = pna + na + ns;
	nranges = rlen / sizeof(__be32) / rangesz;

	for (i = 0; i < nranges; i++, range += rangesz) {
		u32 flags = of_read_number(range, 1);
		u32 slot = of_read_number(range + 1, 1);
		u64 cpuaddr = of_read_number(range + na, pna);
		unsigned long rtype;

		if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
			rtype = IORESOURCE_IO;
		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
			rtype = IORESOURCE_MEM;
		else
			continue;

		/* Target/attribute are encoded in the parent (CPU) address. */
		if (slot == PCI_SLOT(devfn) && type == rtype) {
			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
			return 0;
		}
	}

	return -ENOENT;
}
1220
1221static int mvebu_pcie_suspend(struct device *dev)
1222{
1223 struct mvebu_pcie *pcie;
1224 int i;
1225
1226 pcie = dev_get_drvdata(dev);
1227 for (i = 0; i < pcie->nports; i++) {
1228 struct mvebu_pcie_port *port = pcie->ports + i;
1229 if (!port->base)
1230 continue;
1231 port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
1232 }
1233
1234 return 0;
1235}
1236
1237static int mvebu_pcie_resume(struct device *dev)
1238{
1239 struct mvebu_pcie *pcie;
1240 int i;
1241
1242 pcie = dev_get_drvdata(dev);
1243 for (i = 0; i < pcie->nports; i++) {
1244 struct mvebu_pcie_port *port = pcie->ports + i;
1245 if (!port->base)
1246 continue;
1247 mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
1248 mvebu_pcie_setup_hw(port);
1249 }
1250
1251 return 0;
1252}
1253
1254static void mvebu_pcie_port_clk_put(void *data)
1255{
1256 struct mvebu_pcie_port *port = data;
1257
1258 clk_put(port->clk);
1259}
1260
/*
 * Parse one DT port child node into @port.
 *
 * Returns 1 when the port was fully parsed and should be used, 0 when
 * the port must be skipped (missing/invalid properties, no clock), or
 * a negative errno on fatal errors (OOM, probe deferral).
 *
 * Note the two exit labels: "skip" frees the devm-allocated names and
 * reports 0 (non-fatal), while "err" returns ret as-is (fatal).
 */
static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
				 struct mvebu_pcie_port *port, struct device_node *child)
{
	struct device *dev = &pcie->pdev->dev;
	u32 slot_power_limit;
	int ret;
	u32 num_lanes;

	port->pcie = pcie;

	if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
		dev_warn(dev, "ignoring %pOF, missing pcie-port property\n",
			 child);
		goto skip;
	}

	/* The lane property is optional and defaults to lane 0. */
	if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
		port->lane = 0;

	if (!of_property_read_u32(child, "num-lanes", &num_lanes) && num_lanes == 4)
		port->is_x4 = true;

	port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
				    port->lane);
	if (!port->name) {
		ret = -ENOMEM;
		goto err;
	}

	port->devfn = of_pci_get_devfn(child);
	if (port->devfn < 0)
		goto skip;
	if (PCI_FUNC(port->devfn) != 0) {
		dev_err(dev, "%s: invalid function number, must be zero\n",
			port->name);
		goto skip;
	}

	/* A memory window target/attribute is mandatory for a usable port. */
	ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
				 &port->mem_target, &port->mem_attr);
	if (ret < 0) {
		dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
			port->name);
		goto skip;
	}

	/* The I/O window is optional; -1 marks it as absent. */
	if (resource_size(&pcie->io) != 0) {
		mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
				   &port->io_target, &port->io_attr);
	} else {
		port->io_target = -1;
		port->io_attr = -1;
	}

	/*
	 * Old DT bindings do not contain "intx" interrupt
	 * so do not fail probing driver when interrupt does not exist.
	 */
	port->intx_irq = of_irq_get_byname(child, "intx");
	if (port->intx_irq == -EPROBE_DEFER) {
		ret = port->intx_irq;
		goto err;
	}
	if (port->intx_irq <= 0) {
		dev_warn(dev, "%s: legacy INTx interrupts cannot be masked individually, "
			      "%pOF does not contain intx interrupt\n",
			 port->name, child);
	}

	port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
					  port->name);
	if (!port->reset_name) {
		ret = -ENOMEM;
		goto err;
	}

	/* Request PERST# asserted (logical high) until powerup deasserts it. */
	port->reset_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(child),
						 "reset", GPIOD_OUT_HIGH,
						 port->name);
	ret = PTR_ERR_OR_ZERO(port->reset_gpio);
	if (ret) {
		if (ret != -ENOENT)
			goto err;
		/* reset gpio is optional */
		port->reset_gpio = NULL;
		devm_kfree(dev, port->reset_name);
		port->reset_name = NULL;
	}

	slot_power_limit = of_pci_get_slot_power_limit(child,
				&port->slot_power_limit_value,
				&port->slot_power_limit_scale);
	if (slot_power_limit)
		dev_info(dev, "%s: Slot power limit %u.%uW\n",
			 port->name,
			 slot_power_limit / 1000,
			 (slot_power_limit / 100) % 10);

	port->clk = of_clk_get_by_name(child, NULL);
	if (IS_ERR(port->clk)) {
		dev_err(dev, "%s: cannot get clock\n", port->name);
		goto skip;
	}

	/* Balance of_clk_get_by_name() automatically on device teardown. */
	ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
	if (ret < 0) {
		clk_put(port->clk);
		goto err;
	}

	return 1;

skip:
	ret = 0;

	/* In the case of skipping, we need to free these */
	devm_kfree(dev, port->reset_name);
	port->reset_name = NULL;
	devm_kfree(dev, port->name);
	port->name = NULL;

err:
	return ret;
}
1385
1386/*
1387 * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs
1388 * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications
1389 * of the PCI Express Card Electromechanical Specification, 1.1.
1390 */
1391static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
1392{
1393 int ret;
1394
1395 ret = clk_prepare_enable(port->clk);
1396 if (ret < 0)
1397 return ret;
1398
1399 if (port->reset_gpio) {
1400 u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000;
1401
1402 of_property_read_u32(port->dn, "reset-delay-us",
1403 &reset_udelay);
1404
1405 udelay(100);
1406
1407 gpiod_set_value_cansleep(port->reset_gpio, 0);
1408 msleep(reset_udelay / 1000);
1409 }
1410
1411 return 0;
1412}
1413
/*
 * Power down a PCIe port. Strictly, PCIe requires us to place the card
 * in D3hot state before asserting PERST#.
 */
static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
{
	/* Assert PERST# first (no-op when no reset GPIO is present). */
	gpiod_set_value_cansleep(port->reset_gpio, 1);

	clk_disable_unprepare(port->clk);
}
1424
/*
 * devm_of_pci_get_host_bridge_resources() only sets up translatable
 * resources, so we need extra resource setup parsing our special DT
 * properties encoding the MEM and IO apertures.
 *
 * Returns 0 on success or a negative errno; the MEM aperture is
 * mandatory, the IO aperture is optional.
 */
static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	int ret;

	/* Get the PCIe memory aperture */
	mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
	if (resource_size(&pcie->mem) == 0) {
		dev_err(dev, "invalid memory aperture size\n");
		return -EINVAL;
	}

	pcie->mem.name = "PCI MEM";
	pci_add_resource(&bridge->windows, &pcie->mem);
	ret = devm_request_resource(dev, &iomem_resource, &pcie->mem);
	if (ret)
		return ret;

	/* Get the PCIe IO aperture */
	mvebu_mbus_get_pcie_io_aperture(&pcie->io);

	/* An empty IO aperture means this platform exposes no PCI I/O space. */
	if (resource_size(&pcie->io) != 0) {
		pcie->realio.flags = pcie->io.flags;
		pcie->realio.start = PCIBIOS_MIN_IO;
		pcie->realio.end = min_t(resource_size_t,
					 IO_SPACE_LIMIT - SZ_64K,
					 resource_size(&pcie->io) - 1);
		pcie->realio.name = "PCI I/O";

		/* Map the physical aperture into the logical I/O port range. */
		ret = devm_pci_remap_iospace(dev, &pcie->realio, pcie->io.start);
		if (ret)
			return ret;

		pci_add_resource(&bridge->windows, &pcie->realio);
		ret = devm_request_resource(dev, &ioport_resource, &pcie->realio);
		if (ret)
			return ret;
	}

	return 0;
}
1472
/*
 * Probe: allocate the host bridge, parse apertures and DT port nodes,
 * then power up and initialize each usable port. Failure of one port
 * is not fatal — it is powered down again and skipped — so the bridge
 * is registered with whatever ports came up.
 */
static int mvebu_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mvebu_pcie *pcie;
	struct pci_host_bridge *bridge;
	struct device_node *np = dev->of_node;
	struct device_node *child;
	int num, i, ret;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	ret = mvebu_pcie_parse_request_resources(pcie);
	if (ret)
		return ret;

	num = of_get_available_child_count(np);

	pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
	if (!pcie->ports)
		return -ENOMEM;

	/* Only ports parse_port accepts (ret == 1) occupy a slot in ports[]. */
	i = 0;
	for_each_available_child_of_node(np, child) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		ret = mvebu_pcie_parse_port(pcie, port, child);
		if (ret < 0) {
			of_node_put(child);
			return ret;
		} else if (ret == 0) {
			continue;
		}

		port->dn = child;
		i++;
	}
	pcie->nports = i;

	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];
		int irq = port->intx_irq;

		child = port->dn;
		if (!child)
			continue;

		ret = mvebu_pcie_powerup(port);
		if (ret < 0)
			continue;

		port->base = mvebu_pcie_map_registers(pdev, child, port);
		if (IS_ERR(port->base)) {
			dev_err(dev, "%s: cannot map registers\n", port->name);
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		ret = mvebu_pci_bridge_emul_init(port);
		if (ret < 0) {
			dev_err(dev, "%s: cannot init emulated bridge\n",
				port->name);
			devm_iounmap(dev, port->base);
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		/* Without an "intx" IRQ there is nothing to chain or demux. */
		if (irq > 0) {
			ret = mvebu_pcie_init_irq_domain(port);
			if (ret) {
				dev_err(dev, "%s: cannot init irq domain\n",
					port->name);
				pci_bridge_emul_cleanup(&port->bridge);
				devm_iounmap(dev, port->base);
				port->base = NULL;
				mvebu_pcie_powerdown(port);
				continue;
			}
			irq_set_chained_handler_and_data(irq,
							 mvebu_pcie_irq_handler,
							 port);
		}

		/*
		 * PCIe topology exported by mvebu hw is quite complicated. In
		 * reality has something like N fully independent host bridges
		 * where each host bridge has one PCIe Root Port (which acts as
		 * PCI Bridge device). Each host bridge has its own independent
		 * internal registers, independent access to PCI config space,
		 * independent interrupt lines, independent window and memory
		 * access configuration. But additionally there is some kind of
		 * peer-to-peer support between PCIe devices behind different
		 * host bridges limited just to forwarding of memory and I/O
		 * transactions (forwarding of error messages and config cycles
		 * is not supported). So we could say there are N independent
		 * PCIe Root Complexes.
		 *
		 * For this kind of setup DT should have been structured into
		 * N independent PCIe controllers / host bridges. But instead
		 * structure in past was defined to put PCIe Root Ports of all
		 * host bridges into one bus zero, like in classic multi-port
		 * Root Complex setup with just one host bridge.
		 *
		 * This means that pci-mvebu.c driver provides "virtual" bus 0
		 * on which registers all PCIe Root Ports (PCI Bridge devices)
		 * specified in DT by their BDF addresses and virtually routes
		 * PCI config access of each PCI bridge device to specific PCIe
		 * host bridge.
		 *
		 * Normally PCI Bridge should choose between Type 0 and Type 1
		 * config requests based on primary and secondary bus numbers
		 * configured on the bridge itself. But because mvebu PCI Bridge
		 * does not have registers for primary and secondary bus numbers
		 * in its config space, it determinates type of config requests
		 * via its own custom way.
		 *
		 * There are two options how mvebu determinate type of config
		 * request.
		 *
		 * 1. If Secondary Bus Number Enable bit is not set or is not
		 * available (applies for pre-XP PCIe controllers) then Type 0
		 * is used if target bus number equals Local Bus Number (bits
		 * [15:8] in register 0x1a04) and target device number differs
		 * from Local Device Number (bits [20:16] in register 0x1a04).
		 * Type 1 is used if target bus number differs from Local Bus
		 * Number. And when target bus number equals Local Bus Number
		 * and target device equals Local Device Number then request is
		 * routed to Local PCI Bridge (PCIe Root Port).
		 *
		 * 2. If Secondary Bus Number Enable bit is set (bit 7 in
		 * register 0x1a2c) then mvebu hw determinate type of config
		 * request like compliant PCI Bridge based on primary bus number
		 * which is configured via Local Bus Number (bits [15:8] in
		 * register 0x1a04) and secondary bus number which is configured
		 * via Secondary Bus Number (bits [7:0] in register 0x1a2c).
		 * Local PCI Bridge (PCIe Root Port) is available on primary bus
		 * as device with Local Device Number (bits [20:16] in register
		 * 0x1a04).
		 *
		 * Secondary Bus Number Enable bit is disabled by default and
		 * option 2. is not available on pre-XP PCIe controllers. Hence
		 * this driver always use option 1.
		 *
		 * Basically it means that primary and secondary buses shares
		 * one virtual number configured via Local Bus Number bits and
		 * Local Device Number bits determinates if accessing primary
		 * or secondary bus. Set Local Device Number to 1 and redirect
		 * all writes of PCI Bridge Secondary Bus Number register to
		 * Local Bus Number (bits [15:8] in register 0x1a04).
		 *
		 * So when accessing devices on buses behind secondary bus
		 * number it would work correctly. And also when accessing
		 * device 0 at secondary bus number via config space would be
		 * correctly routed to secondary bus. Due to issues described
		 * in mvebu_pcie_setup_hw(), PCI Bridges at primary bus (zero)
		 * are not accessed directly via PCI config space but rather
		 * indirectly via kernel emulated PCI bridge driver.
		 */
		mvebu_pcie_setup_hw(port);
		mvebu_pcie_set_local_dev_nr(port, 1);
		mvebu_pcie_set_local_bus_nr(port, 0);
	}

	bridge->sysdata = pcie;
	bridge->ops = &mvebu_pcie_ops;
	bridge->child_ops = &mvebu_pcie_child_ops;
	bridge->align_resource = mvebu_pcie_align_resource;
	bridge->map_irq = mvebu_pcie_map_irq;

	return pci_host_probe(bridge);
}
1651
/*
 * Teardown: remove the root bus with all devices, then quiesce each
 * active port (disable decoding, mask/clear interrupts, drop windows)
 * and finally power it down. The per-port ordering mirrors the reverse
 * of probe and must be preserved.
 */
static int mvebu_pcie_remove(struct platform_device *pdev)
{
	struct mvebu_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	u32 cmd, sspl;
	int i;

	/* Remove PCI bus with all devices. */
	pci_lock_rescan_remove();
	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	pci_unlock_rescan_remove();

	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];
		int irq = port->intx_irq;

		if (!port->base)
			continue;

		/* Disable Root Bridge I/O space, memory space and bus mastering. */
		cmd = mvebu_readl(port, PCIE_CMD_OFF);
		cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		mvebu_writel(port, cmd, PCIE_CMD_OFF);

		/* Mask all interrupt sources. */
		mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);

		/* Clear all interrupt causes. */
		mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);

		/* Detach the chained handler before removing its domain. */
		if (irq > 0)
			irq_set_chained_handler_and_data(irq, NULL, NULL);

		/* Remove IRQ domains. */
		if (port->intx_irq_domain)
			irq_domain_remove(port->intx_irq_domain);

		/* Free config space for emulated root bridge. */
		pci_bridge_emul_cleanup(&port->bridge);

		/* Disable sending Set_Slot_Power_Limit PCIe Message. */
		sspl = mvebu_readl(port, PCIE_SSPL_OFF);
		sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
		mvebu_writel(port, sspl, PCIE_SSPL_OFF);

		/* Disable and clear BARs and windows. */
		mvebu_pcie_disable_wins(port);

		/* Delete PCIe IO and MEM windows. */
		if (port->iowin.size)
			mvebu_pcie_del_windows(port, port->iowin.base, port->iowin.size);
		if (port->memwin.size)
			mvebu_pcie_del_windows(port, port->memwin.base, port->memwin.size);

		/* Power down card and disable clocks. Must be the last step. */
		mvebu_pcie_powerdown(port);
	}

	return 0;
}
1713
1714static const struct of_device_id mvebu_pcie_of_match_table[] = {
1715 { .compatible = "marvell,armada-xp-pcie", },
1716 { .compatible = "marvell,armada-370-pcie", },
1717 { .compatible = "marvell,dove-pcie", },
1718 { .compatible = "marvell,kirkwood-pcie", },
1719 {},
1720};
1721
/* Save/restore port state in the noirq phase of system sleep. */
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};
1725
/* Platform driver glue; matching is done via the OF table above. */
static struct platform_driver mvebu_pcie_driver = {
	.driver = {
		.name = "mvebu-pcie",
		.of_match_table = mvebu_pcie_of_match_table,
		.pm = &mvebu_pcie_pm_ops,
	},
	.probe = mvebu_pcie_probe,
	.remove = mvebu_pcie_remove,
};
module_platform_driver(mvebu_pcie_driver);
1736
/* Module metadata. */
MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@bootlin.com>");
MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_DESCRIPTION("Marvell EBU PCIe controller");
MODULE_LICENSE("GPL v2");