1/*
2 * PCI / PCI-X / PCI-Express support for 4xx parts
3 *
4 * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
5 *
6 * Most PCI Express code is coming from Stefan Roese implementation for
7 * arch/ppc in the Denx tree, slightly reworked by me.
8 *
9 * Copyright 2007 DENX Software Engineering, Stefan Roese <sr@denx.de>
10 *
11 * Some of that comes itself from a previous implementation for 440SPE only
12 * by Roland Dreier:
13 *
14 * Copyright (c) 2005 Cisco Systems. All rights reserved.
15 * Roland Dreier <rolandd@cisco.com>
16 *
17 */
18
19#undef DEBUG
20
21#include <linux/kernel.h>
22#include <linux/pci.h>
23#include <linux/init.h>
24#include <linux/of.h>
25#include <linux/delay.h>
26#include <linux/slab.h>
27
28#include <asm/io.h>
29#include <asm/pci-bridge.h>
30#include <asm/machdep.h>
31#include <asm/dcr.h>
32#include <asm/dcr-regs.h>
33#include <mm/mmu_decl.h>
34
35#include "ppc4xx_pci.h"
36
37static int dma_offset_set;
38
39#define U64_TO_U32_LOW(val) ((u32)((val) & 0x00000000ffffffffULL))
40#define U64_TO_U32_HIGH(val) ((u32)((val) >> 32))
41
42#define RES_TO_U32_LOW(val) \
43 ((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_LOW(val) : (val))
44#define RES_TO_U32_HIGH(val) \
45 ((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_HIGH(val) : (0))
46
47static inline int ppc440spe_revA(void)
48{
49 /* Catch both 440SPe variants, with and without RAID6 support */
50 if ((mfspr(SPRN_PVR) & 0xffefffff) == 0x53421890)
51 return 1;
52 else
53 return 0;
54}
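
/* For illustration (derived from the mask used above, not from the 440SPe
 * datasheet): 0xffefffff clears bit 20 of the PVR, so two PVR values that
 * differ only in that bit, e.g.
 *
 *	0x53421890 & 0xffefffff == 0x53421890
 *	0x53521890 & 0xffefffff == 0x53421890
 *
 * both compare equal to 0x53421890, which is how a single test catches
 * both rev A variants.
 */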
55
56static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev)
57{
58 struct pci_controller *hose;
59 int i;
60
61 if (dev->devfn != 0 || dev->bus->self != NULL)
62 return;
63
64 hose = pci_bus_to_host(dev->bus);
65 if (hose == NULL)
66 return;
67
68 if (!of_device_is_compatible(hose->dn, "ibm,plb-pciex") &&
69 !of_device_is_compatible(hose->dn, "ibm,plb-pcix") &&
70 !of_device_is_compatible(hose->dn, "ibm,plb-pci"))
71 return;
72
73 if (of_device_is_compatible(hose->dn, "ibm,plb440epx-pci") ||
74 of_device_is_compatible(hose->dn, "ibm,plb440grx-pci")) {
75 hose->indirect_type |= PPC_INDIRECT_TYPE_BROKEN_MRM;
76 }
77
	/* Hide the PCI host BARs from the kernel as their content doesn't
	 * fit well with the kernel's resource management
	 */
81 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
82 dev->resource[i].start = dev->resource[i].end = 0;
83 dev->resource[i].flags = 0;
84 }
85
86 printk(KERN_INFO "PCI: Hiding 4xx host bridge resources %s\n",
87 pci_name(dev));
88}
89DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, fixup_ppc4xx_pci_bridge);
90
91static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
92 void __iomem *reg,
93 struct resource *res)
94{
95 u64 size;
96 const u32 *ranges;
97 int rlen;
98 int pna = of_n_addr_cells(hose->dn);
99 int np = pna + 5;
100
101 /* Default */
102 res->start = 0;
103 size = 0x80000000;
104 res->end = size - 1;
105 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
106
107 /* Get dma-ranges property */
108 ranges = of_get_property(hose->dn, "dma-ranges", &rlen);
109 if (ranges == NULL)
110 goto out;
111
112 /* Walk it */
113 while ((rlen -= np * 4) >= 0) {
114 u32 pci_space = ranges[0];
115 u64 pci_addr = of_read_number(ranges + 1, 2);
116 u64 cpu_addr = of_translate_dma_address(hose->dn, ranges + 3);
117 size = of_read_number(ranges + pna + 3, 2);
118 ranges += np;
119 if (cpu_addr == OF_BAD_ADDR || size == 0)
120 continue;
121
122 /* We only care about memory */
123 if ((pci_space & 0x03000000) != 0x02000000)
124 continue;
125
		/* We currently only support memory at CPU address 0, and a
		 * pci_addr within the 32-bit address space
		 */
129 if (cpu_addr != 0 || pci_addr > 0xffffffff) {
130 printk(KERN_WARNING "%s: Ignored unsupported dma range"
131 " 0x%016llx...0x%016llx -> 0x%016llx\n",
132 hose->dn->full_name,
133 pci_addr, pci_addr + size - 1, cpu_addr);
134 continue;
135 }
136
137 /* Check if not prefetchable */
138 if (!(pci_space & 0x40000000))
139 res->flags &= ~IORESOURCE_PREFETCH;
140
141
142 /* Use that */
143 res->start = pci_addr;
144 /* Beware of 32 bits resources */
145 if (sizeof(resource_size_t) == sizeof(u32) &&
146 (pci_addr + size) > 0x100000000ull)
147 res->end = 0xffffffff;
148 else
149 res->end = res->start + size - 1;
150 break;
151 }
152
153 /* We only support one global DMA offset */
154 if (dma_offset_set && pci_dram_offset != res->start) {
155 printk(KERN_ERR "%s: dma-ranges(s) mismatch\n",
156 hose->dn->full_name);
157 return -ENXIO;
158 }
159
160 /* Check that we can fit all of memory as we don't support
161 * DMA bounce buffers
162 */
163 if (size < total_memory) {
164 printk(KERN_ERR "%s: dma-ranges too small "
165 "(size=%llx total_memory=%llx)\n",
166 hose->dn->full_name, size, (u64)total_memory);
167 return -ENXIO;
168 }
169
170 /* Check we are a power of 2 size and that base is a multiple of size*/
171 if ((size & (size - 1)) != 0 ||
172 (res->start & (size - 1)) != 0) {
173 printk(KERN_ERR "%s: dma-ranges unaligned\n",
174 hose->dn->full_name);
175 return -ENXIO;
176 }
177
178 /* Check that we are fully contained within 32 bits space if we are not
179 * running on a 460sx or 476fpe which have 64 bit bus addresses.
180 */
181 if (res->end > 0xffffffff &&
182 !(of_device_is_compatible(hose->dn, "ibm,plb-pciex-460sx")
183 || of_device_is_compatible(hose->dn, "ibm,plb-pciex-476fpe"))) {
184 printk(KERN_ERR "%s: dma-ranges outside of 32 bits space\n",
185 hose->dn->full_name);
186 return -ENXIO;
187 }
188 out:
189 dma_offset_set = 1;
190 pci_dram_offset = res->start;
191 hose->dma_window_base_cur = res->start;
192 hose->dma_window_size = resource_size(res);
193
194 printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n",
195 pci_dram_offset);
196 printk(KERN_INFO "4xx PCI DMA window base to 0x%016llx\n",
197 (unsigned long long)hose->dma_window_base_cur);
198 printk(KERN_INFO "DMA window size 0x%016llx\n",
199 (unsigned long long)hose->dma_window_size);
200 return 0;
201}
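
/* A minimal, illustrative dma-ranges entry of the shape parsed above (not
 * copied from any particular board dts), assuming the parent bus uses
 * #address-cells = <1> so that each entry is pna + 5 = 6 cells:
 *
 *	dma-ranges = <0x42000000  0x0 0x00000000  0x00000000  0x0 0x80000000>;
 *	              pci_space   pci_addr (2)    cpu_addr    size (2)
 *
 * pci_space 0x42000000 is prefetchable 32-bit memory space, so the code
 * above keeps IORESOURCE_PREFETCH and programs a 2GB inbound window:
 * res->start = 0, res->end = 0x7fffffff, pci_dram_offset = 0.
 */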
202
203/*
204 * 4xx PCI 2.x part
205 */
206
207static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose,
208 void __iomem *reg,
209 u64 plb_addr,
210 u64 pci_addr,
211 u64 size,
212 unsigned int flags,
213 int index)
214{
215 u32 ma, pcila, pciha;
216
	/* Hack warning! The "old" PCI 2.x cell only lets us configure the low
	 * 32 bits of incoming PLB addresses. The top 4 bits of the 36-bit
	 * address are actually hard-wired to a value that appears to depend
	 * on the specific SoC. For example, it's 0 on 440EP and 1 on 440EPx.
	 *
	 * The trick here is that we just crop those top bits and ignore them
	 * when programming the chip. That means the device-tree has to be
	 * right for the specific part used (we don't print a warning if it's
	 * wrong but, on the other hand, you'll crash quickly enough), but at
	 * least this code should work whatever the hard-coded value is.
	 */
228 plb_addr &= 0xffffffffull;
229
	/* Note: Due to the above hack, the test below doesn't actually test
	 * whether the address is above 4G, but it tests that the address and
	 * (address + size) are both contained in the same 4G window.
	 */
234 if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
235 size < 0x1000 || (plb_addr & (size - 1)) != 0) {
236 printk(KERN_WARNING "%s: Resource out of range\n",
237 hose->dn->full_name);
238 return -1;
239 }
240 ma = (0xffffffffu << ilog2(size)) | 1;
241 if (flags & IORESOURCE_PREFETCH)
242 ma |= 2;
243
244 pciha = RES_TO_U32_HIGH(pci_addr);
245 pcila = RES_TO_U32_LOW(pci_addr);
246
247 writel(plb_addr, reg + PCIL0_PMM0LA + (0x10 * index));
248 writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * index));
249 writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * index));
250 writel(ma, reg + PCIL0_PMM0MA + (0x10 * index));
251
252 return 0;
253}
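
/* Worked example of the register values computed above (illustrative
 * numbers, not tied to a specific board): a 256MB non-prefetchable window
 * at PLB 0x80000000 mapped 1:1 to PCI gives
 *
 *	ma        = (0xffffffff << ilog2(0x10000000)) | 1 = 0xf0000001
 *	PMM0LA    = 0x80000000	(PLB base)
 *	PMM0PCILA = 0x80000000	(PCI base, low)
 *	PMM0PCIHA = 0x00000000	(PCI base, high)
 *
 * A prefetchable window would additionally set bit 1, i.e. ma = 0xf0000003.
 */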
254
255static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
256 void __iomem *reg)
257{
258 int i, j, found_isa_hole = 0;
259
260 /* Setup outbound memory windows */
261 for (i = j = 0; i < 3; i++) {
262 struct resource *res = &hose->mem_resources[i];
263 resource_size_t offset = hose->mem_offset[i];
264
265 /* we only care about memory windows */
266 if (!(res->flags & IORESOURCE_MEM))
267 continue;
268 if (j > 2) {
269 printk(KERN_WARNING "%s: Too many ranges\n",
270 hose->dn->full_name);
271 break;
272 }
273
274 /* Configure the resource */
275 if (ppc4xx_setup_one_pci_PMM(hose, reg,
276 res->start,
277 res->start - offset,
278 resource_size(res),
279 res->flags,
280 j) == 0) {
281 j++;
282
283 /* If the resource PCI address is 0 then we have our
284 * ISA memory hole
285 */
286 if (res->start == offset)
287 found_isa_hole = 1;
288 }
289 }
290
291 /* Handle ISA memory hole if not already covered */
292 if (j <= 2 && !found_isa_hole && hose->isa_mem_size)
293 if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0,
294 hose->isa_mem_size, 0, j) == 0)
295 printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
296 hose->dn->full_name);
297}
298
299static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
300 void __iomem *reg,
301 const struct resource *res)
302{
303 resource_size_t size = resource_size(res);
304 u32 sa;
305
	/* Calculate window size (low bit marks the window valid) */
	sa = (0xffffffffu << ilog2(size)) | 1;
309
310 /* RAM is always at 0 local for now */
311 writel(0, reg + PCIL0_PTM1LA);
312 writel(sa, reg + PCIL0_PTM1MS);
313
314 /* Map on PCI side */
315 early_write_config_dword(hose, hose->first_busno, 0,
316 PCI_BASE_ADDRESS_1, res->start);
317 early_write_config_dword(hose, hose->first_busno, 0,
318 PCI_BASE_ADDRESS_2, 0x00000000);
319 early_write_config_word(hose, hose->first_busno, 0,
320 PCI_COMMAND, 0x0006);
321}
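
/* For example (illustrative size), with 128MB of system memory the inbound
 * window programmed above would be
 *
 *	PTM1LA  = 0x00000000	(RAM at local address 0)
 *	PTM1MS  = (0xffffffff << ilog2(0x08000000)) | 1 = 0xf8000001
 *	BAR1    = res->start	(PCI-side base of the DMA window)
 *	COMMAND = 0x0006	(memory space + bus master enable)
 */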
322
323static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
324{
326 struct resource rsrc_cfg;
327 struct resource rsrc_reg;
328 struct resource dma_window;
329 struct pci_controller *hose = NULL;
330 void __iomem *reg = NULL;
331 const int *bus_range;
332 int primary = 0;
333
334 /* Check if device is enabled */
335 if (!of_device_is_available(np)) {
336 printk(KERN_INFO "%s: Port disabled via device-tree\n",
337 np->full_name);
338 return;
339 }
340
341 /* Fetch config space registers address */
342 if (of_address_to_resource(np, 0, &rsrc_cfg)) {
343 printk(KERN_ERR "%s: Can't get PCI config register base !",
344 np->full_name);
345 return;
346 }
347 /* Fetch host bridge internal registers address */
348 if (of_address_to_resource(np, 3, &rsrc_reg)) {
349 printk(KERN_ERR "%s: Can't get PCI internal register base !",
350 np->full_name);
351 return;
352 }
353
354 /* Check if primary bridge */
355 if (of_get_property(np, "primary", NULL))
356 primary = 1;
357
358 /* Get bus range if any */
359 bus_range = of_get_property(np, "bus-range", NULL);
360
361 /* Map registers */
362 reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
363 if (reg == NULL) {
364 printk(KERN_ERR "%s: Can't map registers !", np->full_name);
365 goto fail;
366 }
367
368 /* Allocate the host controller data structure */
369 hose = pcibios_alloc_controller(np);
370 if (!hose)
371 goto fail;
372
373 hose->first_busno = bus_range ? bus_range[0] : 0x0;
374 hose->last_busno = bus_range ? bus_range[1] : 0xff;
375
376 /* Setup config space */
377 setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0);
378
379 /* Disable all windows */
380 writel(0, reg + PCIL0_PMM0MA);
381 writel(0, reg + PCIL0_PMM1MA);
382 writel(0, reg + PCIL0_PMM2MA);
383 writel(0, reg + PCIL0_PTM1MS);
384 writel(0, reg + PCIL0_PTM2MS);
385
386 /* Parse outbound mapping resources */
387 pci_process_bridge_OF_ranges(hose, np, primary);
388
389 /* Parse inbound mapping resources */
390 if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
391 goto fail;
392
393 /* Configure outbound ranges POMs */
394 ppc4xx_configure_pci_PMMs(hose, reg);
395
396 /* Configure inbound ranges PIMs */
397 ppc4xx_configure_pci_PTMs(hose, reg, &dma_window);
398
399 /* We don't need the registers anymore */
400 iounmap(reg);
401 return;
402
403 fail:
404 if (hose)
405 pcibios_free_controller(hose);
406 if (reg)
407 iounmap(reg);
408}
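
/* Sketch of the kind of device-tree node this probe expects (simplified
 * and illustrative -- the addresses are made up; the generic "ibm,plb-pci"
 * compatible is the one this file matches on). Boards provide at least
 * four "reg" entries, of which only index 0 (config-space access) and
 * index 3 (the bridge's internal PMM/PTM registers) are used above:
 *
 *	pci0: pci@ec000000 {
 *		compatible = "ibm,plb440ep-pci", "ibm,plb-pci";
 *		primary;
 *		reg = <...	// index 0: config addr/data pair
 *		       ...	// index 1, 2: unused here
 *		       ...
 *		       ...>;	// index 3: internal bridge registers
 *		bus-range = <0x0 0x0>;
 *		ranges = < ... outbound windows ... >;
 *		dma-ranges = < ... inbound window ... >;
 *	};
 */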
409
410/*
411 * 4xx PCI-X part
412 */
413
414static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller *hose,
415 void __iomem *reg,
416 u64 plb_addr,
417 u64 pci_addr,
418 u64 size,
419 unsigned int flags,
420 int index)
421{
422 u32 lah, lal, pciah, pcial, sa;
423
424 if (!is_power_of_2(size) || size < 0x1000 ||
425 (plb_addr & (size - 1)) != 0) {
426 printk(KERN_WARNING "%s: Resource out of range\n",
427 hose->dn->full_name);
428 return -1;
429 }
430
431 /* Calculate register values */
432 lah = RES_TO_U32_HIGH(plb_addr);
433 lal = RES_TO_U32_LOW(plb_addr);
434 pciah = RES_TO_U32_HIGH(pci_addr);
435 pcial = RES_TO_U32_LOW(pci_addr);
436 sa = (0xffffffffu << ilog2(size)) | 0x1;
437
438 /* Program register values */
439 if (index == 0) {
440 writel(lah, reg + PCIX0_POM0LAH);
441 writel(lal, reg + PCIX0_POM0LAL);
442 writel(pciah, reg + PCIX0_POM0PCIAH);
443 writel(pcial, reg + PCIX0_POM0PCIAL);
444 writel(sa, reg + PCIX0_POM0SA);
445 } else {
446 writel(lah, reg + PCIX0_POM1LAH);
447 writel(lal, reg + PCIX0_POM1LAL);
448 writel(pciah, reg + PCIX0_POM1PCIAH);
449 writel(pcial, reg + PCIX0_POM1PCIAL);
450 writel(sa, reg + PCIX0_POM1SA);
451 }
452
453 return 0;
454}
455
456static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
457 void __iomem *reg)
458{
459 int i, j, found_isa_hole = 0;
460
461 /* Setup outbound memory windows */
462 for (i = j = 0; i < 3; i++) {
463 struct resource *res = &hose->mem_resources[i];
464 resource_size_t offset = hose->mem_offset[i];
465
466 /* we only care about memory windows */
467 if (!(res->flags & IORESOURCE_MEM))
468 continue;
469 if (j > 1) {
470 printk(KERN_WARNING "%s: Too many ranges\n",
471 hose->dn->full_name);
472 break;
473 }
474
475 /* Configure the resource */
476 if (ppc4xx_setup_one_pcix_POM(hose, reg,
477 res->start,
478 res->start - offset,
479 resource_size(res),
480 res->flags,
481 j) == 0) {
482 j++;
483
484 /* If the resource PCI address is 0 then we have our
485 * ISA memory hole
486 */
487 if (res->start == offset)
488 found_isa_hole = 1;
489 }
490 }
491
492 /* Handle ISA memory hole if not already covered */
493 if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
494 if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0,
495 hose->isa_mem_size, 0, j) == 0)
496 printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
497 hose->dn->full_name);
498}
499
500static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
501 void __iomem *reg,
502 const struct resource *res,
503 int big_pim,
504 int enable_msi_hole)
505{
506 resource_size_t size = resource_size(res);
507 u32 sa;
508
509 /* RAM is always at 0 */
510 writel(0x00000000, reg + PCIX0_PIM0LAH);
511 writel(0x00000000, reg + PCIX0_PIM0LAL);
512
	/* Calculate window size (low bit marks the window valid) */
	sa = (0xffffffffu << ilog2(size)) | 1;
516 if (res->flags & IORESOURCE_PREFETCH)
517 sa |= 0x2;
518 if (enable_msi_hole)
519 sa |= 0x4;
520 writel(sa, reg + PCIX0_PIM0SA);
521 if (big_pim)
522 writel(0xffffffff, reg + PCIX0_PIM0SAH);
523
524 /* Map on PCI side */
525 writel(0x00000000, reg + PCIX0_BAR0H);
526 writel(res->start, reg + PCIX0_BAR0L);
527 writew(0x0006, reg + PCIX0_COMMAND);
528}
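
/* Reading the code above, the PIM0SA low bits are used as follows (an
 * inference from this driver, not a statement from the PCI-X bridge
 * manual): bit 0 enables the window, bit 1 marks it prefetchable and
 * bit 2 opens the MSI hole. E.g. a 512MB prefetchable DMA window gives
 *
 *	sa = (0xffffffff << ilog2(0x20000000)) | 0x1 | 0x2 = 0xe0000003
 *
 * and on parts with large PIMs, PIM0SAH = 0xffffffff extends the mask so
 * that windows of 4GB and more can be expressed.
 */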
529
530static void __init ppc4xx_probe_pcix_bridge(struct device_node *np)
531{
532 struct resource rsrc_cfg;
533 struct resource rsrc_reg;
534 struct resource dma_window;
535 struct pci_controller *hose = NULL;
536 void __iomem *reg = NULL;
537 const int *bus_range;
538 int big_pim = 0, msi = 0, primary = 0;
539
540 /* Fetch config space registers address */
541 if (of_address_to_resource(np, 0, &rsrc_cfg)) {
		printk(KERN_ERR "%s: Can't get PCI-X config register base !",
543 np->full_name);
544 return;
545 }
546 /* Fetch host bridge internal registers address */
547 if (of_address_to_resource(np, 3, &rsrc_reg)) {
548 printk(KERN_ERR "%s: Can't get PCI-X internal register base !",
549 np->full_name);
550 return;
551 }
552
553 /* Check if it supports large PIMs (440GX) */
554 if (of_get_property(np, "large-inbound-windows", NULL))
555 big_pim = 1;
556
557 /* Check if we should enable MSIs inbound hole */
558 if (of_get_property(np, "enable-msi-hole", NULL))
559 msi = 1;
560
561 /* Check if primary bridge */
562 if (of_get_property(np, "primary", NULL))
563 primary = 1;
564
565 /* Get bus range if any */
566 bus_range = of_get_property(np, "bus-range", NULL);
567
568 /* Map registers */
569 reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
570 if (reg == NULL) {
571 printk(KERN_ERR "%s: Can't map registers !", np->full_name);
572 goto fail;
573 }
574
575 /* Allocate the host controller data structure */
576 hose = pcibios_alloc_controller(np);
577 if (!hose)
578 goto fail;
579
580 hose->first_busno = bus_range ? bus_range[0] : 0x0;
581 hose->last_busno = bus_range ? bus_range[1] : 0xff;
582
583 /* Setup config space */
584 setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4,
585 PPC_INDIRECT_TYPE_SET_CFG_TYPE);
586
587 /* Disable all windows */
588 writel(0, reg + PCIX0_POM0SA);
589 writel(0, reg + PCIX0_POM1SA);
590 writel(0, reg + PCIX0_POM2SA);
591 writel(0, reg + PCIX0_PIM0SA);
592 writel(0, reg + PCIX0_PIM1SA);
593 writel(0, reg + PCIX0_PIM2SA);
594 if (big_pim) {
595 writel(0, reg + PCIX0_PIM0SAH);
596 writel(0, reg + PCIX0_PIM2SAH);
597 }
598
599 /* Parse outbound mapping resources */
600 pci_process_bridge_OF_ranges(hose, np, primary);
601
602 /* Parse inbound mapping resources */
603 if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
604 goto fail;
605
606 /* Configure outbound ranges POMs */
607 ppc4xx_configure_pcix_POMs(hose, reg);
608
609 /* Configure inbound ranges PIMs */
610 ppc4xx_configure_pcix_PIMs(hose, reg, &dma_window, big_pim, msi);
611
612 /* We don't need the registers anymore */
613 iounmap(reg);
614 return;
615
616 fail:
617 if (hose)
618 pcibios_free_controller(hose);
619 if (reg)
620 iounmap(reg);
621}
622
623#ifdef CONFIG_PPC4xx_PCI_EXPRESS
624
625/*
626 * 4xx PCI-Express part
627 *
628 * We support 3 parts currently based on the compatible property:
629 *
630 * ibm,plb-pciex-440spe
631 * ibm,plb-pciex-405ex
632 * ibm,plb-pciex-460ex
633 *
634 * Anything else will be rejected for now as they are all subtly
635 * different unfortunately.
636 *
637 */
638
639#define MAX_PCIE_BUS_MAPPED 0x40
640
641struct ppc4xx_pciex_port
642{
643 struct pci_controller *hose;
644 struct device_node *node;
645 unsigned int index;
646 int endpoint;
647 int link;
648 int has_ibpre;
649 unsigned int sdr_base;
650 dcr_host_t dcrs;
651 struct resource cfg_space;
652 struct resource utl_regs;
653 void __iomem *utl_base;
654};
655
656static struct ppc4xx_pciex_port *ppc4xx_pciex_ports;
657static unsigned int ppc4xx_pciex_port_count;
658
659struct ppc4xx_pciex_hwops
660{
661 bool want_sdr;
662 int (*core_init)(struct device_node *np);
663 int (*port_init_hw)(struct ppc4xx_pciex_port *port);
664 int (*setup_utl)(struct ppc4xx_pciex_port *port);
665 void (*check_link)(struct ppc4xx_pciex_port *port);
666};
667
668static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops;
669
670static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
671 unsigned int sdr_offset,
672 unsigned int mask,
673 unsigned int value,
674 int timeout_ms)
675{
676 u32 val;
677
	while (timeout_ms--) {
679 val = mfdcri(SDR0, port->sdr_base + sdr_offset);
680 if ((val & mask) == value) {
681 pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n",
682 port->index, sdr_offset, timeout_ms, val);
683 return 0;
684 }
685 msleep(1);
686 }
687 return -1;
688}
689
690static int __init ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port *port)
691{
692 /* Wait for reset to complete */
693 if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) {
694 printk(KERN_WARNING "PCIE%d: PGRST failed\n",
695 port->index);
696 return -1;
697 }
698 return 0;
699}
700
701
702static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port)
703{
704 printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
705
706 /* Check for card presence detect if supported, if not, just wait for
707 * link unconditionally.
708 *
709 * note that we don't fail if there is no link, we just filter out
710 * config space accesses. That way, it will be easier to implement
711 * hotplug later on.
712 */
713 if (!port->has_ibpre ||
714 !ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
715 1 << 28, 1 << 28, 100)) {
716 printk(KERN_INFO
717 "PCIE%d: Device detected, waiting for link...\n",
718 port->index);
719 if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
720 0x1000, 0x1000, 2000))
721 printk(KERN_WARNING
722 "PCIE%d: Link up failed\n", port->index);
723 else {
724 printk(KERN_INFO
725 "PCIE%d: link is up !\n", port->index);
726 port->link = 1;
727 }
728 } else
729 printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
730}
731
732#ifdef CONFIG_44x
733
734/* Check various reset bits of the 440SPe PCIe core */
735static int __init ppc440spe_pciex_check_reset(struct device_node *np)
736{
737 u32 valPE0, valPE1, valPE2;
738 int err = 0;
739
740 /* SDR0_PEGPLLLCT1 reset */
741 if (!(mfdcri(SDR0, PESDR0_PLLLCT1) & 0x01000000)) {
742 /*
743 * the PCIe core was probably already initialised
744 * by firmware - let's re-reset RCSSET regs
745 *
746 * -- Shouldn't we also re-reset the whole thing ? -- BenH
747 */
748 pr_debug("PCIE: SDR0_PLLLCT1 already reset.\n");
749 mtdcri(SDR0, PESDR0_440SPE_RCSSET, 0x01010000);
750 mtdcri(SDR0, PESDR1_440SPE_RCSSET, 0x01010000);
751 mtdcri(SDR0, PESDR2_440SPE_RCSSET, 0x01010000);
752 }
753
754 valPE0 = mfdcri(SDR0, PESDR0_440SPE_RCSSET);
755 valPE1 = mfdcri(SDR0, PESDR1_440SPE_RCSSET);
756 valPE2 = mfdcri(SDR0, PESDR2_440SPE_RCSSET);
757
758 /* SDR0_PExRCSSET rstgu */
759 if (!(valPE0 & 0x01000000) ||
760 !(valPE1 & 0x01000000) ||
761 !(valPE2 & 0x01000000)) {
762 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstgu error\n");
763 err = -1;
764 }
765
766 /* SDR0_PExRCSSET rstdl */
767 if (!(valPE0 & 0x00010000) ||
768 !(valPE1 & 0x00010000) ||
769 !(valPE2 & 0x00010000)) {
770 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstdl error\n");
771 err = -1;
772 }
773
774 /* SDR0_PExRCSSET rstpyn */
775 if ((valPE0 & 0x00001000) ||
776 (valPE1 & 0x00001000) ||
777 (valPE2 & 0x00001000)) {
778 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstpyn error\n");
779 err = -1;
780 }
781
782 /* SDR0_PExRCSSET hldplb */
783 if ((valPE0 & 0x10000000) ||
784 (valPE1 & 0x10000000) ||
785 (valPE2 & 0x10000000)) {
786 printk(KERN_INFO "PCIE: SDR0_PExRCSSET hldplb error\n");
787 err = -1;
788 }
789
790 /* SDR0_PExRCSSET rdy */
791 if ((valPE0 & 0x00100000) ||
792 (valPE1 & 0x00100000) ||
793 (valPE2 & 0x00100000)) {
794 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rdy error\n");
795 err = -1;
796 }
797
798 /* SDR0_PExRCSSET shutdown */
799 if ((valPE0 & 0x00000100) ||
800 (valPE1 & 0x00000100) ||
801 (valPE2 & 0x00000100)) {
802 printk(KERN_INFO "PCIE: SDR0_PExRCSSET shutdown error\n");
803 err = -1;
804 }
805
806 return err;
807}
808
809/* Global PCIe core initializations for 440SPe core */
810static int __init ppc440spe_pciex_core_init(struct device_node *np)
811{
812 int time_out = 20;
813
814 /* Set PLL clock receiver to LVPECL */
815 dcri_clrset(SDR0, PESDR0_PLLLCT1, 0, 1 << 28);
816
817 /* Shouldn't we do all the calibration stuff etc... here ? */
818 if (ppc440spe_pciex_check_reset(np))
819 return -ENXIO;
820
821 if (!(mfdcri(SDR0, PESDR0_PLLLCT2) & 0x10000)) {
822 printk(KERN_INFO "PCIE: PESDR_PLLCT2 resistance calibration "
823 "failed (0x%08x)\n",
824 mfdcri(SDR0, PESDR0_PLLLCT2));
825 return -1;
826 }
827
828 /* De-assert reset of PCIe PLL, wait for lock */
829 dcri_clrset(SDR0, PESDR0_PLLLCT1, 1 << 24, 0);
830 udelay(3);
831
832 while (time_out) {
833 if (!(mfdcri(SDR0, PESDR0_PLLLCT3) & 0x10000000)) {
834 time_out--;
835 udelay(1);
836 } else
837 break;
838 }
839 if (!time_out) {
840 printk(KERN_INFO "PCIE: VCO output not locked\n");
841 return -1;
842 }
843
844 pr_debug("PCIE initialization OK\n");
845
846 return 3;
847}
848
849static int __init ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
850{
851 u32 val = 1 << 24;
852
853 if (port->endpoint)
854 val = PTYPE_LEGACY_ENDPOINT << 20;
855 else
856 val = PTYPE_ROOT_PORT << 20;
857
858 if (port->index == 0)
859 val |= LNKW_X8 << 12;
860 else
861 val |= LNKW_X4 << 12;
862
863 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
864 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x20222222);
865 if (ppc440spe_revA())
866 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x11000000);
867 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL0SET1, 0x35000000);
868 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL1SET1, 0x35000000);
869 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL2SET1, 0x35000000);
870 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL3SET1, 0x35000000);
871 if (port->index == 0) {
872 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL4SET1,
873 0x35000000);
874 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL5SET1,
875 0x35000000);
876 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL6SET1,
877 0x35000000);
878 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL7SET1,
879 0x35000000);
880 }
881 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
882 (1 << 24) | (1 << 16), 1 << 12);
883
884 return ppc4xx_pciex_port_reset_sdr(port);
885}
886
887static int __init ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
888{
889 return ppc440spe_pciex_init_port_hw(port);
890}
891
892static int __init ppc440speB_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
893{
894 int rc = ppc440spe_pciex_init_port_hw(port);
895
896 port->has_ibpre = 1;
897
898 return rc;
899}
900
901static int ppc440speA_pciex_init_utl(struct ppc4xx_pciex_port *port)
902{
903 /* XXX Check what that value means... I hate magic */
904 dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x68782800);
905
906 /*
907 * Set buffer allocations and then assert VRB and TXE.
908 */
909 out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000);
910 out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
911 out_be32(port->utl_base + PEUTL_OPDBSZ, 0x10000000);
912 out_be32(port->utl_base + PEUTL_PBBSZ, 0x53000000);
913 out_be32(port->utl_base + PEUTL_IPHBSZ, 0x08000000);
914 out_be32(port->utl_base + PEUTL_IPDBSZ, 0x10000000);
915 out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
916 out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
917
918 return 0;
919}
920
921static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port)
922{
923 /* Report CRS to the operating system */
924 out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000);
925
926 return 0;
927}
928
929static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
930{
931 .want_sdr = true,
932 .core_init = ppc440spe_pciex_core_init,
933 .port_init_hw = ppc440speA_pciex_init_port_hw,
934 .setup_utl = ppc440speA_pciex_init_utl,
935 .check_link = ppc4xx_pciex_check_link_sdr,
936};
937
938static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
939{
940 .want_sdr = true,
941 .core_init = ppc440spe_pciex_core_init,
942 .port_init_hw = ppc440speB_pciex_init_port_hw,
943 .setup_utl = ppc440speB_pciex_init_utl,
944 .check_link = ppc4xx_pciex_check_link_sdr,
945};
946
947static int __init ppc460ex_pciex_core_init(struct device_node *np)
948{
949 /* Nothing to do, return 2 ports */
950 return 2;
951}
952
953static int __init ppc460ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
954{
955 u32 val;
956 u32 utlset1;
957
958 if (port->endpoint)
959 val = PTYPE_LEGACY_ENDPOINT << 20;
960 else
961 val = PTYPE_ROOT_PORT << 20;
962
963 if (port->index == 0) {
964 val |= LNKW_X1 << 12;
965 utlset1 = 0x20000000;
966 } else {
967 val |= LNKW_X4 << 12;
968 utlset1 = 0x20101101;
969 }
970
971 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
972 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, utlset1);
973 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01210000);
974
975 switch (port->index) {
976 case 0:
977 mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
978 mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
979 mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);
980
		mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x10000000);
982 break;
983
984 case 1:
985 mtdcri(SDR0, PESDR1_460EX_L0CDRCTL, 0x00003230);
986 mtdcri(SDR0, PESDR1_460EX_L1CDRCTL, 0x00003230);
987 mtdcri(SDR0, PESDR1_460EX_L2CDRCTL, 0x00003230);
988 mtdcri(SDR0, PESDR1_460EX_L3CDRCTL, 0x00003230);
989 mtdcri(SDR0, PESDR1_460EX_L0DRV, 0x00000130);
990 mtdcri(SDR0, PESDR1_460EX_L1DRV, 0x00000130);
991 mtdcri(SDR0, PESDR1_460EX_L2DRV, 0x00000130);
992 mtdcri(SDR0, PESDR1_460EX_L3DRV, 0x00000130);
993 mtdcri(SDR0, PESDR1_460EX_L0CLK, 0x00000006);
994 mtdcri(SDR0, PESDR1_460EX_L1CLK, 0x00000006);
995 mtdcri(SDR0, PESDR1_460EX_L2CLK, 0x00000006);
996 mtdcri(SDR0, PESDR1_460EX_L3CLK, 0x00000006);
997
		mtdcri(SDR0, PESDR1_460EX_PHY_CTL_RST, 0x10000000);
999 break;
1000 }
1001
1002 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1003 mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
1004 (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));
1005
1006 /* Poll for PHY reset */
1007 /* XXX FIXME add timeout */
1008 switch (port->index) {
1009 case 0:
1010 while (!(mfdcri(SDR0, PESDR0_460EX_RSTSTA) & 0x1))
1011 udelay(10);
1012 break;
1013 case 1:
1014 while (!(mfdcri(SDR0, PESDR1_460EX_RSTSTA) & 0x1))
1015 udelay(10);
1016 break;
1017 }
1018
1019 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1020 (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
1021 ~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
1022 PESDRx_RCSSET_RSTPYN);
1023
1024 port->has_ibpre = 1;
1025
1026 return ppc4xx_pciex_port_reset_sdr(port);
1027}
1028
1029static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
1030{
1031 dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
1032
1033 /*
1034 * Set buffer allocations and then assert VRB and TXE.
1035 */
1036 out_be32(port->utl_base + PEUTL_PBCTL, 0x0800000c);
1037 out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000);
1038 out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
1039 out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000);
1040 out_be32(port->utl_base + PEUTL_PBBSZ, 0x00000000);
1041 out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000);
1042 out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000);
	out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
1044 out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
1045
1046 return 0;
1047}
1048
1049static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
1050{
1051 .want_sdr = true,
1052 .core_init = ppc460ex_pciex_core_init,
1053 .port_init_hw = ppc460ex_pciex_init_port_hw,
1054 .setup_utl = ppc460ex_pciex_init_utl,
1055 .check_link = ppc4xx_pciex_check_link_sdr,
1056};
1057
1058static int __init apm821xx_pciex_core_init(struct device_node *np)
1059{
	/* Return the number of PCIe ports */
1061 return 1;
1062}
1063
1064static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1065{
1066 u32 val;
1067
	/*
	 * Do a software reset on the PCIe ports.
	 * This works around the PCI core not re-assigning bus numbers to
	 * PCIe devices after U-Boot has already scanned and configured all
	 * the buses (e.g. Intel Pro/1000 PT quad-port PCIe NIC, LSI SAS
	 * 1064E).
	 */
1075
1076 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x0);
1077 mdelay(10);
1078
1079 if (port->endpoint)
1080 val = PTYPE_LEGACY_ENDPOINT << 20;
1081 else
1082 val = PTYPE_ROOT_PORT << 20;
1083
1084 val |= LNKW_X1 << 12;
1085
1086 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
1087 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
1088 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
1089
1090 mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
1091 mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
1092 mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);
1093
1094 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x10000000);
1095 mdelay(50);
1096 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x30000000);
1097
1098 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1099 mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
1100 (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));
1101
1102 /* Poll for PHY reset */
1103 val = PESDR0_460EX_RSTSTA - port->sdr_base;
1104 if (ppc4xx_pciex_wait_on_sdr(port, val, 0x1, 1, 100)) {
1105 printk(KERN_WARNING "%s: PCIE: Can't reset PHY\n", __func__);
1106 return -EBUSY;
1107 } else {
1108 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1109 (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
1110 ~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
1111 PESDRx_RCSSET_RSTPYN);
1112
1113 port->has_ibpre = 1;
1114 return 0;
1115 }
1116}
1117
1118static struct ppc4xx_pciex_hwops apm821xx_pcie_hwops __initdata = {
1119 .want_sdr = true,
1120 .core_init = apm821xx_pciex_core_init,
1121 .port_init_hw = apm821xx_pciex_init_port_hw,
1122 .setup_utl = ppc460ex_pciex_init_utl,
1123 .check_link = ppc4xx_pciex_check_link_sdr,
1124};
1125
1126static int __init ppc460sx_pciex_core_init(struct device_node *np)
1127{
1128 /* HSS drive amplitude */
1129 mtdcri(SDR0, PESDR0_460SX_HSSL0DAMP, 0xB9843211);
1130 mtdcri(SDR0, PESDR0_460SX_HSSL1DAMP, 0xB9843211);
1131 mtdcri(SDR0, PESDR0_460SX_HSSL2DAMP, 0xB9843211);
1132 mtdcri(SDR0, PESDR0_460SX_HSSL3DAMP, 0xB9843211);
1133 mtdcri(SDR0, PESDR0_460SX_HSSL4DAMP, 0xB9843211);
1134 mtdcri(SDR0, PESDR0_460SX_HSSL5DAMP, 0xB9843211);
1135 mtdcri(SDR0, PESDR0_460SX_HSSL6DAMP, 0xB9843211);
1136 mtdcri(SDR0, PESDR0_460SX_HSSL7DAMP, 0xB9843211);
1137
1138 mtdcri(SDR0, PESDR1_460SX_HSSL0DAMP, 0xB9843211);
1139 mtdcri(SDR0, PESDR1_460SX_HSSL1DAMP, 0xB9843211);
1140 mtdcri(SDR0, PESDR1_460SX_HSSL2DAMP, 0xB9843211);
1141 mtdcri(SDR0, PESDR1_460SX_HSSL3DAMP, 0xB9843211);
1142
1143 mtdcri(SDR0, PESDR2_460SX_HSSL0DAMP, 0xB9843211);
1144 mtdcri(SDR0, PESDR2_460SX_HSSL1DAMP, 0xB9843211);
1145 mtdcri(SDR0, PESDR2_460SX_HSSL2DAMP, 0xB9843211);
1146 mtdcri(SDR0, PESDR2_460SX_HSSL3DAMP, 0xB9843211);
1147
1148 /* HSS TX pre-emphasis */
1149 mtdcri(SDR0, PESDR0_460SX_HSSL0COEFA, 0xDCB98987);
1150 mtdcri(SDR0, PESDR0_460SX_HSSL1COEFA, 0xDCB98987);
1151 mtdcri(SDR0, PESDR0_460SX_HSSL2COEFA, 0xDCB98987);
1152 mtdcri(SDR0, PESDR0_460SX_HSSL3COEFA, 0xDCB98987);
1153 mtdcri(SDR0, PESDR0_460SX_HSSL4COEFA, 0xDCB98987);
1154 mtdcri(SDR0, PESDR0_460SX_HSSL5COEFA, 0xDCB98987);
1155 mtdcri(SDR0, PESDR0_460SX_HSSL6COEFA, 0xDCB98987);
1156 mtdcri(SDR0, PESDR0_460SX_HSSL7COEFA, 0xDCB98987);
1157
1158 mtdcri(SDR0, PESDR1_460SX_HSSL0COEFA, 0xDCB98987);
1159 mtdcri(SDR0, PESDR1_460SX_HSSL1COEFA, 0xDCB98987);
1160 mtdcri(SDR0, PESDR1_460SX_HSSL2COEFA, 0xDCB98987);
1161 mtdcri(SDR0, PESDR1_460SX_HSSL3COEFA, 0xDCB98987);
1162
1163 mtdcri(SDR0, PESDR2_460SX_HSSL0COEFA, 0xDCB98987);
1164 mtdcri(SDR0, PESDR2_460SX_HSSL1COEFA, 0xDCB98987);
1165 mtdcri(SDR0, PESDR2_460SX_HSSL2COEFA, 0xDCB98987);
1166 mtdcri(SDR0, PESDR2_460SX_HSSL3COEFA, 0xDCB98987);
1167
1168 /* HSS TX calibration control */
1169 mtdcri(SDR0, PESDR0_460SX_HSSL1CALDRV, 0x22222222);
1170 mtdcri(SDR0, PESDR1_460SX_HSSL1CALDRV, 0x22220000);
1171 mtdcri(SDR0, PESDR2_460SX_HSSL1CALDRV, 0x22220000);
1172
1173 /* HSS TX slew control */
1174 mtdcri(SDR0, PESDR0_460SX_HSSSLEW, 0xFFFFFFFF);
1175 mtdcri(SDR0, PESDR1_460SX_HSSSLEW, 0xFFFF0000);
1176 mtdcri(SDR0, PESDR2_460SX_HSSSLEW, 0xFFFF0000);
1177
1178 /* Set HSS PRBS enabled */
1179 mtdcri(SDR0, PESDR0_460SX_HSSCTLSET, 0x00001130);
1180 mtdcri(SDR0, PESDR2_460SX_HSSCTLSET, 0x00001130);
1181
1182 udelay(100);
1183
1184 /* De-assert PLLRESET */
1185 dcri_clrset(SDR0, PESDR0_PLLLCT2, 0x00000100, 0);
1186
1187 /* Reset DL, UTL, GPL before configuration */
1188 mtdcri(SDR0, PESDR0_460SX_RCSSET,
1189 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1190 mtdcri(SDR0, PESDR1_460SX_RCSSET,
1191 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1192 mtdcri(SDR0, PESDR2_460SX_RCSSET,
1193 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1194
1195 udelay(100);
1196
1197 /*
1198 * If bifurcation is not enabled, u-boot would have disabled the
1199 * third PCIe port
1200 */
1201 if (((mfdcri(SDR0, PESDR1_460SX_HSSCTLSET) & 0x00000001) ==
1202 0x00000001)) {
1203 printk(KERN_INFO "PCI: PCIE bifurcation setup successfully.\n");
1204 printk(KERN_INFO "PCI: Total 3 PCIE ports are present\n");
1205 return 3;
1206 }
1207
1208 printk(KERN_INFO "PCI: Total 2 PCIE ports are present\n");
1209 return 2;
1210}
1211
1212static int __init ppc460sx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1213{
1214
1215 if (port->endpoint)
1216 dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
1217 0x01000000, 0);
1218 else
1219 dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
1220 0, 0x01000000);
1221
1222 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
1223 (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL),
1224 PESDRx_RCSSET_RSTPYN);
1225
1226 port->has_ibpre = 1;
1227
1228 return ppc4xx_pciex_port_reset_sdr(port);
1229}
1230
1231static int ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port *port)
1232{
	/* Max 128 bytes */
	out_be32(port->utl_base + PEUTL_PBBSZ, 0x00000000);
1235 /* Assert VRB and TXE - per datasheet turn off addr validation */
1236 out_be32(port->utl_base + PEUTL_PCTL, 0x80800000);
1237 return 0;
1238}
1239
1240static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
1241{
1242 void __iomem *mbase;
1243 int attempt = 50;
1244
1245 port->link = 0;
1246
1247 mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
1248 if (mbase == NULL) {
1249 printk(KERN_ERR "%s: Can't map internal config space !",
1250 port->node->full_name);
1251 goto done;
1252 }
1253
1254 while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA)
1255 & PECFG_460SX_DLLSTA_LINKUP))) {
1256 attempt--;
1257 mdelay(10);
1258 }
1259 if (attempt)
1260 port->link = 1;
1261done:
	iounmap(mbase);
}
1265
1266static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
1267 .want_sdr = true,
1268 .core_init = ppc460sx_pciex_core_init,
1269 .port_init_hw = ppc460sx_pciex_init_port_hw,
1270 .setup_utl = ppc460sx_pciex_init_utl,
1271 .check_link = ppc460sx_pciex_check_link,
1272};
1273
1274#endif /* CONFIG_44x */
1275
1276#ifdef CONFIG_40x
1277
1278static int __init ppc405ex_pciex_core_init(struct device_node *np)
1279{
1280 /* Nothing to do, return 2 ports */
1281 return 2;
1282}
1283
1284static void ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port)
1285{
1286 /* Assert the PE0_PHY reset */
1287 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000);
1288 msleep(1);
1289
1290 /* deassert the PE0_hotreset */
1291 if (port->endpoint)
1292 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01111000);
1293 else
1294 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01101000);
1295
1296 /* poll for phy !reset */
1297 /* XXX FIXME add timeout */
1298 while (!(mfdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSTA) & 0x00001000))
1299 ;
1300
1301 /* deassert the PE0_gpl_utl_reset */
1302 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x00101000);
1303}
1304
1305static int __init ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1306{
1307 u32 val;
1308
1309 if (port->endpoint)
1310 val = PTYPE_LEGACY_ENDPOINT;
1311 else
1312 val = PTYPE_ROOT_PORT;
1313
1314 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET,
1315 1 << 24 | val << 20 | LNKW_X1 << 12);
1316
1317 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
1318 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
1319 mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET1, 0x720F0000);
1320 mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET2, 0x70600003);
1321
1322 /*
1323 * Only reset the PHY when no link is currently established.
1324 * This is for the Atheros PCIe board which has problems to establish
1325 * the link (again) after this PHY reset. All other currently tested
1326 * PCIe boards don't show this problem.
1327 * This has to be re-tested and fixed in a later release!
1328 */
1329 val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP);
1330 if (!(val & 0x00001000))
1331 ppc405ex_pcie_phy_reset(port);
1332
1333 dcr_write(port->dcrs, DCRO_PEGPL_CFG, 0x10000000); /* guarded on */
1334
1335 port->has_ibpre = 1;
1336
1337 return ppc4xx_pciex_port_reset_sdr(port);
1338}
1339
1340static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
1341{
1342 dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
1343
1344 /*
1345 * Set buffer allocations and then assert VRB and TXE.
1346 */
1347 out_be32(port->utl_base + PEUTL_OUTTR, 0x02000000);
1348 out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
1349 out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000);
1350 out_be32(port->utl_base + PEUTL_PBBSZ, 0x21000000);
1351 out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000);
1352 out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000);
1353 out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
1354 out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
1355
1356 out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000);
1357
1358 return 0;
1359}
1360
1361static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
1362{
1363 .want_sdr = true,
1364 .core_init = ppc405ex_pciex_core_init,
1365 .port_init_hw = ppc405ex_pciex_init_port_hw,
1366 .setup_utl = ppc405ex_pciex_init_utl,
1367 .check_link = ppc4xx_pciex_check_link_sdr,
1368};
1369
1370#endif /* CONFIG_40x */
1371
1372#ifdef CONFIG_476FPE
1373static int __init ppc_476fpe_pciex_core_init(struct device_node *np)
1374{
1375 return 4;
1376}
1377
1378static void __init ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port *port)
1379{
1380 u32 timeout_ms = 20;
1381 u32 val = 0, mask = (PECFG_TLDLP_LNKUP|PECFG_TLDLP_PRESENT);
1382 void __iomem *mbase = ioremap(port->cfg_space.start + 0x10000000,
1383 0x1000);
1384
1385 printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
1386
1387 if (mbase == NULL) {
1388 printk(KERN_WARNING "PCIE%d: failed to get cfg space\n",
1389 port->index);
1390 return;
1391 }
1392
1393 while (timeout_ms--) {
1394 val = in_le32(mbase + PECFG_TLDLP);
1395
1396 if ((val & mask) == mask)
1397 break;
1398 msleep(10);
1399 }
1400
1401 if (val & PECFG_TLDLP_PRESENT) {
1402 printk(KERN_INFO "PCIE%d: link is up !\n", port->index);
1403 port->link = 1;
1404 } else
1405 printk(KERN_WARNING "PCIE%d: Link up failed\n", port->index);
1406
1407 iounmap(mbase);
1408 return;
1409}
1410
1411static struct ppc4xx_pciex_hwops ppc_476fpe_pcie_hwops __initdata =
1412{
1413 .core_init = ppc_476fpe_pciex_core_init,
1414 .check_link = ppc_476fpe_pciex_check_link,
1415};
1416#endif /* CONFIG_476FPE */
1417
/* Check that the core has been initialized and if not, do it */
1419static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
1420{
1421 static int core_init;
1422 int count = -ENODEV;
1423
1424 if (core_init++)
1425 return 0;
1426
1427#ifdef CONFIG_44x
1428 if (of_device_is_compatible(np, "ibm,plb-pciex-440spe")) {
1429 if (ppc440spe_revA())
1430 ppc4xx_pciex_hwops = &ppc440speA_pcie_hwops;
1431 else
1432 ppc4xx_pciex_hwops = &ppc440speB_pcie_hwops;
1433 }
1434 if (of_device_is_compatible(np, "ibm,plb-pciex-460ex"))
1435 ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops;
1436 if (of_device_is_compatible(np, "ibm,plb-pciex-460sx"))
1437 ppc4xx_pciex_hwops = &ppc460sx_pcie_hwops;
1438 if (of_device_is_compatible(np, "ibm,plb-pciex-apm821xx"))
1439 ppc4xx_pciex_hwops = &apm821xx_pcie_hwops;
1440#endif /* CONFIG_44x */
1441#ifdef CONFIG_40x
1442 if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
1443 ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops;
1444#endif
1445#ifdef CONFIG_476FPE
1446 if (of_device_is_compatible(np, "ibm,plb-pciex-476fpe")
1447 || of_device_is_compatible(np, "ibm,plb-pciex-476gtr"))
1448 ppc4xx_pciex_hwops = &ppc_476fpe_pcie_hwops;
1449#endif
1450 if (ppc4xx_pciex_hwops == NULL) {
1451 printk(KERN_WARNING "PCIE: unknown host type %s\n",
1452 np->full_name);
1453 return -ENODEV;
1454 }
1455
1456 count = ppc4xx_pciex_hwops->core_init(np);
1457 if (count > 0) {
1458 ppc4xx_pciex_ports =
1459 kzalloc(count * sizeof(struct ppc4xx_pciex_port),
1460 GFP_KERNEL);
1461 if (ppc4xx_pciex_ports) {
1462 ppc4xx_pciex_port_count = count;
1463 return 0;
1464 }
1465 printk(KERN_WARNING "PCIE: failed to allocate ports array\n");
1466 return -ENOMEM;
1467 }
1468 return -ENODEV;
1469}
1470
1471static void __init ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port *port)
1472{
1473 /* We map PCI Express configuration based on the reg property */
1474 dcr_write(port->dcrs, DCRO_PEGPL_CFGBAH,
1475 RES_TO_U32_HIGH(port->cfg_space.start));
1476 dcr_write(port->dcrs, DCRO_PEGPL_CFGBAL,
1477 RES_TO_U32_LOW(port->cfg_space.start));
1478
1479 /* XXX FIXME: Use size from reg property. For now, map 512M */
1480 dcr_write(port->dcrs, DCRO_PEGPL_CFGMSK, 0xe0000001);
1481
1482 /* We map UTL registers based on the reg property */
1483 dcr_write(port->dcrs, DCRO_PEGPL_REGBAH,
1484 RES_TO_U32_HIGH(port->utl_regs.start));
1485 dcr_write(port->dcrs, DCRO_PEGPL_REGBAL,
1486 RES_TO_U32_LOW(port->utl_regs.start));
1487
1488 /* XXX FIXME: Use size from reg property */
1489 dcr_write(port->dcrs, DCRO_PEGPL_REGMSK, 0x00007001);
1490
1491 /* Disable all other outbound windows */
1492 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, 0);
1493 dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, 0);
1494 dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0);
1495 dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0);
1496}
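
/* The 0xe0000001 written to DCRO_PEGPL_CFGMSK above follows the usual
 * "size mask | valid" convention used elsewhere in this file: for the
 * 512MB config window mentioned in the comment,
 *
 *	~(0x20000000 - 1) = 0xe0000000,  | 0x1 (valid)  ->  0xe0000001
 *
 * The REG mask (0x00007001) is left as a magic number here since its
 * encoding isn't obvious from this file alone.
 */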
1497
1498static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
1499{
1500 int rc = 0;
1501
1502 /* Init HW */
1503 if (ppc4xx_pciex_hwops->port_init_hw)
1504 rc = ppc4xx_pciex_hwops->port_init_hw(port);
1505 if (rc != 0)
1506 return rc;
1507
1508 /*
1509 * Initialize mapping: disable all regions and configure
1510 * CFG and REG regions based on resources in the device tree
1511 */
1512 ppc4xx_pciex_port_init_mapping(port);
1513
1514 if (ppc4xx_pciex_hwops->check_link)
1515 ppc4xx_pciex_hwops->check_link(port);
1516
1517 /*
1518 * Map UTL
1519 */
1520 port->utl_base = ioremap(port->utl_regs.start, 0x100);
1521 BUG_ON(port->utl_base == NULL);
1522
1523 /*
1524 * Setup UTL registers --BenH.
1525 */
1526 if (ppc4xx_pciex_hwops->setup_utl)
1527 ppc4xx_pciex_hwops->setup_utl(port);
1528
1529 /*
1530 * Check for VC0 active or PLL Locked and assert RDY.
1531 */
1532 if (port->sdr_base) {
1533 if (of_device_is_compatible(port->node,
1534 "ibm,plb-pciex-460sx")){
1535 if (port->link && ppc4xx_pciex_wait_on_sdr(port,
1536 PESDRn_RCSSTS,
1537 1 << 12, 1 << 12, 5000)) {
1538 printk(KERN_INFO "PCIE%d: PLL not locked\n",
1539 port->index);
1540 port->link = 0;
1541 }
1542 } else if (port->link &&
1543 ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS,
1544 1 << 16, 1 << 16, 5000)) {
1545 printk(KERN_INFO "PCIE%d: VC0 not active\n",
1546 port->index);
1547 port->link = 0;
1548 }
1549
1550 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20);
1551 }
1552
1553 msleep(100);
1554
1555 return 0;
1556}
1557
1558static int ppc4xx_pciex_validate_bdf(struct ppc4xx_pciex_port *port,
1559 struct pci_bus *bus,
1560 unsigned int devfn)
1561{
1562 static int message;
1563
	/* An endpoint cannot generate upstream (remote) config cycles */
1565 if (port->endpoint && bus->number != port->hose->first_busno)
1566 return PCIBIOS_DEVICE_NOT_FOUND;
1567
1568 /* Check we are within the mapped range */
1569 if (bus->number > port->hose->last_busno) {
1570 if (!message) {
			printk(KERN_WARNING "Warning! Probing bus %u"
			       " out of range!\n", bus->number);
1573 message++;
1574 }
1575 return PCIBIOS_DEVICE_NOT_FOUND;
1576 }
1577
1578 /* The root complex has only one device / function */
1579 if (bus->number == port->hose->first_busno && devfn != 0)
1580 return PCIBIOS_DEVICE_NOT_FOUND;
1581
1582 /* The other side of the RC has only one device as well */
1583 if (bus->number == (port->hose->first_busno + 1) &&
1584 PCI_SLOT(devfn) != 0)
1585 return PCIBIOS_DEVICE_NOT_FOUND;
1586
1587 /* Check if we have a link */
1588 if ((bus->number != port->hose->first_busno) && !port->link)
1589 return PCIBIOS_DEVICE_NOT_FOUND;
1590
1591 return 0;
1592}
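
/* Concretely, for a root-complex port with an illustrative bus-range of
 * <0x40 0x7f>, the checks above accept exactly two device addresses on
 * the first two buses:
 *
 *	bus 0x40, devfn 0x00	-> the root port itself (cfg_addr)
 *	bus 0x41, slot 0	-> the device at the far end of the link
 *
 * Everything else on buses 0x40/0x41, and any bus past last_busno, is
 * reported as PCIBIOS_DEVICE_NOT_FOUND; buses behind a switch (0x42 and
 * up) are passed through as long as the link is up.
 */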
1593
1594static void __iomem *ppc4xx_pciex_get_config_base(struct ppc4xx_pciex_port *port,
1595 struct pci_bus *bus,
1596 unsigned int devfn)
1597{
1598 int relbus;
1599
1600 /* Remove the casts when we finally remove the stupid volatile
1601 * in struct pci_controller
1602 */
1603 if (bus->number == port->hose->first_busno)
1604 return (void __iomem *)port->hose->cfg_addr;
1605
1606 relbus = bus->number - (port->hose->first_busno + 1);
1607 return (void __iomem *)port->hose->cfg_data +
1608 ((relbus << 20) | (devfn << 12));
1609}
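
/* Example of the offset math above (hypothetical numbers): with
 * first_busno = 0x40, a config access to bus 0x42, devfn PCI_DEVFN(1, 0)
 * lands at
 *
 *	relbus = 0x42 - 0x41 = 1
 *	offset = (1 << 20) | (0x08 << 12) = 0x00108000
 *
 * into cfg_data, i.e. 1MB of config space per bus and 4KB per function,
 * which is why the external window is ioremapped as busses * 0x100000.
 */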
1610
1611static int ppc4xx_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
1612 int offset, int len, u32 *val)
1613{
1614 struct pci_controller *hose = pci_bus_to_host(bus);
1615 struct ppc4xx_pciex_port *port =
1616 &ppc4xx_pciex_ports[hose->indirect_type];
1617 void __iomem *addr;
1618 u32 gpl_cfg;
1619
1620 BUG_ON(hose != port->hose);
1621
1622 if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
1623 return PCIBIOS_DEVICE_NOT_FOUND;
1624
1625 addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
1626
1627 /*
1628 * Reading from configuration space of non-existing device can
1629 * generate transaction errors. For the read duration we suppress
1630 * assertion of machine check exceptions to avoid those.
1631 */
1632 gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
1633 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
1634
1635 /* Make sure no CRS is recorded */
1636 out_be32(port->utl_base + PEUTL_RCSTA, 0x00040000);
1637
1638 switch (len) {
1639 case 1:
1640 *val = in_8((u8 *)(addr + offset));
1641 break;
1642 case 2:
1643 *val = in_le16((u16 *)(addr + offset));
1644 break;
1645 default:
1646 *val = in_le32((u32 *)(addr + offset));
1647 break;
1648 }
1649
1650 pr_debug("pcie-config-read: bus=%3d [%3d..%3d] devfn=0x%04x"
1651 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
1652 bus->number, hose->first_busno, hose->last_busno,
1653 devfn, offset, len, addr + offset, *val);
1654
1655 /* Check for CRS (440SPe rev B does that for us but heh ..) */
1656 if (in_be32(port->utl_base + PEUTL_RCSTA) & 0x00040000) {
1657 pr_debug("Got CRS !\n");
1658 if (len != 4 || offset != 0)
1659 return PCIBIOS_DEVICE_NOT_FOUND;
1660 *val = 0xffff0001;
1661 }
1662
1663 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
1664
1665 return PCIBIOS_SUCCESSFUL;
1666}
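
/* A note on the 0xffff0001 fabricated above when a CRS was latched: a
 * dword read of the vendor/device ID that returns 0x0001 in the vendor ID
 * field is what the generic PCI probe code interprets as "configuration
 * request retry", so it polls again instead of treating the device as
 * absent. (This describes the generic PCI core behaviour, not anything
 * specific to the 4xx bridge.)
 */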
1667
1668static int ppc4xx_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
1669 int offset, int len, u32 val)
1670{
1671 struct pci_controller *hose = pci_bus_to_host(bus);
1672 struct ppc4xx_pciex_port *port =
1673 &ppc4xx_pciex_ports[hose->indirect_type];
1674 void __iomem *addr;
1675 u32 gpl_cfg;
1676
1677 if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
1678 return PCIBIOS_DEVICE_NOT_FOUND;
1679
1680 addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
1681
1682 /*
1683 * Reading from configuration space of non-existing device can
1684 * generate transaction errors. For the read duration we suppress
1685 * assertion of machine check exceptions to avoid those.
1686 */
1687 gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
1688 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
1689
1690 pr_debug("pcie-config-write: bus=%3d [%3d..%3d] devfn=0x%04x"
1691 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
1692 bus->number, hose->first_busno, hose->last_busno,
1693 devfn, offset, len, addr + offset, val);
1694
1695 switch (len) {
1696 case 1:
1697 out_8((u8 *)(addr + offset), val);
1698 break;
1699 case 2:
1700 out_le16((u16 *)(addr + offset), val);
1701 break;
1702 default:
1703 out_le32((u32 *)(addr + offset), val);
1704 break;
1705 }
1706
1707 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
1708
1709 return PCIBIOS_SUCCESSFUL;
1710}
1711
1712static struct pci_ops ppc4xx_pciex_pci_ops =
1713{
1714 .read = ppc4xx_pciex_read_config,
1715 .write = ppc4xx_pciex_write_config,
1716};
1717
1718static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port,
1719 struct pci_controller *hose,
1720 void __iomem *mbase,
1721 u64 plb_addr,
1722 u64 pci_addr,
1723 u64 size,
1724 unsigned int flags,
1725 int index)
1726{
1727 u32 lah, lal, pciah, pcial, sa;
1728
1729 if (!is_power_of_2(size) ||
1730 (index < 2 && size < 0x100000) ||
1731 (index == 2 && size < 0x100) ||
1732 (plb_addr & (size - 1)) != 0) {
1733 printk(KERN_WARNING "%s: Resource out of range\n",
1734 hose->dn->full_name);
1735 return -1;
1736 }
1737
1738 /* Calculate register values */
1739 lah = RES_TO_U32_HIGH(plb_addr);
1740 lal = RES_TO_U32_LOW(plb_addr);
1741 pciah = RES_TO_U32_HIGH(pci_addr);
1742 pcial = RES_TO_U32_LOW(pci_addr);
1743 sa = (0xffffffffu << ilog2(size)) | 0x1;
1744
1745 /* Program register values */
1746 switch (index) {
1747 case 0:
1748 out_le32(mbase + PECFG_POM0LAH, pciah);
1749 out_le32(mbase + PECFG_POM0LAL, pcial);
1750 dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
1751 dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
1752 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
1753 /*Enabled and single region */
1754 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
1755 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
1756 sa | DCRO_PEGPL_460SX_OMR1MSKL_UOT
1757 | DCRO_PEGPL_OMRxMSKL_VAL);
1758 else if (of_device_is_compatible(
1759 port->node, "ibm,plb-pciex-476fpe") ||
1760 of_device_is_compatible(
1761 port->node, "ibm,plb-pciex-476gtr"))
1762 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
1763 sa | DCRO_PEGPL_476FPE_OMR1MSKL_UOT
1764 | DCRO_PEGPL_OMRxMSKL_VAL);
1765 else
1766 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
1767 sa | DCRO_PEGPL_OMR1MSKL_UOT
1768 | DCRO_PEGPL_OMRxMSKL_VAL);
1769 break;
1770 case 1:
1771 out_le32(mbase + PECFG_POM1LAH, pciah);
1772 out_le32(mbase + PECFG_POM1LAL, pcial);
1773 dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
1774 dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
1775 dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
1776 dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL,
1777 sa | DCRO_PEGPL_OMRxMSKL_VAL);
1778 break;
1779 case 2:
1780 out_le32(mbase + PECFG_POM2LAH, pciah);
1781 out_le32(mbase + PECFG_POM2LAL, pcial);
1782 dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
1783 dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
1784 dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
1785 /* Note that 3 here means enabled | IO space !!! */
1786 dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL,
1787 sa | DCRO_PEGPL_OMR3MSKL_IO
1788 | DCRO_PEGPL_OMRxMSKL_VAL);
1789 break;
1790 }
1791
1792 return 0;
1793}
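
/* Two illustrative invocations of the function above (made-up addresses):
 *
 *   256MB memory window, PLB 0xd0000000 -> PCI 0x80000000, index 0:
 *	lah = 0x0, lal = 0xd0000000, pciah = 0x0, pcial = 0x80000000,
 *	sa  = (0xffffffff << 28) | 1 = 0xf0000001
 *
 *   64KB I/O window (the fixed region 2 set up below):
 *	sa  = (0xffffffff << 16) | 1 = 0xffff0001, OR'ed with the IO and
 *	valid bits when written to DCRO_PEGPL_OMR3MSKL.
 */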
1794
1795static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
1796 struct pci_controller *hose,
1797 void __iomem *mbase)
1798{
1799 int i, j, found_isa_hole = 0;
1800
1801 /* Setup outbound memory windows */
1802 for (i = j = 0; i < 3; i++) {
1803 struct resource *res = &hose->mem_resources[i];
1804 resource_size_t offset = hose->mem_offset[i];
1805
1806 /* we only care about memory windows */
1807 if (!(res->flags & IORESOURCE_MEM))
1808 continue;
1809 if (j > 1) {
1810 printk(KERN_WARNING "%s: Too many ranges\n",
1811 port->node->full_name);
1812 break;
1813 }
1814
1815 /* Configure the resource */
1816 if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1817 res->start,
1818 res->start - offset,
1819 resource_size(res),
1820 res->flags,
1821 j) == 0) {
1822 j++;
1823
1824 /* If the resource PCI address is 0 then we have our
1825 * ISA memory hole
1826 */
1827 if (res->start == offset)
1828 found_isa_hole = 1;
1829 }
1830 }
1831
1832 /* Handle ISA memory hole if not already covered */
1833 if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
1834 if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1835 hose->isa_mem_phys, 0,
1836 hose->isa_mem_size, 0, j) == 0)
1837 printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
1838 hose->dn->full_name);
1839
1840 /* Configure IO, always 64K starting at 0. We hard wire it to 64K !
1841 * Note also that it -has- to be region index 2 on this HW
1842 */
1843 if (hose->io_resource.flags & IORESOURCE_IO)
1844 ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1845 hose->io_base_phys, 0,
1846 0x10000, IORESOURCE_IO, 2);
1847}
1848
1849static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
1850 struct pci_controller *hose,
1851 void __iomem *mbase,
1852 struct resource *res)
1853{
1854 resource_size_t size = resource_size(res);
1855 u64 sa;
1856
1857 if (port->endpoint) {
1858 resource_size_t ep_addr = 0;
1859 resource_size_t ep_size = 32 << 20;
1860
1861 /* Currently we map a fixed 64MByte window to PLB address
1862 * 0 (SDRAM). This should probably be configurable via a dts
1863 * property.
1864 */
1865
1866 /* Calculate window size */
1867 sa = (0xffffffffffffffffull << ilog2(ep_size));
1868
1869 /* Setup BAR0 */
1870 out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
1871 out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) |
1872 PCI_BASE_ADDRESS_MEM_TYPE_64);
1873
1874 /* Disable BAR1 & BAR2 */
1875 out_le32(mbase + PECFG_BAR1MPA, 0);
1876 out_le32(mbase + PECFG_BAR2HMPA, 0);
1877 out_le32(mbase + PECFG_BAR2LMPA, 0);
1878
1879 out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa));
1880 out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa));
1881
1882 out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr));
1883 out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr));
1884 } else {
1885 /* Calculate window size */
1886 sa = (0xffffffffffffffffull << ilog2(size));
1887 if (res->flags & IORESOURCE_PREFETCH)
1888 sa |= PCI_BASE_ADDRESS_MEM_PREFETCH;
1889
1890 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx") ||
1891 of_device_is_compatible(
1892 port->node, "ibm,plb-pciex-476fpe") ||
1893 of_device_is_compatible(
1894 port->node, "ibm,plb-pciex-476gtr"))
1895 sa |= PCI_BASE_ADDRESS_MEM_TYPE_64;
1896
1897 out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
1898 out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa));
1899
1900 		/* The setup of the PIM0/PIM1 split below looks odd; keep an
1901 		 * eye on it if inbound mapping misbehaves.
1902 		 */
1903 out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
1904 out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
1905 out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
1906 out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
1907 out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
1908 out_le32(mbase + PECFG_PIM01SAL, 0x00000000);
1909
1910 out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start));
1911 out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start));
1912 }
1913
1914 /* Enable inbound mapping */
1915 out_le32(mbase + PECFG_PIMEN, 0x1);
1916
1917 /* Enable I/O, Mem, and Busmaster cycles */
1918 out_le16(mbase + PCI_COMMAND,
1919 in_le16(mbase + PCI_COMMAND) |
1920 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1921}
1922
1923static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
1924{
1925 struct resource dma_window;
1926 struct pci_controller *hose = NULL;
1927 const int *bus_range;
1928 int primary = 0, busses;
1929 void __iomem *mbase = NULL, *cfg_data = NULL;
1930 const u32 *pval;
1931 u32 val;
1932
1933 /* Check if primary bridge */
1934 if (of_get_property(port->node, "primary", NULL))
1935 primary = 1;
1936
1937 /* Get bus range if any */
1938 bus_range = of_get_property(port->node, "bus-range", NULL);
1939
1940 /* Allocate the host controller data structure */
1941 hose = pcibios_alloc_controller(port->node);
1942 if (!hose)
1943 goto fail;
1944
1945 /* We stick the port number in "indirect_type" so the config space
1946 * ops can retrieve the port data structure easily
1947 */
1948 hose->indirect_type = port->index;
1949
1950 /* Get bus range */
1951 hose->first_busno = bus_range ? bus_range[0] : 0x0;
1952 hose->last_busno = bus_range ? bus_range[1] : 0xff;
1953
1954 	/* Because the config space mapping is so big (1M per bus), we limit
1955 	 * how many busses we support. In the long run, we could replace that
1956 	 * with something akin to kmap_atomic instead. We also set aside 1 bus
1957 	 * for the host itself.
1958 	 */
1959 busses = hose->last_busno - hose->first_busno; /* This is off by 1 */
1960 if (busses > MAX_PCIE_BUS_MAPPED) {
1961 busses = MAX_PCIE_BUS_MAPPED;
1962 hose->last_busno = hose->first_busno + busses;
1963 }
1964
1965 if (!port->endpoint) {
1966 /* Only map the external config space in cfg_data for
1967 * PCIe root-complexes. External space is 1M per bus
1968 */
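		/* The root port itself (first_busno) is reached through
		 * cfg_addr mapped further down, so this external mapping
		 * starts at first_busno + 1 and covers "busses" buses of
		 * 1M each.
		 */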
1969 cfg_data = ioremap(port->cfg_space.start +
1970 (hose->first_busno + 1) * 0x100000,
1971 busses * 0x100000);
1972 if (cfg_data == NULL) {
1973 			printk(KERN_ERR "%s: Can't map external config space !\n",
1974 port->node->full_name);
1975 goto fail;
1976 }
1977 hose->cfg_data = cfg_data;
1978 }
1979
1980 /* Always map the host config space in cfg_addr.
1981 * Internal space is 4K
1982 */
1983 mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
1984 if (mbase == NULL) {
1985 		printk(KERN_ERR "%s: Can't map internal config space !\n",
1986 port->node->full_name);
1987 goto fail;
1988 }
1989 hose->cfg_addr = mbase;
1990
1991 pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name,
1992 hose->first_busno, hose->last_busno);
1993 pr_debug(" config space mapped at: root @0x%p, other @0x%p\n",
1994 hose->cfg_addr, hose->cfg_data);
1995
1996 /* Setup config space */
1997 hose->ops = &ppc4xx_pciex_pci_ops;
1998 port->hose = hose;
1999 mbase = (void __iomem *)hose->cfg_addr;
2000
2001 if (!port->endpoint) {
2002 /*
2003 * Set bus numbers on our root port
2004 */
2005 out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno);
2006 out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1);
2007 out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno);
2008 }
2009
2010 /*
2011 * OMRs are already reset, also disable PIMs
2012 */
2013 out_le32(mbase + PECFG_PIMEN, 0);
2014
2015 /* Parse outbound mapping resources */
2016 pci_process_bridge_OF_ranges(hose, port->node, primary);
2017
2018 /* Parse inbound mapping resources */
2019 if (ppc4xx_parse_dma_ranges(hose, mbase, &dma_window) != 0)
2020 goto fail;
2021
2022 /* Configure outbound ranges POMs */
2023 ppc4xx_configure_pciex_POMs(port, hose, mbase);
2024
2025 /* Configure inbound ranges PIMs */
2026 ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window);
2027
2028 	/* The root complex doesn't show up if we don't set some vendor
2029 	 * and device IDs into it. The defaults below are the same bogus
2030 	 * ones that the initial code in arch/ppc had. They can be
2031 	 * overridden by setting the "vendor-id"/"device-id" properties
2032 	 * in the pciex node.
2033 	 */
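	/* The 0x2xx offsets written below appear to mirror the standard
	 * config header at a 0x200 base in the internal register space:
	 * vendor ID at 0x200, device ID at 0x202, command at 0x204 and
	 * class/revision at 0x208.
	 */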
2034
2035 /* Get the (optional) vendor-/device-id from the device-tree */
2036 pval = of_get_property(port->node, "vendor-id", NULL);
2037 if (pval) {
2038 val = *pval;
2039 } else {
2040 if (!port->endpoint)
2041 val = 0xaaa0 + port->index;
2042 else
2043 val = 0xeee0 + port->index;
2044 }
2045 out_le16(mbase + 0x200, val);
2046
2047 pval = of_get_property(port->node, "device-id", NULL);
2048 if (pval) {
2049 val = *pval;
2050 } else {
2051 if (!port->endpoint)
2052 val = 0xbed0 + port->index;
2053 else
2054 val = 0xfed0 + port->index;
2055 }
2056 out_le16(mbase + 0x202, val);
2057
2058 /* Enable Bus master, memory, and io space */
2059 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
2060 out_le16(mbase + 0x204, 0x7);
2061
2062 if (!port->endpoint) {
2063 /* Set Class Code to PCI-PCI bridge and Revision Id to 1 */
2064 out_le32(mbase + 0x208, 0x06040001);
2065
2066 printk(KERN_INFO "PCIE%d: successfully set as root-complex\n",
2067 port->index);
2068 } else {
2069 /* Set Class Code to Processor/PPC */
2070 out_le32(mbase + 0x208, 0x0b200001);
2071
2072 printk(KERN_INFO "PCIE%d: successfully set as endpoint\n",
2073 port->index);
2074 }
2075
2076 return;
2077 fail:
2078 if (hose)
2079 pcibios_free_controller(hose);
2080 if (cfg_data)
2081 iounmap(cfg_data);
2082 if (mbase)
2083 iounmap(mbase);
2084}
2085
2086static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
2087{
2088 struct ppc4xx_pciex_port *port;
2089 const u32 *pval;
2090 int portno;
2091 unsigned int dcrs;
2092 const char *val;
2093
2094 /* First, proceed to core initialization as we assume there's
2095 * only one PCIe core in the system
2096 */
2097 if (ppc4xx_pciex_check_core_init(np))
2098 return;
2099
2100 /* Get the port number from the device-tree */
2101 pval = of_get_property(np, "port", NULL);
2102 if (pval == NULL) {
2103 printk(KERN_ERR "PCIE: Can't find port number for %s\n",
2104 np->full_name);
2105 return;
2106 }
2107 portno = *pval;
2108 if (portno >= ppc4xx_pciex_port_count) {
2109 printk(KERN_ERR "PCIE: port number out of range for %s\n",
2110 np->full_name);
2111 return;
2112 }
2113 port = &ppc4xx_pciex_ports[portno];
2114 port->index = portno;
2115
2116 /*
2117 * Check if device is enabled
2118 */
2119 if (!of_device_is_available(np)) {
2120 printk(KERN_INFO "PCIE%d: Port disabled via device-tree\n", port->index);
2121 return;
2122 }
2123
2124 port->node = of_node_get(np);
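	/* SoC families whose hwops set want_sdr need the base of their
	 * PESDRn_* register block, taken from the "sdr-base" property.
	 */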
2125 if (ppc4xx_pciex_hwops->want_sdr) {
2126 pval = of_get_property(np, "sdr-base", NULL);
2127 if (pval == NULL) {
2128 printk(KERN_ERR "PCIE: missing sdr-base for %s\n",
2129 np->full_name);
2130 return;
2131 }
2132 port->sdr_base = *pval;
2133 }
2134
2135 	/* Check if the device_type property is set to "pci" or
2136 	 * "pci-endpoint". Depending on this, the PCIe port will be
2137 	 * configured as root-complex or as endpoint.
2138 	 */
2139 	val = of_get_property(port->node, "device_type", NULL);
2140 	if (val && !strcmp(val, "pci-endpoint")) {
2141 		port->endpoint = 1;
2142 	} else if (val && !strcmp(val, "pci")) {
2143 port->endpoint = 0;
2144 } else {
2145 printk(KERN_ERR "PCIE: missing or incorrect device_type for %s\n",
2146 np->full_name);
2147 return;
2148 }
2149
2150 /* Fetch config space registers address */
2151 if (of_address_to_resource(np, 0, &port->cfg_space)) {
2152 		printk(KERN_ERR "%s: Can't get PCI-E config space !\n",
2153 np->full_name);
2154 return;
2155 }
2156 /* Fetch host bridge internal registers address */
2157 if (of_address_to_resource(np, 1, &port->utl_regs)) {
2158 		printk(KERN_ERR "%s: Can't get UTL register base !\n",
2159 np->full_name);
2160 return;
2161 }
2162
2163 /* Map DCRs */
2164 dcrs = dcr_resource_start(np, 0);
2165 if (dcrs == 0) {
2166 		printk(KERN_ERR "%s: Can't get DCR register base !\n",
2167 np->full_name);
2168 return;
2169 }
2170 port->dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
2171
2172 /* Initialize the port specific registers */
2173 if (ppc4xx_pciex_port_init(port)) {
2174 printk(KERN_WARNING "PCIE%d: Port init failed\n", port->index);
2175 return;
2176 }
2177
2178 /* Setup the linux hose data structure */
2179 ppc4xx_pciex_port_setup_hose(port);
2180}
2181
2182#endif /* CONFIG_PPC4xx_PCI_EXPRESS */
2183
2184static int __init ppc4xx_pci_find_bridges(void)
2185{
2186 struct device_node *np;
2187
2188 pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
2189
2190#ifdef CONFIG_PPC4xx_PCI_EXPRESS
2191 for_each_compatible_node(np, NULL, "ibm,plb-pciex")
2192 ppc4xx_probe_pciex_bridge(np);
2193#endif
2194 for_each_compatible_node(np, NULL, "ibm,plb-pcix")
2195 ppc4xx_probe_pcix_bridge(np);
2196 for_each_compatible_node(np, NULL, "ibm,plb-pci")
2197 ppc4xx_probe_pci_bridge(np);
2198
2199 return 0;
2200}
2201arch_initcall(ppc4xx_pci_find_bridges);
2202
1/*
2 * PCI / PCI-X / PCI-Express support for 4xx parts
3 *
4 * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
5 *
6 * Most PCI Express code is coming from Stefan Roese implementation for
7 * arch/ppc in the Denx tree, slightly reworked by me.
8 *
9 * Copyright 2007 DENX Software Engineering, Stefan Roese <sr@denx.de>
10 *
11 * Some of that comes itself from a previous implementation for 440SPE only
12 * by Roland Dreier:
13 *
14 * Copyright (c) 2005 Cisco Systems. All rights reserved.
15 * Roland Dreier <rolandd@cisco.com>
16 *
17 */
18
19#undef DEBUG
20
21#include <linux/kernel.h>
22#include <linux/pci.h>
23#include <linux/init.h>
24#include <linux/of.h>
25#include <linux/bootmem.h>
26#include <linux/delay.h>
27#include <linux/slab.h>
28
29#include <asm/io.h>
30#include <asm/pci-bridge.h>
31#include <asm/machdep.h>
32#include <asm/dcr.h>
33#include <asm/dcr-regs.h>
34#include <mm/mmu_decl.h>
35
36#include "ppc4xx_pci.h"
37
38static int dma_offset_set;
39
40#define U64_TO_U32_LOW(val) ((u32)((val) & 0x00000000ffffffffULL))
41#define U64_TO_U32_HIGH(val) ((u32)((val) >> 32))
42
43#define RES_TO_U32_LOW(val) \
44 ((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_LOW(val) : (val))
45#define RES_TO_U32_HIGH(val) \
46 ((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_HIGH(val) : (0))
47
48static inline int ppc440spe_revA(void)
49{
50 /* Catch both 440SPe variants, with and without RAID6 support */
51 if ((mfspr(SPRN_PVR) & 0xffefffff) == 0x53421890)
52 return 1;
53 else
54 return 0;
55}
56
57static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev)
58{
59 struct pci_controller *hose;
60 int i;
61
62 if (dev->devfn != 0 || dev->bus->self != NULL)
63 return;
64
65 hose = pci_bus_to_host(dev->bus);
66 if (hose == NULL)
67 return;
68
69 if (!of_device_is_compatible(hose->dn, "ibm,plb-pciex") &&
70 !of_device_is_compatible(hose->dn, "ibm,plb-pcix") &&
71 !of_device_is_compatible(hose->dn, "ibm,plb-pci"))
72 return;
73
74 if (of_device_is_compatible(hose->dn, "ibm,plb440epx-pci") ||
75 of_device_is_compatible(hose->dn, "ibm,plb440grx-pci")) {
76 hose->indirect_type |= PPC_INDIRECT_TYPE_BROKEN_MRM;
77 }
78
79 /* Hide the PCI host BARs from the kernel as their content doesn't
80 * fit well in the resource management
81 */
82 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
83 dev->resource[i].start = dev->resource[i].end = 0;
84 dev->resource[i].flags = 0;
85 }
86
87 printk(KERN_INFO "PCI: Hiding 4xx host bridge resources %s\n",
88 pci_name(dev));
89}
90DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, fixup_ppc4xx_pci_bridge);
91
92static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
93 void __iomem *reg,
94 struct resource *res)
95{
96 u64 size;
97 const u32 *ranges;
98 int rlen;
99 int pna = of_n_addr_cells(hose->dn);
100 int np = pna + 5;
101
102 /* Default */
103 res->start = 0;
104 size = 0x80000000;
105 res->end = size - 1;
106 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
107
108 /* Get dma-ranges property */
109 ranges = of_get_property(hose->dn, "dma-ranges", &rlen);
110 if (ranges == NULL)
111 goto out;
112
113 /* Walk it */
114 while ((rlen -= np * 4) >= 0) {
115 u32 pci_space = ranges[0];
116 u64 pci_addr = of_read_number(ranges + 1, 2);
117 u64 cpu_addr = of_translate_dma_address(hose->dn, ranges + 3);
118 size = of_read_number(ranges + pna + 3, 2);
119 ranges += np;
120 if (cpu_addr == OF_BAD_ADDR || size == 0)
121 continue;
122
123 /* We only care about memory */
124 if ((pci_space & 0x03000000) != 0x02000000)
125 continue;
126
127 /* We currently only support memory at 0, and pci_addr
128 * within 32 bits space
129 */
130 if (cpu_addr != 0 || pci_addr > 0xffffffff) {
131 printk(KERN_WARNING "%s: Ignored unsupported dma range"
132 " 0x%016llx...0x%016llx -> 0x%016llx\n",
133 hose->dn->full_name,
134 pci_addr, pci_addr + size - 1, cpu_addr);
135 continue;
136 }
137
138 /* Check if not prefetchable */
139 if (!(pci_space & 0x40000000))
140 res->flags &= ~IORESOURCE_PREFETCH;
141
142
143 /* Use that */
144 res->start = pci_addr;
145 /* Beware of 32 bits resources */
146 if (sizeof(resource_size_t) == sizeof(u32) &&
147 (pci_addr + size) > 0x100000000ull)
148 res->end = 0xffffffff;
149 else
150 res->end = res->start + size - 1;
151 break;
152 }
153
154 /* We only support one global DMA offset */
155 if (dma_offset_set && pci_dram_offset != res->start) {
156 printk(KERN_ERR "%s: dma-ranges(s) mismatch\n",
157 hose->dn->full_name);
158 return -ENXIO;
159 }
160
161 /* Check that we can fit all of memory as we don't support
162 * DMA bounce buffers
163 */
164 if (size < total_memory) {
165 printk(KERN_ERR "%s: dma-ranges too small "
166 "(size=%llx total_memory=%llx)\n",
167 hose->dn->full_name, size, (u64)total_memory);
168 return -ENXIO;
169 }
170
171 /* Check we are a power of 2 size and that base is a multiple of size*/
172 if ((size & (size - 1)) != 0 ||
173 (res->start & (size - 1)) != 0) {
174 printk(KERN_ERR "%s: dma-ranges unaligned\n",
175 hose->dn->full_name);
176 return -ENXIO;
177 }
178
179 /* Check that we are fully contained within 32 bits space */
180 if (res->end > 0xffffffff) {
181 printk(KERN_ERR "%s: dma-ranges outside of 32 bits space\n",
182 hose->dn->full_name);
183 return -ENXIO;
184 }
185 out:
186 dma_offset_set = 1;
187 pci_dram_offset = res->start;
188 hose->dma_window_base_cur = res->start;
189 hose->dma_window_size = resource_size(res);
190
191 printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n",
192 pci_dram_offset);
193 printk(KERN_INFO "4xx PCI DMA window base to 0x%016llx\n",
194 (unsigned long long)hose->dma_window_base_cur);
195 printk(KERN_INFO "DMA window size 0x%016llx\n",
196 (unsigned long long)hose->dma_window_size);
197 return 0;
198}
199
200/*
201 * 4xx PCI 2.x part
202 */
203
204static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose,
205 void __iomem *reg,
206 u64 plb_addr,
207 u64 pci_addr,
208 u64 size,
209 unsigned int flags,
210 int index)
211{
212 u32 ma, pcila, pciha;
213
214 /* Hack warning ! The "old" PCI 2.x cell only let us configure the low
215 * 32-bit of incoming PLB addresses. The top 4 bits of the 36-bit
216 * address are actually hard wired to a value that appears to depend
217 * on the specific SoC. For example, it's 0 on 440EP and 1 on 440EPx.
218 *
219 * The trick here is we just crop those top bits and ignore them when
220 * programming the chip. That means the device-tree has to be right
221 * for the specific part used (we don't print a warning if it's wrong
222 * but on the other hand, you'll crash quickly enough), but at least
223 * this code should work whatever the hard coded value is
224 */
225 plb_addr &= 0xffffffffull;
226
227 /* Note: Due to the above hack, the test below doesn't actually test
228 * if you address is above 4G, but it tests that address and
229 * (address + size) are both contained in the same 4G
230 */
231 if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
232 size < 0x1000 || (plb_addr & (size - 1)) != 0) {
233 printk(KERN_WARNING "%s: Resource out of range\n",
234 hose->dn->full_name);
235 return -1;
236 }
237 ma = (0xffffffffu << ilog2(size)) | 1;
238 if (flags & IORESOURCE_PREFETCH)
239 ma |= 2;
240
241 pciha = RES_TO_U32_HIGH(pci_addr);
242 pcila = RES_TO_U32_LOW(pci_addr);
243
244 writel(plb_addr, reg + PCIL0_PMM0LA + (0x10 * index));
245 writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * index));
246 writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * index));
247 writel(ma, reg + PCIL0_PMM0MA + (0x10 * index));
248
249 return 0;
250}
251
252static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
253 void __iomem *reg)
254{
255 int i, j, found_isa_hole = 0;
256
257 /* Setup outbound memory windows */
258 for (i = j = 0; i < 3; i++) {
259 struct resource *res = &hose->mem_resources[i];
260 resource_size_t offset = hose->mem_offset[i];
261
262 /* we only care about memory windows */
263 if (!(res->flags & IORESOURCE_MEM))
264 continue;
265 if (j > 2) {
266 printk(KERN_WARNING "%s: Too many ranges\n",
267 hose->dn->full_name);
268 break;
269 }
270
271 /* Configure the resource */
272 if (ppc4xx_setup_one_pci_PMM(hose, reg,
273 res->start,
274 res->start - offset,
275 resource_size(res),
276 res->flags,
277 j) == 0) {
278 j++;
279
280 /* If the resource PCI address is 0 then we have our
281 * ISA memory hole
282 */
283 if (res->start == offset)
284 found_isa_hole = 1;
285 }
286 }
287
288 /* Handle ISA memory hole if not already covered */
289 if (j <= 2 && !found_isa_hole && hose->isa_mem_size)
290 if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0,
291 hose->isa_mem_size, 0, j) == 0)
292 printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
293 hose->dn->full_name);
294}
295
296static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
297 void __iomem *reg,
298 const struct resource *res)
299{
300 resource_size_t size = resource_size(res);
301 u32 sa;
302
303 /* Calculate window size */
304 sa = (0xffffffffu << ilog2(size)) | 1;
305 sa |= 0x1;
306
307 /* RAM is always at 0 local for now */
308 writel(0, reg + PCIL0_PTM1LA);
309 writel(sa, reg + PCIL0_PTM1MS);
310
311 /* Map on PCI side */
312 early_write_config_dword(hose, hose->first_busno, 0,
313 PCI_BASE_ADDRESS_1, res->start);
314 early_write_config_dword(hose, hose->first_busno, 0,
315 PCI_BASE_ADDRESS_2, 0x00000000);
316 early_write_config_word(hose, hose->first_busno, 0,
317 PCI_COMMAND, 0x0006);
318}
319
320static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
321{
322 /* NYI */
323 struct resource rsrc_cfg;
324 struct resource rsrc_reg;
325 struct resource dma_window;
326 struct pci_controller *hose = NULL;
327 void __iomem *reg = NULL;
328 const int *bus_range;
329 int primary = 0;
330
331 /* Check if device is enabled */
332 if (!of_device_is_available(np)) {
333 printk(KERN_INFO "%s: Port disabled via device-tree\n",
334 np->full_name);
335 return;
336 }
337
338 /* Fetch config space registers address */
339 if (of_address_to_resource(np, 0, &rsrc_cfg)) {
340 printk(KERN_ERR "%s: Can't get PCI config register base !",
341 np->full_name);
342 return;
343 }
344 /* Fetch host bridge internal registers address */
345 if (of_address_to_resource(np, 3, &rsrc_reg)) {
346 printk(KERN_ERR "%s: Can't get PCI internal register base !",
347 np->full_name);
348 return;
349 }
350
351 /* Check if primary bridge */
352 if (of_get_property(np, "primary", NULL))
353 primary = 1;
354
355 /* Get bus range if any */
356 bus_range = of_get_property(np, "bus-range", NULL);
357
358 /* Map registers */
359 reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
360 if (reg == NULL) {
361 printk(KERN_ERR "%s: Can't map registers !", np->full_name);
362 goto fail;
363 }
364
365 /* Allocate the host controller data structure */
366 hose = pcibios_alloc_controller(np);
367 if (!hose)
368 goto fail;
369
370 hose->first_busno = bus_range ? bus_range[0] : 0x0;
371 hose->last_busno = bus_range ? bus_range[1] : 0xff;
372
373 /* Setup config space */
374 setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0);
375
376 /* Disable all windows */
377 writel(0, reg + PCIL0_PMM0MA);
378 writel(0, reg + PCIL0_PMM1MA);
379 writel(0, reg + PCIL0_PMM2MA);
380 writel(0, reg + PCIL0_PTM1MS);
381 writel(0, reg + PCIL0_PTM2MS);
382
383 /* Parse outbound mapping resources */
384 pci_process_bridge_OF_ranges(hose, np, primary);
385
386 /* Parse inbound mapping resources */
387 if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
388 goto fail;
389
390 /* Configure outbound ranges POMs */
391 ppc4xx_configure_pci_PMMs(hose, reg);
392
393 /* Configure inbound ranges PIMs */
394 ppc4xx_configure_pci_PTMs(hose, reg, &dma_window);
395
396 /* We don't need the registers anymore */
397 iounmap(reg);
398 return;
399
400 fail:
401 if (hose)
402 pcibios_free_controller(hose);
403 if (reg)
404 iounmap(reg);
405}
406
407/*
408 * 4xx PCI-X part
409 */
410
411static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller *hose,
412 void __iomem *reg,
413 u64 plb_addr,
414 u64 pci_addr,
415 u64 size,
416 unsigned int flags,
417 int index)
418{
419 u32 lah, lal, pciah, pcial, sa;
420
421 if (!is_power_of_2(size) || size < 0x1000 ||
422 (plb_addr & (size - 1)) != 0) {
423 printk(KERN_WARNING "%s: Resource out of range\n",
424 hose->dn->full_name);
425 return -1;
426 }
427
428 /* Calculate register values */
429 lah = RES_TO_U32_HIGH(plb_addr);
430 lal = RES_TO_U32_LOW(plb_addr);
431 pciah = RES_TO_U32_HIGH(pci_addr);
432 pcial = RES_TO_U32_LOW(pci_addr);
433 sa = (0xffffffffu << ilog2(size)) | 0x1;
434
435 /* Program register values */
436 if (index == 0) {
437 writel(lah, reg + PCIX0_POM0LAH);
438 writel(lal, reg + PCIX0_POM0LAL);
439 writel(pciah, reg + PCIX0_POM0PCIAH);
440 writel(pcial, reg + PCIX0_POM0PCIAL);
441 writel(sa, reg + PCIX0_POM0SA);
442 } else {
443 writel(lah, reg + PCIX0_POM1LAH);
444 writel(lal, reg + PCIX0_POM1LAL);
445 writel(pciah, reg + PCIX0_POM1PCIAH);
446 writel(pcial, reg + PCIX0_POM1PCIAL);
447 writel(sa, reg + PCIX0_POM1SA);
448 }
449
450 return 0;
451}
452
453static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
454 void __iomem *reg)
455{
456 int i, j, found_isa_hole = 0;
457
458 /* Setup outbound memory windows */
459 for (i = j = 0; i < 3; i++) {
460 struct resource *res = &hose->mem_resources[i];
461 resource_size_t offset = hose->mem_offset[i];
462
463 /* we only care about memory windows */
464 if (!(res->flags & IORESOURCE_MEM))
465 continue;
466 if (j > 1) {
467 printk(KERN_WARNING "%s: Too many ranges\n",
468 hose->dn->full_name);
469 break;
470 }
471
472 /* Configure the resource */
473 if (ppc4xx_setup_one_pcix_POM(hose, reg,
474 res->start,
475 res->start - offset,
476 resource_size(res),
477 res->flags,
478 j) == 0) {
479 j++;
480
481 /* If the resource PCI address is 0 then we have our
482 * ISA memory hole
483 */
484 if (res->start == offset)
485 found_isa_hole = 1;
486 }
487 }
488
489 /* Handle ISA memory hole if not already covered */
490 if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
491 if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0,
492 hose->isa_mem_size, 0, j) == 0)
493 printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
494 hose->dn->full_name);
495}
496
497static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
498 void __iomem *reg,
499 const struct resource *res,
500 int big_pim,
501 int enable_msi_hole)
502{
503 resource_size_t size = resource_size(res);
504 u32 sa;
505
506 /* RAM is always at 0 */
507 writel(0x00000000, reg + PCIX0_PIM0LAH);
508 writel(0x00000000, reg + PCIX0_PIM0LAL);
509
510 /* Calculate window size */
511 sa = (0xffffffffu << ilog2(size)) | 1;
512 sa |= 0x1;
513 if (res->flags & IORESOURCE_PREFETCH)
514 sa |= 0x2;
515 if (enable_msi_hole)
516 sa |= 0x4;
517 writel(sa, reg + PCIX0_PIM0SA);
518 if (big_pim)
519 writel(0xffffffff, reg + PCIX0_PIM0SAH);
520
521 /* Map on PCI side */
522 writel(0x00000000, reg + PCIX0_BAR0H);
523 writel(res->start, reg + PCIX0_BAR0L);
524 writew(0x0006, reg + PCIX0_COMMAND);
525}
526
527static void __init ppc4xx_probe_pcix_bridge(struct device_node *np)
528{
529 struct resource rsrc_cfg;
530 struct resource rsrc_reg;
531 struct resource dma_window;
532 struct pci_controller *hose = NULL;
533 void __iomem *reg = NULL;
534 const int *bus_range;
535 int big_pim = 0, msi = 0, primary = 0;
536
537 /* Fetch config space registers address */
538 if (of_address_to_resource(np, 0, &rsrc_cfg)) {
539 printk(KERN_ERR "%s:Can't get PCI-X config register base !",
540 np->full_name);
541 return;
542 }
543 /* Fetch host bridge internal registers address */
544 if (of_address_to_resource(np, 3, &rsrc_reg)) {
545 printk(KERN_ERR "%s: Can't get PCI-X internal register base !",
546 np->full_name);
547 return;
548 }
549
550 /* Check if it supports large PIMs (440GX) */
551 if (of_get_property(np, "large-inbound-windows", NULL))
552 big_pim = 1;
553
554 /* Check if we should enable MSIs inbound hole */
555 if (of_get_property(np, "enable-msi-hole", NULL))
556 msi = 1;
557
558 /* Check if primary bridge */
559 if (of_get_property(np, "primary", NULL))
560 primary = 1;
561
562 /* Get bus range if any */
563 bus_range = of_get_property(np, "bus-range", NULL);
564
565 /* Map registers */
566 reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
567 if (reg == NULL) {
568 printk(KERN_ERR "%s: Can't map registers !", np->full_name);
569 goto fail;
570 }
571
572 /* Allocate the host controller data structure */
573 hose = pcibios_alloc_controller(np);
574 if (!hose)
575 goto fail;
576
577 hose->first_busno = bus_range ? bus_range[0] : 0x0;
578 hose->last_busno = bus_range ? bus_range[1] : 0xff;
579
580 /* Setup config space */
581 setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4,
582 PPC_INDIRECT_TYPE_SET_CFG_TYPE);
583
584 /* Disable all windows */
585 writel(0, reg + PCIX0_POM0SA);
586 writel(0, reg + PCIX0_POM1SA);
587 writel(0, reg + PCIX0_POM2SA);
588 writel(0, reg + PCIX0_PIM0SA);
589 writel(0, reg + PCIX0_PIM1SA);
590 writel(0, reg + PCIX0_PIM2SA);
591 if (big_pim) {
592 writel(0, reg + PCIX0_PIM0SAH);
593 writel(0, reg + PCIX0_PIM2SAH);
594 }
595
596 /* Parse outbound mapping resources */
597 pci_process_bridge_OF_ranges(hose, np, primary);
598
599 /* Parse inbound mapping resources */
600 if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
601 goto fail;
602
603 /* Configure outbound ranges POMs */
604 ppc4xx_configure_pcix_POMs(hose, reg);
605
606 /* Configure inbound ranges PIMs */
607 ppc4xx_configure_pcix_PIMs(hose, reg, &dma_window, big_pim, msi);
608
609 /* We don't need the registers anymore */
610 iounmap(reg);
611 return;
612
613 fail:
614 if (hose)
615 pcibios_free_controller(hose);
616 if (reg)
617 iounmap(reg);
618}
619
620#ifdef CONFIG_PPC4xx_PCI_EXPRESS
621
622/*
623 * 4xx PCI-Express part
624 *
625 * We support 3 parts currently based on the compatible property:
626 *
627 * ibm,plb-pciex-440spe
628 * ibm,plb-pciex-405ex
629 * ibm,plb-pciex-460ex
630 *
631 * Anything else will be rejected for now as they are all subtly
632 * different unfortunately.
633 *
634 */
635
636#define MAX_PCIE_BUS_MAPPED 0x40
637
638struct ppc4xx_pciex_port
639{
640 struct pci_controller *hose;
641 struct device_node *node;
642 unsigned int index;
643 int endpoint;
644 int link;
645 int has_ibpre;
646 unsigned int sdr_base;
647 dcr_host_t dcrs;
648 struct resource cfg_space;
649 struct resource utl_regs;
650 void __iomem *utl_base;
651};
652
653static struct ppc4xx_pciex_port *ppc4xx_pciex_ports;
654static unsigned int ppc4xx_pciex_port_count;
655
656struct ppc4xx_pciex_hwops
657{
658 bool want_sdr;
659 int (*core_init)(struct device_node *np);
660 int (*port_init_hw)(struct ppc4xx_pciex_port *port);
661 int (*setup_utl)(struct ppc4xx_pciex_port *port);
662 void (*check_link)(struct ppc4xx_pciex_port *port);
663};
664
665static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops;
666
667static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
668 unsigned int sdr_offset,
669 unsigned int mask,
670 unsigned int value,
671 int timeout_ms)
672{
673 u32 val;
674
675 while(timeout_ms--) {
676 val = mfdcri(SDR0, port->sdr_base + sdr_offset);
677 if ((val & mask) == value) {
678 pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n",
679 port->index, sdr_offset, timeout_ms, val);
680 return 0;
681 }
682 msleep(1);
683 }
684 return -1;
685}
686
687static int __init ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port *port)
688{
689 /* Wait for reset to complete */
690 if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) {
691 printk(KERN_WARNING "PCIE%d: PGRST failed\n",
692 port->index);
693 return -1;
694 }
695 return 0;
696}
697
698
699static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port)
700{
701 printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
702
703 /* Check for card presence detect if supported, if not, just wait for
704 * link unconditionally.
705 *
706 * note that we don't fail if there is no link, we just filter out
707 * config space accesses. That way, it will be easier to implement
708 * hotplug later on.
709 */
710 if (!port->has_ibpre ||
711 !ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
712 1 << 28, 1 << 28, 100)) {
713 printk(KERN_INFO
714 "PCIE%d: Device detected, waiting for link...\n",
715 port->index);
716 if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
717 0x1000, 0x1000, 2000))
718 printk(KERN_WARNING
719 "PCIE%d: Link up failed\n", port->index);
720 else {
721 printk(KERN_INFO
722 "PCIE%d: link is up !\n", port->index);
723 port->link = 1;
724 }
725 } else
726 printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
727}
728
729#ifdef CONFIG_44x
730
731/* Check various reset bits of the 440SPe PCIe core */
732static int __init ppc440spe_pciex_check_reset(struct device_node *np)
733{
734 u32 valPE0, valPE1, valPE2;
735 int err = 0;
736
737 /* SDR0_PEGPLLLCT1 reset */
738 if (!(mfdcri(SDR0, PESDR0_PLLLCT1) & 0x01000000)) {
739 /*
740 * the PCIe core was probably already initialised
741 * by firmware - let's re-reset RCSSET regs
742 *
743 * -- Shouldn't we also re-reset the whole thing ? -- BenH
744 */
745 pr_debug("PCIE: SDR0_PLLLCT1 already reset.\n");
746 mtdcri(SDR0, PESDR0_440SPE_RCSSET, 0x01010000);
747 mtdcri(SDR0, PESDR1_440SPE_RCSSET, 0x01010000);
748 mtdcri(SDR0, PESDR2_440SPE_RCSSET, 0x01010000);
749 }
750
751 valPE0 = mfdcri(SDR0, PESDR0_440SPE_RCSSET);
752 valPE1 = mfdcri(SDR0, PESDR1_440SPE_RCSSET);
753 valPE2 = mfdcri(SDR0, PESDR2_440SPE_RCSSET);
754
755 /* SDR0_PExRCSSET rstgu */
756 if (!(valPE0 & 0x01000000) ||
757 !(valPE1 & 0x01000000) ||
758 !(valPE2 & 0x01000000)) {
759 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstgu error\n");
760 err = -1;
761 }
762
763 /* SDR0_PExRCSSET rstdl */
764 if (!(valPE0 & 0x00010000) ||
765 !(valPE1 & 0x00010000) ||
766 !(valPE2 & 0x00010000)) {
767 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstdl error\n");
768 err = -1;
769 }
770
771 /* SDR0_PExRCSSET rstpyn */
772 if ((valPE0 & 0x00001000) ||
773 (valPE1 & 0x00001000) ||
774 (valPE2 & 0x00001000)) {
775 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstpyn error\n");
776 err = -1;
777 }
778
779 /* SDR0_PExRCSSET hldplb */
780 if ((valPE0 & 0x10000000) ||
781 (valPE1 & 0x10000000) ||
782 (valPE2 & 0x10000000)) {
783 printk(KERN_INFO "PCIE: SDR0_PExRCSSET hldplb error\n");
784 err = -1;
785 }
786
787 /* SDR0_PExRCSSET rdy */
788 if ((valPE0 & 0x00100000) ||
789 (valPE1 & 0x00100000) ||
790 (valPE2 & 0x00100000)) {
791 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rdy error\n");
792 err = -1;
793 }
794
795 /* SDR0_PExRCSSET shutdown */
796 if ((valPE0 & 0x00000100) ||
797 (valPE1 & 0x00000100) ||
798 (valPE2 & 0x00000100)) {
799 printk(KERN_INFO "PCIE: SDR0_PExRCSSET shutdown error\n");
800 err = -1;
801 }
802
803 return err;
804}
805
806/* Global PCIe core initializations for 440SPe core */
807static int __init ppc440spe_pciex_core_init(struct device_node *np)
808{
809 int time_out = 20;
810
811 /* Set PLL clock receiver to LVPECL */
812 dcri_clrset(SDR0, PESDR0_PLLLCT1, 0, 1 << 28);
813
814 /* Shouldn't we do all the calibration stuff etc... here ? */
815 if (ppc440spe_pciex_check_reset(np))
816 return -ENXIO;
817
818 if (!(mfdcri(SDR0, PESDR0_PLLLCT2) & 0x10000)) {
819 printk(KERN_INFO "PCIE: PESDR_PLLCT2 resistance calibration "
820 "failed (0x%08x)\n",
821 mfdcri(SDR0, PESDR0_PLLLCT2));
822 return -1;
823 }
824
825 /* De-assert reset of PCIe PLL, wait for lock */
826 dcri_clrset(SDR0, PESDR0_PLLLCT1, 1 << 24, 0);
827 udelay(3);
828
829 while (time_out) {
830 if (!(mfdcri(SDR0, PESDR0_PLLLCT3) & 0x10000000)) {
831 time_out--;
832 udelay(1);
833 } else
834 break;
835 }
836 if (!time_out) {
837 printk(KERN_INFO "PCIE: VCO output not locked\n");
838 return -1;
839 }
840
841 pr_debug("PCIE initialization OK\n");
842
843 return 3;
844}
845
846static int __init ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
847{
848 u32 val = 1 << 24;
849
850 if (port->endpoint)
851 val = PTYPE_LEGACY_ENDPOINT << 20;
852 else
853 val = PTYPE_ROOT_PORT << 20;
854
855 if (port->index == 0)
856 val |= LNKW_X8 << 12;
857 else
858 val |= LNKW_X4 << 12;
859
860 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
861 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x20222222);
862 if (ppc440spe_revA())
863 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x11000000);
864 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL0SET1, 0x35000000);
865 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL1SET1, 0x35000000);
866 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL2SET1, 0x35000000);
867 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL3SET1, 0x35000000);
868 if (port->index == 0) {
869 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL4SET1,
870 0x35000000);
871 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL5SET1,
872 0x35000000);
873 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL6SET1,
874 0x35000000);
875 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL7SET1,
876 0x35000000);
877 }
878 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
879 (1 << 24) | (1 << 16), 1 << 12);
880
881 return ppc4xx_pciex_port_reset_sdr(port);
882}
883
884static int __init ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
885{
886 return ppc440spe_pciex_init_port_hw(port);
887}
888
889static int __init ppc440speB_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
890{
891 int rc = ppc440spe_pciex_init_port_hw(port);
892
893 port->has_ibpre = 1;
894
895 return rc;
896}
897
898static int ppc440speA_pciex_init_utl(struct ppc4xx_pciex_port *port)
899{
900 /* XXX Check what that value means... I hate magic */
901 dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x68782800);
902
903 /*
904 * Set buffer allocations and then assert VRB and TXE.
905 */
906 out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000);
907 out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
908 out_be32(port->utl_base + PEUTL_OPDBSZ, 0x10000000);
909 out_be32(port->utl_base + PEUTL_PBBSZ, 0x53000000);
910 out_be32(port->utl_base + PEUTL_IPHBSZ, 0x08000000);
911 out_be32(port->utl_base + PEUTL_IPDBSZ, 0x10000000);
912 out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
913 out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
914
915 return 0;
916}
917
918static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port)
919{
920 /* Report CRS to the operating system */
921 out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000);
922
923 return 0;
924}
925
926static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
927{
928 .want_sdr = true,
929 .core_init = ppc440spe_pciex_core_init,
930 .port_init_hw = ppc440speA_pciex_init_port_hw,
931 .setup_utl = ppc440speA_pciex_init_utl,
932 .check_link = ppc4xx_pciex_check_link_sdr,
933};
934
935static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
936{
937 .want_sdr = true,
938 .core_init = ppc440spe_pciex_core_init,
939 .port_init_hw = ppc440speB_pciex_init_port_hw,
940 .setup_utl = ppc440speB_pciex_init_utl,
941 .check_link = ppc4xx_pciex_check_link_sdr,
942};
943
944static int __init ppc460ex_pciex_core_init(struct device_node *np)
945{
946 /* Nothing to do, return 2 ports */
947 return 2;
948}
949
950static int __init ppc460ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
951{
952 u32 val;
953 u32 utlset1;
954
955 if (port->endpoint)
956 val = PTYPE_LEGACY_ENDPOINT << 20;
957 else
958 val = PTYPE_ROOT_PORT << 20;
959
960 if (port->index == 0) {
961 val |= LNKW_X1 << 12;
962 utlset1 = 0x20000000;
963 } else {
964 val |= LNKW_X4 << 12;
965 utlset1 = 0x20101101;
966 }
967
968 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
969 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, utlset1);
970 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01210000);
971
972 switch (port->index) {
973 case 0:
974 mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
975 mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
976 mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);
977
978 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST,0x10000000);
979 break;
980
981 case 1:
982 mtdcri(SDR0, PESDR1_460EX_L0CDRCTL, 0x00003230);
983 mtdcri(SDR0, PESDR1_460EX_L1CDRCTL, 0x00003230);
984 mtdcri(SDR0, PESDR1_460EX_L2CDRCTL, 0x00003230);
985 mtdcri(SDR0, PESDR1_460EX_L3CDRCTL, 0x00003230);
986 mtdcri(SDR0, PESDR1_460EX_L0DRV, 0x00000130);
987 mtdcri(SDR0, PESDR1_460EX_L1DRV, 0x00000130);
988 mtdcri(SDR0, PESDR1_460EX_L2DRV, 0x00000130);
989 mtdcri(SDR0, PESDR1_460EX_L3DRV, 0x00000130);
990 mtdcri(SDR0, PESDR1_460EX_L0CLK, 0x00000006);
991 mtdcri(SDR0, PESDR1_460EX_L1CLK, 0x00000006);
992 mtdcri(SDR0, PESDR1_460EX_L2CLK, 0x00000006);
993 mtdcri(SDR0, PESDR1_460EX_L3CLK, 0x00000006);
994
995 mtdcri(SDR0, PESDR1_460EX_PHY_CTL_RST,0x10000000);
996 break;
997 }
998
999 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1000 mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
1001 (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));
1002
1003 /* Poll for PHY reset */
1004 /* XXX FIXME add timeout */
1005 switch (port->index) {
1006 case 0:
1007 while (!(mfdcri(SDR0, PESDR0_460EX_RSTSTA) & 0x1))
1008 udelay(10);
1009 break;
1010 case 1:
1011 while (!(mfdcri(SDR0, PESDR1_460EX_RSTSTA) & 0x1))
1012 udelay(10);
1013 break;
1014 }
1015
1016 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1017 (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
1018 ~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
1019 PESDRx_RCSSET_RSTPYN);
1020
1021 port->has_ibpre = 1;
1022
1023 return ppc4xx_pciex_port_reset_sdr(port);
1024}
1025
1026static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
1027{
1028 dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
1029
1030 /*
1031 * Set buffer allocations and then assert VRB and TXE.
1032 */
1033 out_be32(port->utl_base + PEUTL_PBCTL, 0x0800000c);
1034 out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000);
1035 out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
1036 out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000);
1037 out_be32(port->utl_base + PEUTL_PBBSZ, 0x00000000);
1038 out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000);
1039 out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000);
1040 out_be32(port->utl_base + PEUTL_RCIRQEN,0x00f00000);
1041 out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
1042
1043 return 0;
1044}
1045
1046static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
1047{
1048 .want_sdr = true,
1049 .core_init = ppc460ex_pciex_core_init,
1050 .port_init_hw = ppc460ex_pciex_init_port_hw,
1051 .setup_utl = ppc460ex_pciex_init_utl,
1052 .check_link = ppc4xx_pciex_check_link_sdr,
1053};
1054
1055static int __init apm821xx_pciex_core_init(struct device_node *np)
1056{
1057 /* Return the number of pcie port */
1058 return 1;
1059}
1060
1061static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1062{
1063 u32 val;
1064
1065 /*
1066 * Do a software reset on PCIe ports.
1067 * This code is to fix the issue that pci drivers doesn't re-assign
1068 * bus number for PCIE devices after Uboot
1069 * scanned and configured all the buses (eg. PCIE NIC IntelPro/1000
1070 * PT quad port, SAS LSI 1064E)
1071 */
1072
1073 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x0);
1074 mdelay(10);
1075
1076 if (port->endpoint)
1077 val = PTYPE_LEGACY_ENDPOINT << 20;
1078 else
1079 val = PTYPE_ROOT_PORT << 20;
1080
1081 val |= LNKW_X1 << 12;
1082
1083 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
1084 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
1085 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
1086
1087 mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
1088 mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
1089 mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);
1090
1091 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x10000000);
1092 mdelay(50);
1093 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x30000000);
1094
1095 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1096 mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
1097 (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));
1098
1099 /* Poll for PHY reset */
1100 val = PESDR0_460EX_RSTSTA - port->sdr_base;
1101 if (ppc4xx_pciex_wait_on_sdr(port, val, 0x1, 1, 100)) {
1102 printk(KERN_WARNING "%s: PCIE: Can't reset PHY\n", __func__);
1103 return -EBUSY;
1104 } else {
1105 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1106 (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
1107 ~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
1108 PESDRx_RCSSET_RSTPYN);
1109
1110 port->has_ibpre = 1;
1111 return 0;
1112 }
1113}
1114
1115static struct ppc4xx_pciex_hwops apm821xx_pcie_hwops __initdata = {
1116 .want_sdr = true,
1117 .core_init = apm821xx_pciex_core_init,
1118 .port_init_hw = apm821xx_pciex_init_port_hw,
1119 .setup_utl = ppc460ex_pciex_init_utl,
1120 .check_link = ppc4xx_pciex_check_link_sdr,
1121};
1122
1123static int __init ppc460sx_pciex_core_init(struct device_node *np)
1124{
1125 /* HSS drive amplitude */
1126 mtdcri(SDR0, PESDR0_460SX_HSSL0DAMP, 0xB9843211);
1127 mtdcri(SDR0, PESDR0_460SX_HSSL1DAMP, 0xB9843211);
1128 mtdcri(SDR0, PESDR0_460SX_HSSL2DAMP, 0xB9843211);
1129 mtdcri(SDR0, PESDR0_460SX_HSSL3DAMP, 0xB9843211);
1130 mtdcri(SDR0, PESDR0_460SX_HSSL4DAMP, 0xB9843211);
1131 mtdcri(SDR0, PESDR0_460SX_HSSL5DAMP, 0xB9843211);
1132 mtdcri(SDR0, PESDR0_460SX_HSSL6DAMP, 0xB9843211);
1133 mtdcri(SDR0, PESDR0_460SX_HSSL7DAMP, 0xB9843211);
1134
1135 mtdcri(SDR0, PESDR1_460SX_HSSL0DAMP, 0xB9843211);
1136 mtdcri(SDR0, PESDR1_460SX_HSSL1DAMP, 0xB9843211);
1137 mtdcri(SDR0, PESDR1_460SX_HSSL2DAMP, 0xB9843211);
1138 mtdcri(SDR0, PESDR1_460SX_HSSL3DAMP, 0xB9843211);
1139
1140 mtdcri(SDR0, PESDR2_460SX_HSSL0DAMP, 0xB9843211);
1141 mtdcri(SDR0, PESDR2_460SX_HSSL1DAMP, 0xB9843211);
1142 mtdcri(SDR0, PESDR2_460SX_HSSL2DAMP, 0xB9843211);
1143 mtdcri(SDR0, PESDR2_460SX_HSSL3DAMP, 0xB9843211);
1144
1145 /* HSS TX pre-emphasis */
1146 mtdcri(SDR0, PESDR0_460SX_HSSL0COEFA, 0xDCB98987);
1147 mtdcri(SDR0, PESDR0_460SX_HSSL1COEFA, 0xDCB98987);
1148 mtdcri(SDR0, PESDR0_460SX_HSSL2COEFA, 0xDCB98987);
1149 mtdcri(SDR0, PESDR0_460SX_HSSL3COEFA, 0xDCB98987);
1150 mtdcri(SDR0, PESDR0_460SX_HSSL4COEFA, 0xDCB98987);
1151 mtdcri(SDR0, PESDR0_460SX_HSSL5COEFA, 0xDCB98987);
1152 mtdcri(SDR0, PESDR0_460SX_HSSL6COEFA, 0xDCB98987);
1153 mtdcri(SDR0, PESDR0_460SX_HSSL7COEFA, 0xDCB98987);
1154
1155 mtdcri(SDR0, PESDR1_460SX_HSSL0COEFA, 0xDCB98987);
1156 mtdcri(SDR0, PESDR1_460SX_HSSL1COEFA, 0xDCB98987);
1157 mtdcri(SDR0, PESDR1_460SX_HSSL2COEFA, 0xDCB98987);
1158 mtdcri(SDR0, PESDR1_460SX_HSSL3COEFA, 0xDCB98987);
1159
1160 mtdcri(SDR0, PESDR2_460SX_HSSL0COEFA, 0xDCB98987);
1161 mtdcri(SDR0, PESDR2_460SX_HSSL1COEFA, 0xDCB98987);
1162 mtdcri(SDR0, PESDR2_460SX_HSSL2COEFA, 0xDCB98987);
1163 mtdcri(SDR0, PESDR2_460SX_HSSL3COEFA, 0xDCB98987);
1164
1165 /* HSS TX calibration control */
1166 mtdcri(SDR0, PESDR0_460SX_HSSL1CALDRV, 0x22222222);
1167 mtdcri(SDR0, PESDR1_460SX_HSSL1CALDRV, 0x22220000);
1168 mtdcri(SDR0, PESDR2_460SX_HSSL1CALDRV, 0x22220000);
1169
1170 /* HSS TX slew control */
1171 mtdcri(SDR0, PESDR0_460SX_HSSSLEW, 0xFFFFFFFF);
1172 mtdcri(SDR0, PESDR1_460SX_HSSSLEW, 0xFFFF0000);
1173 mtdcri(SDR0, PESDR2_460SX_HSSSLEW, 0xFFFF0000);
1174
1175 /* Set HSS PRBS enabled */
1176 mtdcri(SDR0, PESDR0_460SX_HSSCTLSET, 0x00001130);
1177 mtdcri(SDR0, PESDR2_460SX_HSSCTLSET, 0x00001130);
1178
1179 udelay(100);
1180
1181 /* De-assert PLLRESET */
1182 dcri_clrset(SDR0, PESDR0_PLLLCT2, 0x00000100, 0);
1183
1184 /* Reset DL, UTL, GPL before configuration */
1185 mtdcri(SDR0, PESDR0_460SX_RCSSET,
1186 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1187 mtdcri(SDR0, PESDR1_460SX_RCSSET,
1188 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1189 mtdcri(SDR0, PESDR2_460SX_RCSSET,
1190 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1191
1192 udelay(100);
1193
1194 /*
1195 * If bifurcation is not enabled, u-boot would have disabled the
1196 * third PCIe port
1197 */
1198 if (((mfdcri(SDR0, PESDR1_460SX_HSSCTLSET) & 0x00000001) ==
1199 0x00000001)) {
1200 printk(KERN_INFO "PCI: PCIE bifurcation setup successfully.\n");
1201 printk(KERN_INFO "PCI: Total 3 PCIE ports are present\n");
1202 return 3;
1203 }
1204
1205 printk(KERN_INFO "PCI: Total 2 PCIE ports are present\n");
1206 return 2;
1207}
1208
1209static int __init ppc460sx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1210{
1211
1212 if (port->endpoint)
1213 dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
1214 0x01000000, 0);
1215 else
1216 dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
1217 0, 0x01000000);
1218
1219 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
1220 (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL),
1221 PESDRx_RCSSET_RSTPYN);
1222
1223 port->has_ibpre = 1;
1224
1225 return ppc4xx_pciex_port_reset_sdr(port);
1226}
1227
1228static int ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port *port)
1229{
1230 /* Max 128 Bytes */
1231 out_be32 (port->utl_base + PEUTL_PBBSZ, 0x00000000);
1232 /* Assert VRB and TXE - per datasheet turn off addr validation */
1233 out_be32(port->utl_base + PEUTL_PCTL, 0x80800000);
1234 return 0;
1235}
1236
1237static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
1238{
1239 void __iomem *mbase;
1240 int attempt = 50;
1241
1242 port->link = 0;
1243
1244 mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
1245 if (mbase == NULL) {
1246 printk(KERN_ERR "%s: Can't map internal config space !",
1247 port->node->full_name);
1248 goto done;
1249 }
1250
1251 while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA)
1252 & PECFG_460SX_DLLSTA_LINKUP))) {
1253 attempt--;
1254 mdelay(10);
1255 }
1256 if (attempt)
1257 port->link = 1;
1258done:
1259 iounmap(mbase);
1260
1261}
1262
1263static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
1264 .want_sdr = true,
1265 .core_init = ppc460sx_pciex_core_init,
1266 .port_init_hw = ppc460sx_pciex_init_port_hw,
1267 .setup_utl = ppc460sx_pciex_init_utl,
1268 .check_link = ppc460sx_pciex_check_link,
1269};
1270
1271#endif /* CONFIG_44x */
1272
1273#ifdef CONFIG_40x
1274
1275static int __init ppc405ex_pciex_core_init(struct device_node *np)
1276{
1277 /* Nothing to do, return 2 ports */
1278 return 2;
1279}
1280
1281static void ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port)
1282{
1283 /* Assert the PE0_PHY reset */
1284 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000);
1285 msleep(1);
1286
1287 /* deassert the PE0_hotreset */
1288 if (port->endpoint)
1289 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01111000);
1290 else
1291 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01101000);
1292
1293 /* poll for phy !reset */
1294 /* XXX FIXME add timeout */
1295 while (!(mfdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSTA) & 0x00001000))
1296 ;
1297
1298 /* deassert the PE0_gpl_utl_reset */
1299 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x00101000);
1300}
1301
1302static int __init ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1303{
1304 u32 val;
1305
1306 if (port->endpoint)
1307 val = PTYPE_LEGACY_ENDPOINT;
1308 else
1309 val = PTYPE_ROOT_PORT;
1310
1311 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET,
1312 1 << 24 | val << 20 | LNKW_X1 << 12);
1313
1314 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
1315 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
1316 mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET1, 0x720F0000);
1317 mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET2, 0x70600003);
1318
1319 /*
1320 * Only reset the PHY when no link is currently established.
1321 * This is for the Atheros PCIe board which has problems to establish
1322 * the link (again) after this PHY reset. All other currently tested
1323 * PCIe boards don't show this problem.
1324 * This has to be re-tested and fixed in a later release!
1325 */
1326 val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP);
1327 if (!(val & 0x00001000))
1328 ppc405ex_pcie_phy_reset(port);
1329
1330 dcr_write(port->dcrs, DCRO_PEGPL_CFG, 0x10000000); /* guarded on */
1331
1332 port->has_ibpre = 1;
1333
1334 return ppc4xx_pciex_port_reset_sdr(port);
1335}
1336
1337static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
1338{
1339 dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
1340
1341 /*
1342 * Set buffer allocations and then assert VRB and TXE.
1343 */
1344 out_be32(port->utl_base + PEUTL_OUTTR, 0x02000000);
1345 out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
1346 out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000);
1347 out_be32(port->utl_base + PEUTL_PBBSZ, 0x21000000);
1348 out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000);
1349 out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000);
1350 out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
1351 out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
1352
1353 out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000);
1354
1355 return 0;
1356}
1357
1358static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
1359{
1360 .want_sdr = true,
1361 .core_init = ppc405ex_pciex_core_init,
1362 .port_init_hw = ppc405ex_pciex_init_port_hw,
1363 .setup_utl = ppc405ex_pciex_init_utl,
1364 .check_link = ppc4xx_pciex_check_link_sdr,
1365};
1366
1367#endif /* CONFIG_40x */
1368
1369#ifdef CONFIG_476FPE
1370static int __init ppc_476fpe_pciex_core_init(struct device_node *np)
1371{
1372 return 4;
1373}
1374
1375static void __init ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port *port)
1376{
1377 u32 timeout_ms = 20;
1378 u32 val = 0, mask = (PECFG_TLDLP_LNKUP|PECFG_TLDLP_PRESENT);
1379 void __iomem *mbase = ioremap(port->cfg_space.start + 0x10000000,
1380 0x1000);
1381
1382 printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
1383
1384 if (mbase == NULL) {
1385 printk(KERN_WARNING "PCIE%d: failed to get cfg space\n",
1386 port->index);
1387 return;
1388 }
1389
1390 while (timeout_ms--) {
1391 val = in_le32(mbase + PECFG_TLDLP);
1392
1393 if ((val & mask) == mask)
1394 break;
1395 msleep(10);
1396 }
1397
1398 if (val & PECFG_TLDLP_PRESENT) {
1399 printk(KERN_INFO "PCIE%d: link is up !\n", port->index);
1400 port->link = 1;
1401 } else
1402 printk(KERN_WARNING "PCIE%d: Link up failed\n", port->index);
1403
1404 iounmap(mbase);
1405 return;
1406}
1407
1408static struct ppc4xx_pciex_hwops ppc_476fpe_pcie_hwops __initdata =
1409{
1410 .core_init = ppc_476fpe_pciex_core_init,
1411 .check_link = ppc_476fpe_pciex_check_link,
1412};
1413#endif /* CONFIG_476FPE */
1414
1415/* Check that the core has been initied and if not, do it */
1416static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
1417{
1418 static int core_init;
1419 int count = -ENODEV;
1420
1421 if (core_init++)
1422 return 0;
1423
1424#ifdef CONFIG_44x
1425 if (of_device_is_compatible(np, "ibm,plb-pciex-440spe")) {
1426 if (ppc440spe_revA())
1427 ppc4xx_pciex_hwops = &ppc440speA_pcie_hwops;
1428 else
1429 ppc4xx_pciex_hwops = &ppc440speB_pcie_hwops;
1430 }
1431 if (of_device_is_compatible(np, "ibm,plb-pciex-460ex"))
1432 ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops;
1433 if (of_device_is_compatible(np, "ibm,plb-pciex-460sx"))
1434 ppc4xx_pciex_hwops = &ppc460sx_pcie_hwops;
1435 if (of_device_is_compatible(np, "ibm,plb-pciex-apm821xx"))
1436 ppc4xx_pciex_hwops = &apm821xx_pcie_hwops;
1437#endif /* CONFIG_44x */
1438#ifdef CONFIG_40x
1439 if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
1440 ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops;
1441#endif
1442#ifdef CONFIG_476FPE
1443 if (of_device_is_compatible(np, "ibm,plb-pciex-476fpe"))
1444 ppc4xx_pciex_hwops = &ppc_476fpe_pcie_hwops;
1445#endif
1446 if (ppc4xx_pciex_hwops == NULL) {
1447 printk(KERN_WARNING "PCIE: unknown host type %s\n",
1448 np->full_name);
1449 return -ENODEV;
1450 }
1451
1452 count = ppc4xx_pciex_hwops->core_init(np);
1453 if (count > 0) {
1454 ppc4xx_pciex_ports =
1455 kzalloc(count * sizeof(struct ppc4xx_pciex_port),
1456 GFP_KERNEL);
1457 if (ppc4xx_pciex_ports) {
1458 ppc4xx_pciex_port_count = count;
1459 return 0;
1460 }
1461 printk(KERN_WARNING "PCIE: failed to allocate ports array\n");
1462 return -ENOMEM;
1463 }
1464 return -ENODEV;
1465}
1466
1467static void __init ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port *port)
1468{
1469 /* We map PCI Express configuration based on the reg property */
1470 dcr_write(port->dcrs, DCRO_PEGPL_CFGBAH,
1471 RES_TO_U32_HIGH(port->cfg_space.start));
1472 dcr_write(port->dcrs, DCRO_PEGPL_CFGBAL,
1473 RES_TO_U32_LOW(port->cfg_space.start));
1474
1475 /* XXX FIXME: Use size from reg property. For now, map 512M */
1476 dcr_write(port->dcrs, DCRO_PEGPL_CFGMSK, 0xe0000001);
1477
1478 /* We map UTL registers based on the reg property */
1479 dcr_write(port->dcrs, DCRO_PEGPL_REGBAH,
1480 RES_TO_U32_HIGH(port->utl_regs.start));
1481 dcr_write(port->dcrs, DCRO_PEGPL_REGBAL,
1482 RES_TO_U32_LOW(port->utl_regs.start));
1483
1484 /* XXX FIXME: Use size from reg property */
1485 dcr_write(port->dcrs, DCRO_PEGPL_REGMSK, 0x00007001);
1486
1487 /* Disable all other outbound windows */
1488 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, 0);
1489 dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, 0);
1490 dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0);
1491 dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0);
1492}
1493
1494static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
1495{
1496 int rc = 0;
1497
1498 /* Init HW */
1499 if (ppc4xx_pciex_hwops->port_init_hw)
1500 rc = ppc4xx_pciex_hwops->port_init_hw(port);
1501 if (rc != 0)
1502 return rc;
1503
1504 /*
1505 * Initialize mapping: disable all regions and configure
1506 * CFG and REG regions based on resources in the device tree
1507 */
1508 ppc4xx_pciex_port_init_mapping(port);
1509
1510 if (ppc4xx_pciex_hwops->check_link)
1511 ppc4xx_pciex_hwops->check_link(port);
1512
1513 /*
1514 * Map UTL
1515 */
1516 port->utl_base = ioremap(port->utl_regs.start, 0x100);
1517 BUG_ON(port->utl_base == NULL);
1518
1519 /*
1520 * Setup UTL registers --BenH.
1521 */
1522 if (ppc4xx_pciex_hwops->setup_utl)
1523 ppc4xx_pciex_hwops->setup_utl(port);
1524
1525 /*
1526 * Check for VC0 active or PLL Locked and assert RDY.
1527 */
1528 if (port->sdr_base) {
1529 if (of_device_is_compatible(port->node,
1530 "ibm,plb-pciex-460sx")){
1531 if (port->link && ppc4xx_pciex_wait_on_sdr(port,
1532 PESDRn_RCSSTS,
1533 1 << 12, 1 << 12, 5000)) {
1534 printk(KERN_INFO "PCIE%d: PLL not locked\n",
1535 port->index);
1536 port->link = 0;
1537 }
1538 } else if (port->link &&
1539 ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS,
1540 1 << 16, 1 << 16, 5000)) {
1541 printk(KERN_INFO "PCIE%d: VC0 not active\n",
1542 port->index);
1543 port->link = 0;
1544 }
1545
1546 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20);
1547 }
1548
1549 msleep(100);
1550
1551 return 0;
1552}
1553
1554static int ppc4xx_pciex_validate_bdf(struct ppc4xx_pciex_port *port,
1555 struct pci_bus *bus,
1556 unsigned int devfn)
1557{
1558 static int message;
1559
1560 /* Endpoint can not generate upstream(remote) config cycles */
1561 if (port->endpoint && bus->number != port->hose->first_busno)
1562 return PCIBIOS_DEVICE_NOT_FOUND;
1563
1564 /* Check we are within the mapped range */
1565 if (bus->number > port->hose->last_busno) {
1566 if (!message) {
1567 printk(KERN_WARNING "Warning! Probing bus %u"
1568			       " out of range!\n", bus->number);
1569 message++;
1570 }
1571 return PCIBIOS_DEVICE_NOT_FOUND;
1572 }
1573
1574 /* The root complex has only one device / function */
1575 if (bus->number == port->hose->first_busno && devfn != 0)
1576 return PCIBIOS_DEVICE_NOT_FOUND;
1577
1578 /* The other side of the RC has only one device as well */
1579 if (bus->number == (port->hose->first_busno + 1) &&
1580 PCI_SLOT(devfn) != 0)
1581 return PCIBIOS_DEVICE_NOT_FOUND;
1582
1583 /* Check if we have a link */
1584 if ((bus->number != port->hose->first_busno) && !port->link)
1585 return PCIBIOS_DEVICE_NOT_FOUND;
1586
1587 return 0;
1588}
1589
1590static void __iomem *ppc4xx_pciex_get_config_base(struct ppc4xx_pciex_port *port,
1591 struct pci_bus *bus,
1592 unsigned int devfn)
1593{
1594 int relbus;
1595
1596	/* Remove the casts when the volatile qualifier is finally removed
1597	 * from struct pci_controller
1598	 */
1599 if (bus->number == port->hose->first_busno)
1600 return (void __iomem *)port->hose->cfg_addr;
1601
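	/* Other busses go through the external config window: 1MB per bus,
	 * with one 4KB config space per devfn inside it.
	 */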
1602 relbus = bus->number - (port->hose->first_busno + 1);
1603 return (void __iomem *)port->hose->cfg_data +
1604 ((relbus << 20) | (devfn << 12));
1605}
1606
1607static int ppc4xx_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
1608 int offset, int len, u32 *val)
1609{
1610 struct pci_controller *hose = pci_bus_to_host(bus);
1611 struct ppc4xx_pciex_port *port =
1612 &ppc4xx_pciex_ports[hose->indirect_type];
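	/* (the port index is stashed in hose->indirect_type at hose setup) */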
1613 void __iomem *addr;
1614 u32 gpl_cfg;
1615
1616 BUG_ON(hose != port->hose);
1617
1618 if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
1619 return PCIBIOS_DEVICE_NOT_FOUND;
1620
1621 addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
1622
1623	/*
1624	 * Reading from the configuration space of a non-existent device can
1625	 * generate transaction errors. For the duration of the read we
1626	 * suppress assertion of machine check exceptions to avoid them.
1627	 */
1628 gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
1629 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
1630
1631 /* Make sure no CRS is recorded */
1632 out_be32(port->utl_base + PEUTL_RCSTA, 0x00040000);
1633
1634 switch (len) {
1635 case 1:
1636 *val = in_8((u8 *)(addr + offset));
1637 break;
1638 case 2:
1639 *val = in_le16((u16 *)(addr + offset));
1640 break;
1641 default:
1642 *val = in_le32((u32 *)(addr + offset));
1643 break;
1644 }
1645
1646 pr_debug("pcie-config-read: bus=%3d [%3d..%3d] devfn=0x%04x"
1647 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
1648 bus->number, hose->first_busno, hose->last_busno,
1649 devfn, offset, len, addr + offset, *val);
1650
1651	/* Check for CRS (440SPe rev B handles this in hardware, but check anyway) */
1652	if (in_be32(port->utl_base + PEUTL_RCSTA) & 0x00040000) {
1653		pr_debug("Got CRS!\n");
1654 if (len != 4 || offset != 0)
1655 return PCIBIOS_DEVICE_NOT_FOUND;
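		/* Return the special CRS completion value: a vendor ID of
		 * 0x0001 tells the PCI core to retry the probe.
		 */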
1656 *val = 0xffff0001;
1657 }
1658
1659 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
1660
1661 return PCIBIOS_SUCCESSFUL;
1662}
1663
1664static int ppc4xx_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
1665 int offset, int len, u32 val)
1666{
1667 struct pci_controller *hose = pci_bus_to_host(bus);
1668 struct ppc4xx_pciex_port *port =
1669 &ppc4xx_pciex_ports[hose->indirect_type];
1670 void __iomem *addr;
1671 u32 gpl_cfg;
1672
1673 if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
1674 return PCIBIOS_DEVICE_NOT_FOUND;
1675
1676 addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
1677
1678	/*
1679	 * Writing to the configuration space of a non-existent device can
1680	 * generate transaction errors. For the duration of the write we
1681	 * suppress assertion of machine check exceptions to avoid them.
1682	 */
1683 gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
1684 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
1685
1686 pr_debug("pcie-config-write: bus=%3d [%3d..%3d] devfn=0x%04x"
1687 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
1688 bus->number, hose->first_busno, hose->last_busno,
1689 devfn, offset, len, addr + offset, val);
1690
1691 switch (len) {
1692 case 1:
1693 out_8((u8 *)(addr + offset), val);
1694 break;
1695 case 2:
1696 out_le16((u16 *)(addr + offset), val);
1697 break;
1698 default:
1699 out_le32((u32 *)(addr + offset), val);
1700 break;
1701 }
1702
1703 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
1704
1705 return PCIBIOS_SUCCESSFUL;
1706}
1707
1708static struct pci_ops ppc4xx_pciex_pci_ops =
1709{
1710 .read = ppc4xx_pciex_read_config,
1711 .write = ppc4xx_pciex_write_config,
1712};
1713
1714static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port,
1715 struct pci_controller *hose,
1716 void __iomem *mbase,
1717 u64 plb_addr,
1718 u64 pci_addr,
1719 u64 size,
1720 unsigned int flags,
1721 int index)
1722{
1723 u32 lah, lal, pciah, pcial, sa;
1724
1725 if (!is_power_of_2(size) ||
1726 (index < 2 && size < 0x100000) ||
1727 (index == 2 && size < 0x100) ||
1728 (plb_addr & (size - 1)) != 0) {
1729 printk(KERN_WARNING "%s: Resource out of range\n",
1730 hose->dn->full_name);
1731 return -1;
1732 }
1733
1734 /* Calculate register values */
1735 lah = RES_TO_U32_HIGH(plb_addr);
1736 lal = RES_TO_U32_LOW(plb_addr);
1737 pciah = RES_TO_U32_HIGH(pci_addr);
1738 pcial = RES_TO_U32_LOW(pci_addr);
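	/* The mask has all bits above the (power-of-two) window size set;
	 * the low bit is the region "valid" bit.
	 */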
1739 sa = (0xffffffffu << ilog2(size)) | 0x1;
1740
1741 /* Program register values */
1742 switch (index) {
1743 case 0:
1744 out_le32(mbase + PECFG_POM0LAH, pciah);
1745 out_le32(mbase + PECFG_POM0LAL, pcial);
1746 dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
1747 dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
1748 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
1749		/* Enabled and single region */
1750 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
1751 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
1752 sa | DCRO_PEGPL_460SX_OMR1MSKL_UOT
1753 | DCRO_PEGPL_OMRxMSKL_VAL);
1754 else if (of_device_is_compatible(port->node, "ibm,plb-pciex-476fpe"))
1755 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
1756 sa | DCRO_PEGPL_476FPE_OMR1MSKL_UOT
1757 | DCRO_PEGPL_OMRxMSKL_VAL);
1758 else
1759 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
1760 sa | DCRO_PEGPL_OMR1MSKL_UOT
1761 | DCRO_PEGPL_OMRxMSKL_VAL);
1762 break;
1763 case 1:
1764 out_le32(mbase + PECFG_POM1LAH, pciah);
1765 out_le32(mbase + PECFG_POM1LAL, pcial);
1766 dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
1767 dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
1768 dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
1769 dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL,
1770 sa | DCRO_PEGPL_OMRxMSKL_VAL);
1771 break;
1772 case 2:
1773 out_le32(mbase + PECFG_POM2LAH, pciah);
1774 out_le32(mbase + PECFG_POM2LAL, pcial);
1775 dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
1776 dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
1777 dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
1778		/* The IO and VAL bits together mark this region as enabled I/O space */
1779 dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL,
1780 sa | DCRO_PEGPL_OMR3MSKL_IO
1781 | DCRO_PEGPL_OMRxMSKL_VAL);
1782 break;
1783 }
1784
1785 return 0;
1786}
1787
1788static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
1789 struct pci_controller *hose,
1790 void __iomem *mbase)
1791{
1792 int i, j, found_isa_hole = 0;
1793
1794 /* Setup outbound memory windows */
1795 for (i = j = 0; i < 3; i++) {
1796 struct resource *res = &hose->mem_resources[i];
1797 resource_size_t offset = hose->mem_offset[i];
1798
1799 /* we only care about memory windows */
1800 if (!(res->flags & IORESOURCE_MEM))
1801 continue;
1802 if (j > 1) {
1803 printk(KERN_WARNING "%s: Too many ranges\n",
1804 port->node->full_name);
1805 break;
1806 }
1807
1808 /* Configure the resource */
1809 if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1810 res->start,
1811 res->start - offset,
1812 resource_size(res),
1813 res->flags,
1814 j) == 0) {
1815 j++;
1816
1817 /* If the resource PCI address is 0 then we have our
1818 * ISA memory hole
1819 */
1820 if (res->start == offset)
1821 found_isa_hole = 1;
1822 }
1823 }
1824
1825 /* Handle ISA memory hole if not already covered */
1826 if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
1827 if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1828 hose->isa_mem_phys, 0,
1829 hose->isa_mem_size, 0, j) == 0)
1830 printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
1831 hose->dn->full_name);
1832
1833	/* Configure I/O space: always 64K starting at PCI address 0 (we hard
1834	 * wire it to 64K). Note that it must be region index 2 on this HW.
1835	 */
1836 if (hose->io_resource.flags & IORESOURCE_IO)
1837 ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1838 hose->io_base_phys, 0,
1839 0x10000, IORESOURCE_IO, 2);
1840}
1841
1842static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
1843 struct pci_controller *hose,
1844 void __iomem *mbase,
1845 struct resource *res)
1846{
1847 resource_size_t size = resource_size(res);
1848 u64 sa;
1849
1850 if (port->endpoint) {
1851 resource_size_t ep_addr = 0;
1852 resource_size_t ep_size = 32 << 20;
1853
1854		/* Currently we map a fixed 32MByte window (ep_size above) to PLB
1855		 * address 0 (SDRAM). This should probably be configurable via a
1856		 * dts property.
1857		 */
1858
1859 /* Calculate window size */
1860 sa = (0xffffffffffffffffull << ilog2(ep_size));
1861
1862 /* Setup BAR0 */
1863 out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
1864 out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) |
1865 PCI_BASE_ADDRESS_MEM_TYPE_64);
1866
1867 /* Disable BAR1 & BAR2 */
1868 out_le32(mbase + PECFG_BAR1MPA, 0);
1869 out_le32(mbase + PECFG_BAR2HMPA, 0);
1870 out_le32(mbase + PECFG_BAR2LMPA, 0);
1871
1872 out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa));
1873 out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa));
1874
1875 out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr));
1876 out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr));
1877 } else {
1878 /* Calculate window size */
1879 sa = (0xffffffffffffffffull << ilog2(size));
1880 if (res->flags & IORESOURCE_PREFETCH)
1881 sa |= PCI_BASE_ADDRESS_MEM_PREFETCH;
1882
1883 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx") ||
1884 of_device_is_compatible(port->node, "ibm,plb-pciex-476fpe"))
1885 sa |= PCI_BASE_ADDRESS_MEM_TYPE_64;
1886
1887 out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
1888 out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa));
1889
1890 /* The setup of the split looks weird to me ... let's see
1891 * if it works
1892 */
1893 out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
1894 out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
1895 out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
1896 out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
1897 out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
1898 out_le32(mbase + PECFG_PIM01SAL, 0x00000000);
1899
1900 out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start));
1901 out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start));
1902 }
1903
1904 /* Enable inbound mapping */
1905 out_le32(mbase + PECFG_PIMEN, 0x1);
1906
1907 /* Enable I/O, Mem, and Busmaster cycles */
1908 out_le16(mbase + PCI_COMMAND,
1909 in_le16(mbase + PCI_COMMAND) |
1910 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1911}
1912
1913static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
1914{
1915 struct resource dma_window;
1916 struct pci_controller *hose = NULL;
1917 const int *bus_range;
1918 int primary = 0, busses;
1919 void __iomem *mbase = NULL, *cfg_data = NULL;
1920 const u32 *pval;
1921 u32 val;
1922
1923 /* Check if primary bridge */
1924 if (of_get_property(port->node, "primary", NULL))
1925 primary = 1;
1926
1927 /* Get bus range if any */
1928 bus_range = of_get_property(port->node, "bus-range", NULL);
1929
1930 /* Allocate the host controller data structure */
1931 hose = pcibios_alloc_controller(port->node);
1932 if (!hose)
1933 goto fail;
1934
1935 /* We stick the port number in "indirect_type" so the config space
1936 * ops can retrieve the port data structure easily
1937 */
1938 hose->indirect_type = port->index;
1939
1940 /* Get bus range */
1941 hose->first_busno = bus_range ? bus_range[0] : 0x0;
1942 hose->last_busno = bus_range ? bus_range[1] : 0xff;
1943
1944	/* Because the config space mapping is large (1MB per bus), we limit
1945	 * how many busses we support. In the long run, we could replace this
1946	 * with something akin to kmap_atomic. We also set aside one bus for
1947	 * the host itself.
1948	 */
1949	busses = hose->last_busno - hose->first_busno; /* one less than the bus count (root bus excluded) */
1950 if (busses > MAX_PCIE_BUS_MAPPED) {
1951 busses = MAX_PCIE_BUS_MAPPED;
1952 hose->last_busno = hose->first_busno + busses;
1953 }
1954
1955 if (!port->endpoint) {
1956 /* Only map the external config space in cfg_data for
1957 * PCIe root-complexes. External space is 1M per bus
1958 */
1959 cfg_data = ioremap(port->cfg_space.start +
1960 (hose->first_busno + 1) * 0x100000,
1961 busses * 0x100000);
1962 if (cfg_data == NULL) {
1963			printk(KERN_ERR "%s: Can't map external config space!\n",
1964 port->node->full_name);
1965 goto fail;
1966 }
1967 hose->cfg_data = cfg_data;
1968 }
1969
1970 /* Always map the host config space in cfg_addr.
1971 * Internal space is 4K
1972 */
1973 mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
1974 if (mbase == NULL) {
1975		printk(KERN_ERR "%s: Can't map internal config space!\n",
1976 port->node->full_name);
1977 goto fail;
1978 }
1979 hose->cfg_addr = mbase;
1980
1981 pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name,
1982 hose->first_busno, hose->last_busno);
1983 pr_debug(" config space mapped at: root @0x%p, other @0x%p\n",
1984 hose->cfg_addr, hose->cfg_data);
1985
1986 /* Setup config space */
1987 hose->ops = &ppc4xx_pciex_pci_ops;
1988 port->hose = hose;
1989 mbase = (void __iomem *)hose->cfg_addr;
1990
1991 if (!port->endpoint) {
1992 /*
1993 * Set bus numbers on our root port
1994 */
1995 out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno);
1996 out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1);
1997 out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno);
1998 }
1999
2000 /*
2001 * OMRs are already reset, also disable PIMs
2002 */
2003 out_le32(mbase + PECFG_PIMEN, 0);
2004
2005 /* Parse outbound mapping resources */
2006 pci_process_bridge_OF_ranges(hose, port->node, primary);
2007
2008 /* Parse inbound mapping resources */
2009 if (ppc4xx_parse_dma_ranges(hose, mbase, &dma_window) != 0)
2010 goto fail;
2011
2012 /* Configure outbound ranges POMs */
2013 ppc4xx_configure_pciex_POMs(port, hose, mbase);
2014
2015 /* Configure inbound ranges PIMs */
2016 ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window);
2017
2018	/* The root complex doesn't show up if we don't set some vendor
2019	 * and device IDs into it. The defaults below are the same bogus
2020	 * ones that the initial code in arch/ppc used. They can be
2021	 * overridden by setting the "vendor-id"/"device-id" properties
2022	 * in the pciex node.
2023	 */
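	/* Illustrative override in the pciex node (values are examples only,
	 * not taken from a real board file):
	 *
	 *	vendor-id = <0x1014>;
	 *	device-id = <0x0339>;
	 */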
2024
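	/* The writes below land at 0x200 + the standard config header offset
	 * in the port's internal register space: vendor ID at 0x200, device
	 * ID at 0x202, command at 0x204, class code/revision at 0x208.
	 */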
2025 /* Get the (optional) vendor-/device-id from the device-tree */
2026 pval = of_get_property(port->node, "vendor-id", NULL);
2027 if (pval) {
2028 val = *pval;
2029 } else {
2030 if (!port->endpoint)
2031 val = 0xaaa0 + port->index;
2032 else
2033 val = 0xeee0 + port->index;
2034 }
2035 out_le16(mbase + 0x200, val);
2036
2037 pval = of_get_property(port->node, "device-id", NULL);
2038 if (pval) {
2039 val = *pval;
2040 } else {
2041 if (!port->endpoint)
2042 val = 0xbed0 + port->index;
2043 else
2044 val = 0xfed0 + port->index;
2045 }
2046 out_le16(mbase + 0x202, val);
2047
2048 /* Enable Bus master, memory, and io space */
2049 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
2050 out_le16(mbase + 0x204, 0x7);
2051
2052 if (!port->endpoint) {
2053 /* Set Class Code to PCI-PCI bridge and Revision Id to 1 */
2054 out_le32(mbase + 0x208, 0x06040001);
2055
2056 printk(KERN_INFO "PCIE%d: successfully set as root-complex\n",
2057 port->index);
2058 } else {
2059 /* Set Class Code to Processor/PPC */
2060 out_le32(mbase + 0x208, 0x0b200001);
2061
2062 printk(KERN_INFO "PCIE%d: successfully set as endpoint\n",
2063 port->index);
2064 }
2065
2066 return;
2067 fail:
2068 if (hose)
2069 pcibios_free_controller(hose);
2070 if (cfg_data)
2071 iounmap(cfg_data);
2072 if (mbase)
2073 iounmap(mbase);
2074}
2075
2076static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
2077{
2078 struct ppc4xx_pciex_port *port;
2079 const u32 *pval;
2080 int portno;
2081 unsigned int dcrs;
2082 const char *val;
2083
2084 /* First, proceed to core initialization as we assume there's
2085 * only one PCIe core in the system
2086 */
2087 if (ppc4xx_pciex_check_core_init(np))
2088 return;
2089
2090 /* Get the port number from the device-tree */
2091 pval = of_get_property(np, "port", NULL);
2092 if (pval == NULL) {
2093 printk(KERN_ERR "PCIE: Can't find port number for %s\n",
2094 np->full_name);
2095 return;
2096 }
2097 portno = *pval;
2098 if (portno >= ppc4xx_pciex_port_count) {
2099 printk(KERN_ERR "PCIE: port number out of range for %s\n",
2100 np->full_name);
2101 return;
2102 }
2103 port = &ppc4xx_pciex_ports[portno];
2104 port->index = portno;
2105
2106 /*
2107 * Check if device is enabled
2108 */
2109 if (!of_device_is_available(np)) {
2110 printk(KERN_INFO "PCIE%d: Port disabled via device-tree\n", port->index);
2111 return;
2112 }
2113
2114 port->node = of_node_get(np);
2115 if (ppc4xx_pciex_hwops->want_sdr) {
2116 pval = of_get_property(np, "sdr-base", NULL);
2117 if (pval == NULL) {
2118 printk(KERN_ERR "PCIE: missing sdr-base for %s\n",
2119 np->full_name);
2120 return;
2121 }
2122 port->sdr_base = *pval;
2123 }
2124
2125	/* Check whether the device_type property is set to "pci" or
2126	 * "pci-endpoint": it selects whether this PCIe port is configured
2127	 * as a root complex or as an endpoint.
2128	 */
2129	val = of_get_property(port->node, "device_type", NULL);
2130	if (val && !strcmp(val, "pci-endpoint")) {
2131		port->endpoint = 1;
2132	} else if (val && !strcmp(val, "pci")) {
2133		port->endpoint = 0;
2134	} else {
2135		printk(KERN_ERR "PCIE: missing or incorrect device_type for %s\n",
2136		       np->full_name);
2137		return;
2138	}
2139
2140 /* Fetch config space registers address */
2141 if (of_address_to_resource(np, 0, &port->cfg_space)) {
2142		printk(KERN_ERR "%s: Can't get PCI-E config space!\n",
2143 np->full_name);
2144 return;
2145 }
2146 /* Fetch host bridge internal registers address */
2147 if (of_address_to_resource(np, 1, &port->utl_regs)) {
2148		printk(KERN_ERR "%s: Can't get UTL register base!\n",
2149 np->full_name);
2150 return;
2151 }
2152
2153 /* Map DCRs */
2154 dcrs = dcr_resource_start(np, 0);
2155 if (dcrs == 0) {
2156		printk(KERN_ERR "%s: Can't get DCR register base!\n",
2157 np->full_name);
2158 return;
2159 }
2160 port->dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
2161
2162 /* Initialize the port specific registers */
2163 if (ppc4xx_pciex_port_init(port)) {
2164 printk(KERN_WARNING "PCIE%d: Port init failed\n", port->index);
2165 return;
2166 }
2167
2168 /* Setup the linux hose data structure */
2169 ppc4xx_pciex_port_setup_hose(port);
2170}
2171
2172#endif /* CONFIG_PPC4xx_PCI_EXPRESS */
2173
2174static int __init ppc4xx_pci_find_bridges(void)
2175{
2176 struct device_node *np;
2177
2178 pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
2179
2180#ifdef CONFIG_PPC4xx_PCI_EXPRESS
2181 for_each_compatible_node(np, NULL, "ibm,plb-pciex")
2182 ppc4xx_probe_pciex_bridge(np);
2183#endif
2184 for_each_compatible_node(np, NULL, "ibm,plb-pcix")
2185 ppc4xx_probe_pcix_bridge(np);
2186 for_each_compatible_node(np, NULL, "ibm,plb-pci")
2187 ppc4xx_probe_pci_bridge(np);
2188
2189 return 0;
2190}
2191arch_initcall(ppc4xx_pci_find_bridges);
2192