// SPDX-License-Identifier: GPL-2.0
/* pci_fire.c: Sun4u platform PCI-E controller support.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/of_device.h>
#include <linux/numa.h>

#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/upa.h>

#include "pci_impl.h"

#define DRIVER_NAME "fire"
#define PFX DRIVER_NAME ": "

#define FIRE_IOMMU_CONTROL 0x40000UL
#define FIRE_IOMMU_TSBBASE 0x40008UL
#define FIRE_IOMMU_FLUSH 0x40100UL
#define FIRE_IOMMU_FLUSHINV 0x40108UL

static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	u32 vdma[2], dma_mask;
	u64 control;
	int tsbsize, err;

	/* No virtual-dma property on these guys, use largest size. */
	vdma[0] = 0xc0000000; /* base */
	vdma[1] = 0x40000000; /* size */
	dma_mask = 0xffffffff;
	tsbsize = 128;
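
	/*
	 * For the record, the sizing fits together as follows: the TSB
	 * allocated below is tsbsize * 8 * 1024 bytes = 128 * 8KB = 1MB,
	 * i.e. 128K eight-byte TTEs, which at the 8KB IO page size maps
	 * exactly the 1GB (0x40000000) DVMA window configured above.
	 */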

	/* Register addresses. */
	iommu->iommu_control = pbm->pbm_regs + FIRE_IOMMU_CONTROL;
	iommu->iommu_tsbbase = pbm->pbm_regs + FIRE_IOMMU_TSBBASE;
	iommu->iommu_flush = pbm->pbm_regs + FIRE_IOMMU_FLUSH;
	iommu->iommu_flushinv = pbm->pbm_regs + FIRE_IOMMU_FLUSHINV;

	/* We use the main control/status register of FIRE as the write
	 * completion register.
	 */
	iommu->write_complete_reg = pbm->controller_regs + 0x410000UL;

	/*
	 * Invalidate TLB Entries.
	 */
	upa_writeq(~(u64)0, iommu->iommu_flushinv);

	err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask,
			       pbm->numa_node);
	if (err)
		return err;

	upa_writeq(__pa(iommu->page_table) | 0x7UL, iommu->iommu_tsbbase);

	control = upa_readq(iommu->iommu_control);
	control |= (0x00000400 /* TSB cache snoop enable */ |
		    0x00000300 /* Cache mode */ |
		    0x00000002 /* Bypass enable */ |
		    0x00000001 /* Translation enable */);
	upa_writeq(control, iommu->iommu_control);

	return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_msiq_entry {
	u64 word0;
#define MSIQ_WORD0_RESV 0x8000000000000000UL
#define MSIQ_WORD0_FMT_TYPE 0x7f00000000000000UL
#define MSIQ_WORD0_FMT_TYPE_SHIFT 56
#define MSIQ_WORD0_LEN 0x00ffc00000000000UL
#define MSIQ_WORD0_LEN_SHIFT 46
#define MSIQ_WORD0_ADDR0 0x00003fff00000000UL
#define MSIQ_WORD0_ADDR0_SHIFT 32
#define MSIQ_WORD0_RID 0x00000000ffff0000UL
#define MSIQ_WORD0_RID_SHIFT 16
#define MSIQ_WORD0_DATA0 0x000000000000ffffUL
#define MSIQ_WORD0_DATA0_SHIFT 0

#define MSIQ_TYPE_MSG 0x6
#define MSIQ_TYPE_MSI32 0xb
#define MSIQ_TYPE_MSI64 0xf

	u64 word1;
#define MSIQ_WORD1_ADDR1 0xffffffffffff0000UL
#define MSIQ_WORD1_ADDR1_SHIFT 16
#define MSIQ_WORD1_DATA1 0x000000000000ffffUL
#define MSIQ_WORD1_DATA1_SHIFT 0

	u64 resv[6];
};
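
/*
 * Each event queue entry is eight u64 words (64 bytes): word0 and word1
 * carry the record, the rest is reserved.  Combined with the 7-bit
 * head/tail pointers below (EVENT_QUEUE_{HEAD,TAIL}_VAL == 0x7f), a
 * queue holds 128 entries, i.e. 128 * 64 = 8192 bytes, which is the
 * per-queue stride used when indexing pbm->msi_queues in
 * pci_fire_dequeue_msi().
 */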

/* All MSI registers are offset from pbm->pbm_regs */
#define EVENT_QUEUE_BASE_ADDR_REG 0x010000UL
#define EVENT_QUEUE_BASE_ADDR_ALL_ONES 0xfffc000000000000UL

#define EVENT_QUEUE_CONTROL_SET(EQ) (0x011000UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_SET_OFLOW 0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_SET_EN 0x0000100000000000UL

#define EVENT_QUEUE_CONTROL_CLEAR(EQ) (0x011200UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_CLEAR_OF 0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_E2I 0x0000800000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_DIS 0x0000100000000000UL

#define EVENT_QUEUE_STATE(EQ) (0x011400UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_STATE_MASK 0x0000000000000007UL
#define EVENT_QUEUE_STATE_IDLE 0x0000000000000001UL
#define EVENT_QUEUE_STATE_ACTIVE 0x0000000000000002UL
#define EVENT_QUEUE_STATE_ERROR 0x0000000000000004UL

#define EVENT_QUEUE_TAIL(EQ) (0x011600UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_TAIL_OFLOW 0x0200000000000000UL
#define EVENT_QUEUE_TAIL_VAL 0x000000000000007fUL

#define EVENT_QUEUE_HEAD(EQ) (0x011800UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_HEAD_VAL 0x000000000000007fUL

#define MSI_MAP(MSI) (0x020000UL + (MSI) * 0x8UL)
#define MSI_MAP_VALID 0x8000000000000000UL
#define MSI_MAP_EQWR_N 0x4000000000000000UL
#define MSI_MAP_EQNUM 0x000000000000003fUL

#define MSI_CLEAR(MSI) (0x028000UL + (MSI) * 0x8UL)
#define MSI_CLEAR_EQWR_N 0x4000000000000000UL

#define IMONDO_DATA0 0x02C000UL
#define IMONDO_DATA0_DATA 0xffffffffffffffc0UL

#define IMONDO_DATA1 0x02C008UL
#define IMONDO_DATA1_DATA 0xffffffffffffffffUL

#define MSI_32BIT_ADDR 0x034000UL
#define MSI_32BIT_ADDR_VAL 0x00000000ffff0000UL

#define MSI_64BIT_ADDR 0x034008UL
#define MSI_64BIT_ADDR_VAL 0xffffffffffff0000UL

static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			     unsigned long *head)
{
	*head = upa_readq(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
	return 0;
}

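/*
 * Dequeue protocol, as driven by the common sparc64 MSI layer: the
 * hardware appends records at the tail pointer while software consumes
 * them at the head.  A zero FMT_TYPE field marks an unused entry, so we
 * return 0 once the queue is drained, 1 when a single MSI number has
 * been extracted into *msi, and -EINVAL on an unexpected record type;
 * the caller pushes the updated head back via pci_fire_set_head().
 */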
static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
				unsigned long *head, unsigned long *msi)
{
	unsigned long type_fmt, type, msi_num;
	struct pci_msiq_entry *base, *ep;

	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
	ep = &base[*head];

	if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
		return 0;

	type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
		    MSIQ_WORD0_FMT_TYPE_SHIFT);
	type = (type_fmt >> 3);
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
			  MSIQ_WORD0_DATA0_SHIFT);

	upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi_num));

	/* Clear the entry. */
	ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;

	/* Go to next entry in ring. */
	(*head)++;
	if (*head >= pbm->msiq_ent_count)
		*head = 0;

	return 1;
}

static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			     unsigned long head)
{
	upa_writeq(head, pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
	return 0;
}

static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long msi, int is_msi64)
{
	u64 val;

	val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
	val &= ~(MSI_MAP_EQNUM);
	val |= msiqid;
	upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

	upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi));

	val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
	val |= MSI_MAP_VALID;
	upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

	return 0;
}

static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	u64 val;

	val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));

	val &= ~MSI_MAP_VALID;

	upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

	return 0;
}

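/*
 * Sizing note (an inference from the constants, not from Fire
 * documentation): 512KB of queue memory at 8KB per queue leaves room
 * for 64 event queues, matching the 6-bit MSI_MAP_EQNUM field above.
 */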
static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long pages, order, i;

	order = get_order(512 * 1024);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	upa_writeq((EVENT_QUEUE_BASE_ADDR_ALL_ONES |
		    __pa(pbm->msi_queues)),
		   pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG);

	upa_writeq(pbm->portid << 6, pbm->pbm_regs + IMONDO_DATA0);
	upa_writeq(0, pbm->pbm_regs + IMONDO_DATA1);

	upa_writeq(pbm->msi32_start, pbm->pbm_regs + MSI_32BIT_ADDR);
	upa_writeq(pbm->msi64_start, pbm->pbm_regs + MSI_64BIT_ADDR);

	for (i = 0; i < pbm->msiq_num; i++) {
		upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_HEAD(i));
		upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_TAIL(i));
	}

	return 0;
}

static void pci_fire_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long pages, order;

	order = get_order(512 * 1024);
	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
				   unsigned long msiqid,
				   unsigned long devino)
{
	unsigned long cregs = (unsigned long) pbm->pbm_regs;
	unsigned long imap_reg, iclr_reg, int_ctrlr;
	unsigned int irq;
	int fixup;
	u64 val;

	imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
	iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));

	/* XXX iterate amongst the 4 IRQ controllers XXX */
	int_ctrlr = (1UL << 6);

	val = upa_readq(imap_reg);
	val |= (1UL << 63) | int_ctrlr;
	upa_writeq(val, imap_reg);

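	/*
	 * Note that fixup + int_ctrlr reconstructs the full
	 * (portid << 6) | devino mondo number; build_irq() presumably
	 * recovers the controller-number component from the IMAP value
	 * programmed just above.
	 */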
	fixup = ((pbm->portid << 6) | devino) - int_ctrlr;

	irq = build_irq(fixup, iclr_reg, imap_reg);
	if (!irq)
		return -ENOMEM;

	upa_writeq(EVENT_QUEUE_CONTROL_SET_EN,
		   pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid));

	return irq;
}

static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
	.get_head = pci_fire_get_head,
	.dequeue_msi = pci_fire_dequeue_msi,
	.set_head = pci_fire_set_head,
	.msi_setup = pci_fire_msi_setup,
	.msi_teardown = pci_fire_msi_teardown,
	.msiq_alloc = pci_fire_msiq_alloc,
	.msiq_free = pci_fire_msiq_free,
	.msiq_build_irq = pci_fire_msiq_build_irq,
};

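/*
 * Everything Fire-specific about MSI lives in the ops table above; the
 * actual event queue and IRQ plumbing is handled by the common sparc64
 * MSI layer behind sparc64_pbm_msi_init(), which is shared with the
 * sun4v PCI drivers.
 */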
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

/* Based at pbm->controller_regs */
#define FIRE_PARITY_CONTROL 0x470010UL
#define FIRE_PARITY_ENAB 0x8000000000000000UL
#define FIRE_FATAL_RESET_CTL 0x471028UL
#define FIRE_FATAL_RESET_SPARE 0x0000000004000000UL
#define FIRE_FATAL_RESET_MB 0x0000000002000000UL
#define FIRE_FATAL_RESET_CPE 0x0000000000008000UL
#define FIRE_FATAL_RESET_APE 0x0000000000004000UL
#define FIRE_FATAL_RESET_PIO 0x0000000000000040UL
#define FIRE_FATAL_RESET_JW 0x0000000000000004UL
#define FIRE_FATAL_RESET_JI 0x0000000000000002UL
#define FIRE_FATAL_RESET_JR 0x0000000000000001UL
#define FIRE_CORE_INTR_ENABLE 0x471800UL

/* Based at pbm->pbm_regs */
#define FIRE_TLU_CTRL 0x80000UL
#define FIRE_TLU_CTRL_TIM 0x00000000da000000UL
#define FIRE_TLU_CTRL_QDET 0x0000000000000100UL
#define FIRE_TLU_CTRL_CFG 0x0000000000000001UL
#define FIRE_TLU_DEV_CTRL 0x90008UL
#define FIRE_TLU_LINK_CTRL 0x90020UL
#define FIRE_TLU_LINK_CTRL_CLK 0x0000000000000040UL
#define FIRE_LPU_RESET 0xe2008UL
#define FIRE_LPU_LLCFG 0xe2200UL
#define FIRE_LPU_LLCFG_VC0 0x0000000000000100UL
#define FIRE_LPU_FCTRL_UCTRL 0xe2240UL
#define FIRE_LPU_FCTRL_UCTRL_N 0x0000000000000002UL
#define FIRE_LPU_FCTRL_UCTRL_P 0x0000000000000001UL
#define FIRE_LPU_TXL_FIFOP 0xe2430UL
#define FIRE_LPU_LTSSM_CFG2 0xe2788UL
#define FIRE_LPU_LTSSM_CFG3 0xe2790UL
#define FIRE_LPU_LTSSM_CFG4 0xe2798UL
#define FIRE_LPU_LTSSM_CFG5 0xe27a0UL
#define FIRE_DMC_IENAB 0x31800UL
#define FIRE_DMC_DBG_SEL_A 0x53000UL
#define FIRE_DMC_DBG_SEL_B 0x53008UL
#define FIRE_PEC_IENAB 0x51800UL

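/*
 * Bring-up order, as performed below: enable parity checking and arm
 * the fatal-reset sources at the controller level, unmask the core
 * interrupts, then configure the TLU and LPU link blocks before
 * unmasking the DMC and PEC interrupt enables.  The LTSSM timer values
 * are magic numbers carried over as-is; their units are not documented
 * here.
 */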
static void pci_fire_hw_init(struct pci_pbm_info *pbm)
{
	u64 val;

	upa_writeq(FIRE_PARITY_ENAB,
		   pbm->controller_regs + FIRE_PARITY_CONTROL);

	upa_writeq((FIRE_FATAL_RESET_SPARE |
		    FIRE_FATAL_RESET_MB |
		    FIRE_FATAL_RESET_CPE |
		    FIRE_FATAL_RESET_APE |
		    FIRE_FATAL_RESET_PIO |
		    FIRE_FATAL_RESET_JW |
		    FIRE_FATAL_RESET_JI |
		    FIRE_FATAL_RESET_JR),
		   pbm->controller_regs + FIRE_FATAL_RESET_CTL);

	upa_writeq(~(u64)0, pbm->controller_regs + FIRE_CORE_INTR_ENABLE);

	val = upa_readq(pbm->pbm_regs + FIRE_TLU_CTRL);
	val |= (FIRE_TLU_CTRL_TIM |
		FIRE_TLU_CTRL_QDET |
		FIRE_TLU_CTRL_CFG);
	upa_writeq(val, pbm->pbm_regs + FIRE_TLU_CTRL);
	upa_writeq(0, pbm->pbm_regs + FIRE_TLU_DEV_CTRL);
	upa_writeq(FIRE_TLU_LINK_CTRL_CLK,
		   pbm->pbm_regs + FIRE_TLU_LINK_CTRL);

	upa_writeq(0, pbm->pbm_regs + FIRE_LPU_RESET);
	upa_writeq(FIRE_LPU_LLCFG_VC0, pbm->pbm_regs + FIRE_LPU_LLCFG);
	upa_writeq((FIRE_LPU_FCTRL_UCTRL_N | FIRE_LPU_FCTRL_UCTRL_P),
		   pbm->pbm_regs + FIRE_LPU_FCTRL_UCTRL);
	upa_writeq(((0xffff << 16) | (0x0000 << 0)),
		   pbm->pbm_regs + FIRE_LPU_TXL_FIFOP);
	upa_writeq(3000000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG2);
	upa_writeq(500000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG3);
	upa_writeq((2 << 16) | (140 << 8),
		   pbm->pbm_regs + FIRE_LPU_LTSSM_CFG4);
	upa_writeq(0, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG5);

	upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_DMC_IENAB);
	upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_A);
	upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_B);

	upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_PEC_IENAB);
}

static int pci_fire_pbm_init(struct pci_pbm_info *pbm,
			     struct platform_device *op, u32 portid)
{
	const struct linux_prom64_registers *regs;
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = NUMA_NO_NODE;

	pbm->pci_ops = &sun4u_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->portid = portid;
	pbm->op = op;
	pbm->name = dp->full_name;

	regs = of_get_property(dp, "reg", NULL);
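	/* The second "reg" entry evidently points at the block that sits
	 * 0x410000 into the controller register space (the same offset
	 * used for write_complete_reg in pci_fire_pbm_iommu_init()), so
	 * backing that offset out recovers the controller base address.
	 */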
	pbm->pbm_regs = regs[0].phys_addr;
	pbm->controller_regs = regs[1].phys_addr - 0x410000UL;

	printk("%s: SUN4U PCIE Bus Module\n", pbm->name);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	pci_fire_hw_init(pbm);

	err = pci_fire_pbm_iommu_init(pbm);
	if (err)
		return err;

	pci_fire_msi_init(pbm);

	pbm->pci_bus = pci_scan_one_pbm(pbm, &op->dev);

	/* XXX register error interrupt handlers XXX */

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

static int fire_probe(struct platform_device *op)
{
	struct device_node *dp = op->dev.of_node;
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	u32 portid;
	int err;

	portid = of_getintprop_default(dp, "portid", 0xff);

	err = -ENOMEM;
	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Cannot allocate pci_pbm_info.\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_fire_pbm_init(pbm, op, portid);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

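/* Per the OF generic-names convention, "pciex108e,80f0" encodes a PCI
 * Express device with vendor ID 0x108e (Sun Microsystems) and device
 * ID 0x80f0, i.e. the Fire host controller.
 */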
static const struct of_device_id fire_match[] = {
	{
		.name = "pci",
		.compatible = "pciex108e,80f0",
	},
	{},
};

static struct platform_driver fire_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = fire_match,
	},
	.probe = fire_probe,
};

static int __init fire_init(void)
{
	return platform_driver_register(&fire_driver);
}

subsys_initcall(fire_init);