/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

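/* DMA mappings are set up via the hypervisor.  To keep the number of
 * hypervisor calls down, translations are staged in a per-CPU list of
 * physical pages and submitted to pci_sun4v_iommu_map() in batches of
 * up to PGLIST_NENTS pages at a time.
 */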
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled. */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

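/* Hand the accumulated page list to the hypervisor.  A single
 * pci_sun4v_iommu_map() call may map fewer pages than requested, so
 * keep retrying from where it left off until the whole list has been
 * consumed or an error comes back.
 */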
/* Interrupts must be disabled. */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

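/* Start a fresh batch when the next mapping is not contiguous with the
 * one being accumulated.  map_sg() primes the batch with the sentinel
 * entry ~0UL, which means "nothing batched yet, do not flush".
 */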
static inline void iommu_batch_new_entry(unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p);
	p->entry = entry;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

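/* Allocate a coherent DMA buffer: grab pages on the device's NUMA
 * node, carve a range out of the software IOTSB arena, then batch-map
 * the pages read/write through the hypervisor.
 */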
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   struct dma_attrs *attrs)
{
	unsigned long flags, order, first_page, npages, n;
	struct iommu *iommu;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled. */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, *dma_addrp, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma, struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

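/* Map a single page for streaming DMA.  Write permission is granted
 * unless the transfer is device-bound only (DMA_TO_DEVICE).
 */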
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled. */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, bus_addr, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, bus_addr, npages);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

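/* Map a scatterlist.  Adjacent entries whose IOMMU allocations come
 * back contiguous are merged into one DMA segment, as long as the
 * merge respects the device's max segment size and segment boundary.
 */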
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry);

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end();

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	unsigned long flags;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		while (npages) {
			unsigned long num;

			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
						    npages);
			entry += num;
			npages -= num;
		}

		sg = sg_next(sg);
	}

	spin_unlock_irqrestore(&iommu->lock, flags);
}

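/* Drivers never call these entry points directly; they arrive here
 * through the generic DMA API once dma_ops is pointed at
 * sun4v_dma_ops.  A minimal, illustrative sketch (not part of this
 * driver; "pdev" stands in for some hypothetical PCI device):
 *
 *	void *buf;
 *	dma_addr_t dma;
 *
 *	buf = dma_alloc_coherent(&pdev->dev, 8192, &dma, GFP_KERNEL);
 *	...
 *	dma_free_coherent(&pdev->dev, 8192, buf, dma);
 */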
static struct dma_map_ops sun4v_dma_ops = {
	.alloc			= dma_4v_alloc_coherent,
	.free			= dma_4v_free_coherent,
	.map_page		= dma_4v_map_page,
	.unmap_page		= dma_4v_unmap_page,
	.map_sg			= dma_4v_map_sg,
	.unmap_sg		= dma_4v_unmap_sg,
};

static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
					 struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

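/* Walk the IOTSB and take over translations the firmware (OBP) left
 * behind: entries that point at pages the kernel may use are torn
 * down, anything else is marked busy in the arena so it is never
 * handed out again.
 */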
static unsigned long __devinit probe_existing_entries(struct pci_pbm_info *pbm,
						      struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

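/* MSI support.  Each PBM owns a set of MSI event queues (MSIQs); the
 * hypervisor deposits one pci_sun4v_msiq_entry per incoming message
 * and the generic sparc64 MSI layer drains them through the ops
 * defined below.
 */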
#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI, bits 15:0 are the data from the MSI packet.
	 * For MSI-X, bits 31:0 are the data from the MSI packet.
	 * For MSG, it holds the message code and message routing code, where:
	 *	bits 39:32 are the bus/device/fn of the msg target-id
	 *	bits 18:16 are the message routing code
	 *	bits 7:0 are the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry. */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

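/* Allocate the MSIQ backing store as one physically contiguous block,
 * register each queue with the hypervisor, and read the configuration
 * back to verify that the base address and entry count stuck.
 */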
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;

	return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

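/* Bring up one PCI Bus Module: establish its PCI ops and properties,
 * initialize the IOMMU before any DMA can happen, set up MSI, and
 * only then scan the bus behind it.
 */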
static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
					struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

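/* Probe entry point.  The HV_GRP_PCI hypervisor API is negotiated once
 * for all PBMs, the per-CPU IOMMU batch page lists are allocated on
 * first probe, and the device handle is pulled from the upper half of
 * the first "reg" property.
 */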
static int __devinit pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	u32 devhandle;
	int i, err;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		err = sun4v_hvapi_register(HV_GRP_PCI,
					   vpci_major,
					   &vpci_minor);

		if (err) {
			printk(KERN_ERR PFX "Could not register hvapi, "
			       "err=%d\n", err);
			return err;
		}
		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);