// SPDX-License-Identifier: GPL-2.0
/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>
#include <asm/iommu-common.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"
#include "kernel.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major;
static unsigned long vpci_minor;

struct vpci_version {
	unsigned long major;
	unsigned long minor;
};

/* Ordered from largest major to lowest */
static struct vpci_version vpci_versions[] = {
	{ .major = 2, .minor = 0 },
	{ .major = 1, .minor = 1 },
};

static unsigned long vatu_major = 1;
static unsigned long vatu_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

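/* To amortize the cost of hypervisor calls, IOMMU mappings are staged
 * in a small per-cpu batch and handed to the hypervisor in bulk.  Each
 * cpu owns one page worth of physical page addresses (PGLIST_NENTS
 * entries); the batch is flushed when it fills up or when the caller
 * completes a mapping operation.  Callers run with interrupts disabled
 * so the per-cpu state cannot be corrupted mid-update.
 */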
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled. */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

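/* The ATU (Address Translation Unit) provides a second, 64-bit DVMA
 * space next to the legacy 32-bit IOMMU.  A device uses the ATU only
 * if one was set up for this root complex and the device's DMA mask
 * reaches above 32 bits.
 */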
static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
{
	return iommu->atu && mask > DMA_BIT_MASK(32);
}

/* Interrupts must be disabled. */
static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	u64 *pglist = p->pglist;
	u64 index_count;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	unsigned long npages = p->npages;
	unsigned long iotsb_num;
	unsigned long ret;
	long num;

	/* VPCI maj=1, min=[0,1] only supports read and write */
	if (vpci_major < 2)
		prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);

	while (npages != 0) {
		if (!iommu_use_atu(pbm->iommu, mask)) {
			num = pci_sun4v_iommu_map(devhandle,
						  HV_PCI_TSBID(0, entry),
						  npages,
						  prot,
						  __pa(pglist));
			if (unlikely(num < 0)) {
				pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle,
						   HV_PCI_TSBID(0, entry),
						   npages, prot, __pa(pglist),
						   num);
				return -1;
			}
		} else {
			index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
			iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
			ret = pci_sun4v_iotsb_map(devhandle,
						  iotsb_num,
						  index_count,
						  prot,
						  __pa(pglist),
						  &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle, iotsb_num,
						   index_count, prot,
						   __pa(pglist), ret);
				return -1;
			}
		}
		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p, mask);
	p->entry = entry;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_add(u64 phys_page, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p, mask);

	return 0;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_end(u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p, mask);
}

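/* Allocate a physically contiguous, zeroed buffer and install IOMMU
 * mappings for it.  The translation table is picked from the device's
 * coherent DMA mask (legacy 32-bit IOMMU or, when available, the
 * 64-bit ATU), and the mappings are pushed through the per-cpu batch
 * with interrupts disabled.
 */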
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	u64 mask;
	unsigned long flags, order, first_page, npages, n;
	unsigned long prot = 0;
	struct iommu *iommu;
	struct iommu_map_table *tbl;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;
	mask = dev->coherent_dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &iommu->atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ | prot |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

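/* Bind all devices below @bus_dev to the shared IOTSB.  Bridges are
 * recursed into rather than bound themselves.  A single failure aborts
 * the walk, since every device shares the same IOTSB anyway.
 */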
static unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
				       unsigned long iotsb_num,
				       struct pci_bus *bus_dev)
{
	struct pci_dev *pdev;
	unsigned long err;
	unsigned int bus;
	unsigned int device;
	unsigned int fun;

	list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
		if (pdev->subordinate) {
			/* No need to bind pci bridge */
			dma_4v_iotsb_bind(devhandle, iotsb_num,
					  pdev->subordinate);
		} else {
			bus = bus_dev->number;
			device = PCI_SLOT(pdev->devfn);
			fun = PCI_FUNC(pdev->devfn);
			err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
						   HV_PCI_DEVICE_BUILD(bus,
								       device,
								       fun));

			/* If bind fails for one device it is going to fail
			 * for the rest of the devices as well, because they
			 * all share the IOTSB.  So on failure simply return
			 * the error.
			 */
			if (err)
				return err;
		}
	}

	return 0;
}

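/* Tear down @npages translations starting at @entry.  The DVMA address
 * picks the hcall flavor: addresses within the 32-bit space go through
 * the legacy IOMMU demap, everything above it through the ATU IOTSB
 * demap.
 */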
static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
			       dma_addr_t dvma, unsigned long iotsb_num,
			       unsigned long entry, unsigned long npages)
{
	unsigned long num, flags;
	unsigned long ret;

	local_irq_save(flags);
	do {
		if (dvma <= DMA_BIT_MASK(32)) {
			num = pci_sun4v_iommu_demap(devhandle,
						    HV_PCI_TSBID(0, entry),
						    npages);
		} else {
			ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
						    entry, npages, &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
						   ret);
			}
		}
		entry += num;
		npages -= num;
	} while (npages != 0);
	local_irq_restore(flags);
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma, unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long order, npages, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	if (!iommu_use_atu(iommu, dvma)) {
		tbl = &iommu->tbl;
		iotsb_num = 0; /* we don't care for legacy iommu */
	} else {
		tbl = &atu->tbl;
		iotsb_num = atu->iotsb->iotsb_num;
	}
	entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
	dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

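/* Map a single page for streaming DMA.  A range is carved out of the
 * translation table matching the device's DMA mask, protections are
 * derived from the transfer direction (plus relaxed ordering if the
 * caller asked for it), and the entries are installed via the per-cpu
 * batch.
 */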
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	unsigned long prot;
	dma_addr_t bus_addr, ret;
	long entry;

	iommu = dev->archdata.iommu;
	atu = iommu->atu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	mask = *dev->dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto bad;

	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr, mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_MAPPING_ERROR;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
	return DMA_MAPPING_ERROR;
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long npages;
	unsigned long iotsb_num;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	if (bus_addr <= DMA_BIT_MASK(32)) {
		iotsb_num = 0; /* we don't care for legacy iommu */
		tbl = &iommu->tbl;
	} else {
		iotsb_num = atu->iotsb->iotsb_num;
		tbl = &atu->tbl;
	}
	entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
	dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

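/* Map a scatterlist.  Every segment gets its own translation range,
 * but neighboring segments whose DVMA ranges come out contiguous are
 * merged into one output segment, subject to the device's max segment
 * size and segment boundary.  Returns the number of merged segments,
 * or 0 on failure.
 */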
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;
	atu = iommu->atu;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;

	mask = *dev->dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;

	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
					   tbl, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry, mask);

		/* Convert entry to a dma_addr_t */
		dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr, mask);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end(mask);

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_MAPPING_ERROR;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_tbl_range_free(tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);
			/* XXX demap? XXX */
			s->dma_address = DMA_MAPPING_ERROR;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	local_irq_restore(flags);

	return 0;
}

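/* Undo dma_4v_map_sg(): demap each mapped segment with the hcall
 * flavor matching its DVMA address and return the range to the proper
 * allocator.  A zero dma_length marks the end of the mapped segments.
 */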
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	struct atu *atu;
	unsigned long flags, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	local_irq_save(flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages;
		struct iommu_map_table *tbl;
		unsigned long shift = IO_PAGE_SHIFT;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		if (dma_handle <= DMA_BIT_MASK(32)) {
			iotsb_num = 0; /* we don't care for legacy iommu */
			tbl = &iommu->tbl;
		} else {
			iotsb_num = atu->iotsb->iotsb_num;
			tbl = &atu->tbl;
		}
		entry = ((dma_handle - tbl->table_map_base) >> shift);
		dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
				   entry, npages);
		iommu_tbl_range_free(tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	local_irq_restore(flags);
}

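/* DMA works as long as the device's mask covers the legacy IOMMU's
 * DVMA range (the ALi sound chip hack excepted).  Wider-than-32-bit
 * masks are then steered to the ATU by the mapping routines above.
 */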
static int dma_4v_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;

	if (ali_sound_dma_hack(dev, device_mask))
		return 1;
	if (device_mask < iommu->dma_addr_mask)
		return 0;
	return 1;
}

static const struct dma_map_ops sun4v_dma_ops = {
	.alloc			= dma_4v_alloc_coherent,
	.free			= dma_4v_free_coherent,
	.map_page		= dma_4v_map_page,
	.unmap_page		= dma_4v_unmap_page,
	.map_sg			= dma_4v_map_sg,
	.unmap_sg		= dma_4v_unmap_sg,
	.dma_supported		= dma_4v_supported,
};

static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

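/* The firmware may have left live IOMMU translations behind.  Walk
 * every entry in each pool: translations that target memory the kernel
 * owns are demapped, while the rest (presumably still in use by the
 * firmware) are counted and marked allocated so they are never handed
 * out.
 */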
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct iommu_map_table *iommu)
{
	struct iommu_pool *pool;
	unsigned long i, pool_nr, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
		pool = &(iommu->pools[pool_nr]);
		for (i = pool->start; i <= pool->end; i++) {
			unsigned long ret, io_attrs, ra;

			ret = pci_sun4v_iommu_getmap(devhandle,
						     HV_PCI_TSBID(0, i),
						     &io_attrs, &ra);
			if (ret == HV_EOK) {
				if (page_in_phys_avail(ra)) {
					pci_sun4v_iommu_demap(devhandle,
							      HV_PCI_TSBID(0,
							      i), 1);
				} else {
					cnt++;
					__set_bit(i, iommu->map);
				}
			}
		}
	}
	return cnt;
}

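/* Allocate the IOTSB (I/O Translation Storage Buffer) backing the ATU:
 * eight bytes per IO page of DVMA space.  The table is registered with
 * the hypervisor via pci_iotsb_conf and then bound to every device on
 * the root bus.
 */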
static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	struct atu_iotsb *iotsb;
	void *table;
	u64 table_size;
	u64 iotsb_num;
	unsigned long order;
	unsigned long err;

	iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
	if (!iotsb) {
		err = -ENOMEM;
		goto out_err;
	}
	atu->iotsb = iotsb;

	/* calculate size of IOTSB */
	table_size = (atu->size / IO_PAGE_SIZE) * 8;
	order = get_order(table_size);
	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!table) {
		err = -ENOMEM;
		goto table_failed;
	}
	iotsb->table = table;
	iotsb->ra = __pa(table);
	iotsb->dvma_size = atu->size;
	iotsb->dvma_base = atu->base;
	iotsb->table_size = table_size;
	iotsb->page_size = IO_PAGE_SIZE;

	/* configure and register IOTSB with HV */
	err = pci_sun4v_iotsb_conf(pbm->devhandle,
				   iotsb->ra,
				   iotsb->table_size,
				   iotsb->page_size,
				   iotsb->dvma_base,
				   &iotsb_num);
	if (err) {
		pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}
	iotsb->iotsb_num = iotsb_num;

	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
	if (err) {
		pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}

	return 0;

iotsb_conf_failed:
	free_pages((unsigned long)table, order);
table_failed:
	kfree(iotsb);
out_err:
	return err;
}

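/* Configure the ATU from the "iommu-address-ranges" and
 * "iommu-pagesizes" OF properties, create the IOTSB, and initialize
 * the allocator for the 64-bit DVMA space.
 */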
static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	unsigned long err;
	const u64 *ranges;
	u64 map_size, num_iotte;
	u64 dma_mask;
	const u32 *page_size;
	int len;

	ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
				 &len);
	if (!ranges) {
		pr_err(PFX "No iommu-address-ranges\n");
		return -EINVAL;
	}

	page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
				    NULL);
	if (!page_size) {
		pr_err(PFX "No iommu-pagesizes\n");
		return -EINVAL;
	}

	/* There are 4 iommu-address-ranges supported.  Each range is a pair
	 * of {base, size}.  ranges[0] and ranges[1] are 32-bit address space
	 * while ranges[2] and ranges[3] are 64-bit space.  We want to use the
	 * 64-bit ranges to support 64-bit addressing.  Because the 'size' of
	 * ranges[2] and ranges[3] is the same, we can select either of them
	 * for mapping.  However, that 'size' is too large for the OS to
	 * allocate an IOTSB for, so we use the fixed size 32G
	 * (ATU_64_SPACE_SIZE), which is more than enough for all PCIe
	 * devices to share.
	 */
	atu->ranges = (struct atu_ranges *)ranges;
	atu->base = atu->ranges[3].base;
	atu->size = ATU_64_SPACE_SIZE;

	/* Create IOTSB */
	err = pci_sun4v_atu_alloc_iotsb(pbm);
	if (err) {
		pr_err(PFX "Error creating ATU IOTSB\n");
		return err;
	}

	/* Create ATU iommu map.
	 * One bit represents one iotte in IOTSB table.
	 */
	dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
	num_iotte = atu->size / IO_PAGE_SIZE;
	map_size = num_iotte / 8;
	atu->tbl.table_map_base = atu->base;
	atu->dma_addr_mask = dma_mask;
	atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
	if (!atu->tbl.map)
		return -ENOMEM;

	iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);

	return 0;
}

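/* Set up the legacy 32-bit IOMMU: read the "virtual-dma" property
 * (falling back to a 2GB default window), size the allocator from it,
 * and reclaim whatever translations the firmware left behind.
 */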
static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->tbl.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);
	sz = probe_existing_entries(pbm, &iommu->tbl);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
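/* On sun4v, MSIs are delivered through event queues (MSIQs) that the
 * kernel allocates and registers with the hypervisor.  Each 64-byte
 * queue entry carries the message type, requester id and payload; the
 * generic sparc64 MSI layer drives the queues through the
 * sparc64_msiq_ops at the bottom of this section.
 */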
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;	/* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

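/* Consume one MSI from queue @msiqid at byte offset *head.  Returns 1
 * and advances *head (with wraparound) when an MSI was dequeued, 0 if
 * the entry is empty, and a negative errno on a malformed entry or
 * hcall failure.
 */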
static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry. */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;

	return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	=	pci_sun4v_get_head,
	.dequeue_msi	=	pci_sun4v_dequeue_msi,
	.set_head	=	pci_sun4v_set_head,
	.msi_setup	=	pci_sun4v_msi_setup,
	.msi_teardown	=	pci_sun4v_msi_teardown,
	.msiq_alloc	=	pci_sun4v_msiq_alloc,
	.msiq_free	=	pci_sun4v_msiq_free,
	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

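/* Bring up one PCI Bus Module: discover its properties, initialize the
 * legacy IOMMU, set up MSI support, scan the bus, and, when an ATU is
 * present, try to enable 64-bit DVMA on top.
 */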
static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
			      struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	/* If atu_init fails, it is not a complete failure; we can
	 * still continue using the legacy iommu.
	 */
	if (pbm->iommu->atu) {
		err = pci_sun4v_atu_init(pbm);
		if (err) {
			kfree(pbm->iommu->atu);
			pbm->iommu->atu = NULL;
			pr_err(PFX "ATU init failed, err=%d\n", err);
		}
	}

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

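/* Top-level probe.  On first invocation, negotiate the PCI and ATU
 * hypervisor API groups (trying vpci_versions from newest to oldest)
 * and install sun4v_dma_ops, then allocate the per-cpu batch pages and
 * the pbm/iommu/atu state for this root complex.
 */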
static int pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	struct atu *atu;
	u32 devhandle;
	int i, err = -ENODEV;
	static bool hv_atu = true;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
			vpci_major = vpci_versions[i].major;
			vpci_minor = vpci_versions[i].minor;

			err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
						   &vpci_minor);
			if (!err)
				break;
		}

		if (err) {
			pr_err(PFX "Could not register hvapi, err=%d\n", err);
			return err;
		}
		pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
			vpci_major, vpci_minor);

		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
		if (err) {
			/* don't return an error if we fail to register the
			 * ATU group, but ATU hcalls won't be available.
			 */
			hv_atu = false;
		} else {
			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
				vatu_major, vatu_minor);
		}

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;
	iommu->atu = NULL;
	if (hv_atu) {
		atu = kzalloc(sizeof(*atu), GFP_KERNEL);
		if (!atu)
			pr_err(PFX "Could not allocate atu\n");
		else
			iommu->atu = atu;
	}

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(iommu->atu);
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);