1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
4 *
5 * Rewrite, cleanup, new allocation schemes, virtual merging:
6 * Copyright (C) 2004 Olof Johansson, IBM Corporation
7 * and Ben. Herrenschmidt, IBM Corporation
8 *
9 * Dynamic DMA mapping support, bus-independent parts.
10 */
11
12
13#include <linux/init.h>
14#include <linux/types.h>
15#include <linux/slab.h>
16#include <linux/mm.h>
17#include <linux/spinlock.h>
18#include <linux/string.h>
19#include <linux/dma-mapping.h>
20#include <linux/bitmap.h>
21#include <linux/iommu-helper.h>
22#include <linux/crash_dump.h>
23#include <linux/hash.h>
24#include <linux/fault-inject.h>
25#include <linux/pci.h>
26#include <linux/iommu.h>
27#include <linux/sched.h>
28#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
30#include <asm/iommu.h>
31#include <asm/pci-bridge.h>
32#include <asm/machdep.h>
33#include <asm/kdump.h>
34#include <asm/fadump.h>
35#include <asm/vio.h>
36#include <asm/tce.h>
37#include <asm/mmu_context.h>
38#include <asm/ppc-pci.h>
39
40#define DBG(...)
41
42#ifdef CONFIG_IOMMU_DEBUGFS
43static int iommu_debugfs_weight_get(void *data, u64 *val)
44{
45 struct iommu_table *tbl = data;
46 *val = bitmap_weight(tbl->it_map, tbl->it_size);
47 return 0;
48}
49DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n");
50
51static void iommu_debugfs_add(struct iommu_table *tbl)
52{
53 char name[10];
54 struct dentry *liobn_entry;
55
56 sprintf(name, "%08lx", tbl->it_index);
57 liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir);
58
59 debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
60 debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
61 debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
62 debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
63 debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
64 debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
65 debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
66}
67
68static void iommu_debugfs_del(struct iommu_table *tbl)
69{
70 char name[10];
71
72 sprintf(name, "%08lx", tbl->it_index);
73 debugfs_lookup_and_remove(name, iommu_debugfs_dir);
74}
75#else
76static void iommu_debugfs_add(struct iommu_table *tbl){}
77static void iommu_debugfs_del(struct iommu_table *tbl){}
78#endif
79
80static int novmerge;
81
82static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
83
84static int __init setup_iommu(char *str)
85{
86 if (!strcmp(str, "novmerge"))
87 novmerge = 1;
88 else if (!strcmp(str, "vmerge"))
89 novmerge = 0;
90 return 1;
91}
92
93__setup("iommu=", setup_iommu);
94
95static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
96
97/*
98 * We precalculate the hash to avoid doing it on every allocation.
99 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4-way SMT we want interrupts on the primary threads;
 * without the hash, with 4 pools, all primary threads would map to the
 * same pool.
103 */
104static int __init setup_iommu_pool_hash(void)
105{
106 unsigned int i;
107
108 for_each_possible_cpu(i)
109 per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
110
111 return 0;
112}
113subsys_initcall(setup_iommu_pool_hash);
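
/*
 * Worked example (illustrative, assuming 4 pools): under SMT4 the primary
 * threads are CPUs 0, 4, 8, ... so a plain "cpu & (nr_pools - 1)" would put
 * every primary thread into pool 0, whereas hash_32(cpu, IOMMU_POOL_HASHBITS)
 * spreads them across all the pools.
 */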
114
115#ifdef CONFIG_FAIL_IOMMU
116
117static DECLARE_FAULT_ATTR(fail_iommu);
118
119static int __init setup_fail_iommu(char *str)
120{
121 return setup_fault_attr(&fail_iommu, str);
122}
123__setup("fail_iommu=", setup_fail_iommu);
124
125static bool should_fail_iommu(struct device *dev)
126{
127 return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
128}
129
130static int __init fail_iommu_debugfs(void)
131{
132 struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
133 NULL, &fail_iommu);
134
135 return PTR_ERR_OR_ZERO(dir);
136}
137late_initcall(fail_iommu_debugfs);
138
139static ssize_t fail_iommu_show(struct device *dev,
140 struct device_attribute *attr, char *buf)
141{
142 return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
143}
144
145static ssize_t fail_iommu_store(struct device *dev,
146 struct device_attribute *attr, const char *buf,
147 size_t count)
148{
149 int i;
150
151 if (count > 0 && sscanf(buf, "%d", &i) > 0)
152 dev->archdata.fail_iommu = (i == 0) ? 0 : 1;
153
154 return count;
155}
156
157static DEVICE_ATTR_RW(fail_iommu);
158
159static int fail_iommu_bus_notify(struct notifier_block *nb,
160 unsigned long action, void *data)
161{
162 struct device *dev = data;
163
164 if (action == BUS_NOTIFY_ADD_DEVICE) {
165 if (device_create_file(dev, &dev_attr_fail_iommu))
166 pr_warn("Unable to create IOMMU fault injection sysfs "
167 "entries\n");
168 } else if (action == BUS_NOTIFY_DEL_DEVICE) {
169 device_remove_file(dev, &dev_attr_fail_iommu);
170 }
171
172 return 0;
173}
174
175/*
176 * PCI and VIO buses need separate notifier_block structs, since they're linked
177 * list nodes. Sharing a notifier_block would mean that any notifiers later
178 * registered for PCI buses would also get called by VIO buses and vice versa.
179 */
180static struct notifier_block fail_iommu_pci_bus_notifier = {
181 .notifier_call = fail_iommu_bus_notify
182};
183
184#ifdef CONFIG_IBMVIO
185static struct notifier_block fail_iommu_vio_bus_notifier = {
186 .notifier_call = fail_iommu_bus_notify
187};
188#endif
189
190static int __init fail_iommu_setup(void)
191{
192#ifdef CONFIG_PCI
193 bus_register_notifier(&pci_bus_type, &fail_iommu_pci_bus_notifier);
194#endif
195#ifdef CONFIG_IBMVIO
196 bus_register_notifier(&vio_bus_type, &fail_iommu_vio_bus_notifier);
197#endif
198
199 return 0;
200}
201/*
202 * Must execute after PCI and VIO subsystem have initialised but before
203 * devices are probed.
204 */
205arch_initcall(fail_iommu_setup);
206#else
207static inline bool should_fail_iommu(struct device *dev)
208{
209 return false;
210}
211#endif
212
213static unsigned long iommu_range_alloc(struct device *dev,
214 struct iommu_table *tbl,
215 unsigned long npages,
216 unsigned long *handle,
217 unsigned long mask,
218 unsigned int align_order)
219{
220 unsigned long n, end, start;
221 unsigned long limit;
222 int largealloc = npages > 15;
223 int pass = 0;
224 unsigned long align_mask;
225 unsigned long flags;
226 unsigned int pool_nr;
227 struct iommu_pool *pool;
228
229 align_mask = (1ull << align_order) - 1;
230
231 /* This allocator was derived from x86_64's bit string search */
232
233 /* Sanity check */
234 if (unlikely(npages == 0)) {
235 if (printk_ratelimit())
236 WARN_ON(1);
237 return DMA_MAPPING_ERROR;
238 }
239
240 if (should_fail_iommu(dev))
241 return DMA_MAPPING_ERROR;
242
243 /*
244 * We don't need to disable preemption here because any CPU can
245 * safely use any IOMMU pool.
246 */
247 pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
248
249 if (largealloc)
250 pool = &(tbl->large_pool);
251 else
252 pool = &(tbl->pools[pool_nr]);
253
254 spin_lock_irqsave(&(pool->lock), flags);
255
256again:
257 if ((pass == 0) && handle && *handle &&
258 (*handle >= pool->start) && (*handle < pool->end))
259 start = *handle;
260 else
261 start = pool->hint;
262
263 limit = pool->end;
264
265 /* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end of
267 * the available space. If so, go back to the initial start.
268 */
269 if (start >= limit)
270 start = pool->start;
271
272 if (limit + tbl->it_offset > mask) {
273 limit = mask - tbl->it_offset + 1;
274 /* If we're constrained on address range, first try
275 * at the masked hint to avoid O(n) search complexity,
276 * but on second pass, start at 0 in pool 0.
277 */
278 if ((start & mask) >= limit || pass > 0) {
279 spin_unlock(&(pool->lock));
280 pool = &(tbl->pools[0]);
281 spin_lock(&(pool->lock));
282 start = pool->start;
283 } else {
284 start &= mask;
285 }
286 }
287
288 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
289 dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
290 align_mask);
291 if (n == -1) {
292 if (likely(pass == 0)) {
293 /* First try the pool from the start */
294 pool->hint = pool->start;
295 pass++;
296 goto again;
297
298 } else if (pass <= tbl->nr_pools) {
299 /* Now try scanning all the other pools */
300 spin_unlock(&(pool->lock));
301 pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
302 pool = &tbl->pools[pool_nr];
303 spin_lock(&(pool->lock));
304 pool->hint = pool->start;
305 pass++;
306 goto again;
307
308 } else if (pass == tbl->nr_pools + 1) {
309 /* Last resort: try largepool */
310 spin_unlock(&pool->lock);
311 pool = &tbl->large_pool;
312 spin_lock(&pool->lock);
313 pool->hint = pool->start;
314 pass++;
315 goto again;
316
317 } else {
318 /* Give up */
319 spin_unlock_irqrestore(&(pool->lock), flags);
320 return DMA_MAPPING_ERROR;
321 }
322 }
323
324 end = n + npages;
325
326 /* Bump the hint to a new block for small allocs. */
327 if (largealloc) {
328 /* Don't bump to new block to avoid fragmentation */
329 pool->hint = end;
330 } else {
331 /* Overflow will be taken care of at the next allocation */
332 pool->hint = (end + tbl->it_blocksize - 1) &
333 ~(tbl->it_blocksize - 1);
334 }
335
336 /* Update handle for SG allocations */
337 if (handle)
338 *handle = end;
339
340 spin_unlock_irqrestore(&(pool->lock), flags);
341
342 return n;
343}
344
345static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
346 void *page, unsigned int npages,
347 enum dma_data_direction direction,
348 unsigned long mask, unsigned int align_order,
349 unsigned long attrs)
350{
351 unsigned long entry;
352 dma_addr_t ret = DMA_MAPPING_ERROR;
353 int build_fail;
354
355 entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
356
357 if (unlikely(entry == DMA_MAPPING_ERROR))
358 return DMA_MAPPING_ERROR;
359
360 entry += tbl->it_offset; /* Offset into real TCE table */
361 ret = entry << tbl->it_page_shift; /* Set the return dma address */
362
363 /* Put the TCEs in the HW table */
364 build_fail = tbl->it_ops->set(tbl, entry, npages,
365 (unsigned long)page &
366 IOMMU_PAGE_MASK(tbl), direction, attrs);
367
368 /* tbl->it_ops->set() only returns non-zero for transient errors.
369 * Clean up the table bitmap in this case and return
370 * DMA_MAPPING_ERROR. For all other errors the functionality is
371 * not altered.
372 */
373 if (unlikely(build_fail)) {
374 __iommu_free(tbl, ret, npages);
375 return DMA_MAPPING_ERROR;
376 }
377
378 /* Flush/invalidate TLB caches if necessary */
379 if (tbl->it_ops->flush)
380 tbl->it_ops->flush(tbl);
381
382 /* Make sure updates are seen by hardware */
383 mb();
384
385 return ret;
386}
387
388static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
389 unsigned int npages)
390{
391 unsigned long entry, free_entry;
392
393 entry = dma_addr >> tbl->it_page_shift;
394 free_entry = entry - tbl->it_offset;
395
396 if (((free_entry + npages) > tbl->it_size) ||
397 (entry < tbl->it_offset)) {
398 if (printk_ratelimit()) {
399 printk(KERN_INFO "iommu_free: invalid entry\n");
400 printk(KERN_INFO "\tentry = 0x%lx\n", entry);
401 printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
402 printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl);
403 printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno);
404 printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size);
405 printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
406 printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index);
407 WARN_ON(1);
408 }
409
410 return false;
411 }
412
413 return true;
414}
415
416static struct iommu_pool *get_pool(struct iommu_table *tbl,
417 unsigned long entry)
418{
419 struct iommu_pool *p;
420 unsigned long largepool_start = tbl->large_pool.start;
421
422 /* The large pool is the last pool at the top of the table */
423 if (entry >= largepool_start) {
424 p = &tbl->large_pool;
425 } else {
426 unsigned int pool_nr = entry / tbl->poolsize;
427
428 BUG_ON(pool_nr > tbl->nr_pools);
429 p = &tbl->pools[pool_nr];
430 }
431
432 return p;
433}
434
435static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
436 unsigned int npages)
437{
438 unsigned long entry, free_entry;
439 unsigned long flags;
440 struct iommu_pool *pool;
441
442 entry = dma_addr >> tbl->it_page_shift;
443 free_entry = entry - tbl->it_offset;
444
445 pool = get_pool(tbl, free_entry);
446
447 if (!iommu_free_check(tbl, dma_addr, npages))
448 return;
449
450 tbl->it_ops->clear(tbl, entry, npages);
451
452 spin_lock_irqsave(&(pool->lock), flags);
453 bitmap_clear(tbl->it_map, free_entry, npages);
454 spin_unlock_irqrestore(&(pool->lock), flags);
455}
456
457static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
458 unsigned int npages)
459{
460 __iommu_free(tbl, dma_addr, npages);
461
462 /* Make sure TLB cache is flushed if the HW needs it. We do
463 * not do an mb() here on purpose, it is not needed on any of
464 * the current platforms.
465 */
466 if (tbl->it_ops->flush)
467 tbl->it_ops->flush(tbl);
468}
469
470int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
471 struct scatterlist *sglist, int nelems,
472 unsigned long mask, enum dma_data_direction direction,
473 unsigned long attrs)
474{
475 dma_addr_t dma_next = 0, dma_addr;
476 struct scatterlist *s, *outs, *segstart;
477 int outcount, incount, i, build_fail = 0;
478 unsigned int align;
479 unsigned long handle;
480 unsigned int max_seg_size;
481
482 BUG_ON(direction == DMA_NONE);
483
484 if ((nelems == 0) || !tbl)
485 return -EINVAL;
486
487 outs = s = segstart = &sglist[0];
488 outcount = 1;
489 incount = nelems;
490 handle = 0;
491
492 /* Init first segment length for backout at failure */
493 outs->dma_length = 0;
494
495 DBG("sg mapping %d elements:\n", nelems);
496
497 max_seg_size = dma_get_max_seg_size(dev);
498 for_each_sg(sglist, s, nelems, i) {
499 unsigned long vaddr, npages, entry, slen;
500
501 slen = s->length;
502 /* Sanity check */
503 if (slen == 0) {
504 dma_next = 0;
505 continue;
506 }
507 /* Allocate iommu entries for that segment */
508 vaddr = (unsigned long) sg_virt(s);
509 npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
510 align = 0;
511 if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
512 (vaddr & ~PAGE_MASK) == 0)
513 align = PAGE_SHIFT - tbl->it_page_shift;
514 entry = iommu_range_alloc(dev, tbl, npages, &handle,
515 mask >> tbl->it_page_shift, align);
516
517 DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
518
519 /* Handle failure */
520 if (unlikely(entry == DMA_MAPPING_ERROR)) {
521 if (!(attrs & DMA_ATTR_NO_WARN) &&
522 printk_ratelimit())
523 dev_info(dev, "iommu_alloc failed, tbl %p "
524 "vaddr %lx npages %lu\n", tbl, vaddr,
525 npages);
526 goto failure;
527 }
528
529 /* Convert entry to a dma_addr_t */
530 entry += tbl->it_offset;
531 dma_addr = entry << tbl->it_page_shift;
532 dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl));
533
534 DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
535 npages, entry, dma_addr);
536
537 /* Insert into HW table */
538 build_fail = tbl->it_ops->set(tbl, entry, npages,
539 vaddr & IOMMU_PAGE_MASK(tbl),
540 direction, attrs);
		if (unlikely(build_fail))
542 goto failure;
543
544 /* If we are in an open segment, try merging */
545 if (segstart != s) {
546 DBG(" - trying merge...\n");
547 /* We cannot merge if:
548 * - allocated dma_addr isn't contiguous to previous allocation
549 */
550 if (novmerge || (dma_addr != dma_next) ||
551 (outs->dma_length + s->length > max_seg_size)) {
552 /* Can't merge: create a new segment */
553 segstart = s;
554 outcount++;
555 outs = sg_next(outs);
556 DBG(" can't merge, new segment.\n");
557 } else {
558 outs->dma_length += s->length;
559 DBG(" merged, new len: %ux\n", outs->dma_length);
560 }
561 }
562
563 if (segstart == s) {
564 /* This is a new segment, fill entries */
565 DBG(" - filling new segment.\n");
566 outs->dma_address = dma_addr;
567 outs->dma_length = slen;
568 }
569
570 /* Calculate next page pointer for contiguous check */
571 dma_next = dma_addr + slen;
572
573 DBG(" - dma next is: %lx\n", dma_next);
574 }
575
576 /* Flush/invalidate TLB caches if necessary */
577 if (tbl->it_ops->flush)
578 tbl->it_ops->flush(tbl);
579
580 DBG("mapped %d elements:\n", outcount);
581
582 /* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
583 * next entry of the sglist if we didn't fill the list completely
584 */
585 if (outcount < incount) {
586 outs = sg_next(outs);
587 outs->dma_length = 0;
588 }
589
590 /* Make sure updates are seen by hardware */
591 mb();
592
593 return outcount;
594
595 failure:
596 for_each_sg(sglist, s, nelems, i) {
597 if (s->dma_length != 0) {
598 unsigned long vaddr, npages;
599
600 vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
601 npages = iommu_num_pages(s->dma_address, s->dma_length,
602 IOMMU_PAGE_SIZE(tbl));
603 __iommu_free(tbl, vaddr, npages);
604 s->dma_length = 0;
605 }
606 if (s == outs)
607 break;
608 }
609 return -EIO;
610}
611
612
613void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
614 int nelems, enum dma_data_direction direction,
615 unsigned long attrs)
616{
617 struct scatterlist *sg;
618
619 BUG_ON(direction == DMA_NONE);
620
621 if (!tbl)
622 return;
623
624 sg = sglist;
625 while (nelems--) {
626 unsigned int npages;
627 dma_addr_t dma_handle = sg->dma_address;
628
629 if (sg->dma_length == 0)
630 break;
631 npages = iommu_num_pages(dma_handle, sg->dma_length,
632 IOMMU_PAGE_SIZE(tbl));
633 __iommu_free(tbl, dma_handle, npages);
634 sg = sg_next(sg);
635 }
636
637 /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
638 * do not do an mb() here, the affected platforms do not need it
639 * when freeing.
640 */
641 if (tbl->it_ops->flush)
642 tbl->it_ops->flush(tbl);
643}
644
645static void iommu_table_clear(struct iommu_table *tbl)
646{
647 /*
	 * In case of firmware-assisted dump, the system goes through a clean
	 * reboot at the time of a system crash. Hence it's safe to clear the
	 * TCE entries if firmware-assisted dump is active.
651 */
652 if (!is_kdump_kernel() || is_fadump_active()) {
653 /* Clear the table in case firmware left allocations in it */
654 tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
655 return;
656 }
657
658#ifdef CONFIG_CRASH_DUMP
659 if (tbl->it_ops->get) {
660 unsigned long index, tceval, tcecount = 0;
661
662 /* Reserve the existing mappings left by the first kernel. */
663 for (index = 0; index < tbl->it_size; index++) {
664 tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
665 /*
666 * Freed TCE entry contains 0x7fffffffffffffff on JS20
667 */
668 if (tceval && (tceval != 0x7fffffffffffffffUL)) {
669 __set_bit(index, tbl->it_map);
670 tcecount++;
671 }
672 }
673
674 if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
675 printk(KERN_WARNING "TCE table is full; freeing ");
676 printk(KERN_WARNING "%d entries for the kdump boot\n",
677 KDUMP_MIN_TCE_ENTRIES);
678 for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
679 index < tbl->it_size; index++)
680 __clear_bit(index, tbl->it_map);
681 }
682 }
683#endif
684}
685
686static void iommu_table_reserve_pages(struct iommu_table *tbl,
687 unsigned long res_start, unsigned long res_end)
688{
689 int i;
690
691 WARN_ON_ONCE(res_end < res_start);
692 /*
693 * Reserve page 0 so it will not be used for any mappings.
	 * This prevents buggy drivers that consider page 0 to be invalid
	 * from crashing the machine or even losing data.
696 */
697 if (tbl->it_offset == 0)
698 set_bit(0, tbl->it_map);
699
700 if (res_start < tbl->it_offset)
701 res_start = tbl->it_offset;
702
703 if (res_end > (tbl->it_offset + tbl->it_size))
704 res_end = tbl->it_offset + tbl->it_size;
705
706 /* Check if res_start..res_end is a valid range in the table */
707 if (res_start >= res_end) {
708 tbl->it_reserved_start = tbl->it_offset;
709 tbl->it_reserved_end = tbl->it_offset;
710 return;
711 }
712
713 tbl->it_reserved_start = res_start;
714 tbl->it_reserved_end = res_end;
715
716 for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
717 set_bit(i - tbl->it_offset, tbl->it_map);
718}
719
720/*
 * Build an iommu_table structure. This contains a bitmap which
722 * is used to manage allocation of the tce space.
723 */
724struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
725 unsigned long res_start, unsigned long res_end)
726{
727 unsigned long sz;
728 static int welcomed = 0;
729 unsigned int i;
730 struct iommu_pool *p;
731
732 BUG_ON(!tbl->it_ops);
733
734 /* number of bytes needed for the bitmap */
735 sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
736
737 tbl->it_map = vzalloc_node(sz, nid);
738 if (!tbl->it_map) {
739 pr_err("%s: Can't allocate %ld bytes\n", __func__, sz);
740 return NULL;
741 }
742
743 iommu_table_reserve_pages(tbl, res_start, res_end);
744
745 /* We only split the IOMMU table if we have 1GB or more of space */
746 if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
747 tbl->nr_pools = IOMMU_NR_POOLS;
748 else
749 tbl->nr_pools = 1;
750
751 /* We reserve the top 1/4 of the table for large allocations */
752 tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
753
754 for (i = 0; i < tbl->nr_pools; i++) {
755 p = &tbl->pools[i];
756 spin_lock_init(&(p->lock));
757 p->start = tbl->poolsize * i;
758 p->hint = p->start;
759 p->end = p->start + tbl->poolsize;
760 }
761
762 p = &tbl->large_pool;
763 spin_lock_init(&(p->lock));
764 p->start = tbl->poolsize * i;
765 p->hint = p->start;
766 p->end = tbl->it_size;
767
768 iommu_table_clear(tbl);
769
770 if (!welcomed) {
771 printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
772 novmerge ? "disabled" : "enabled");
773 welcomed = 1;
774 }
775
776 iommu_debugfs_add(tbl);
777
778 return tbl;
779}
780
781bool iommu_table_in_use(struct iommu_table *tbl)
782{
783 unsigned long start = 0, end;
784
785 /* ignore reserved bit0 */
786 if (tbl->it_offset == 0)
787 start = 1;
788
789 /* Simple case with no reserved MMIO32 region */
790 if (!tbl->it_reserved_start && !tbl->it_reserved_end)
791 return find_next_bit(tbl->it_map, tbl->it_size, start) != tbl->it_size;
792
793 end = tbl->it_reserved_start - tbl->it_offset;
794 if (find_next_bit(tbl->it_map, end, start) != end)
795 return true;
796
797 start = tbl->it_reserved_end - tbl->it_offset;
798 end = tbl->it_size;
799 return find_next_bit(tbl->it_map, end, start) != end;
800}
801
802static void iommu_table_free(struct kref *kref)
803{
804 struct iommu_table *tbl;
805
806 tbl = container_of(kref, struct iommu_table, it_kref);
807
808 if (tbl->it_ops->free)
809 tbl->it_ops->free(tbl);
810
811 if (!tbl->it_map) {
812 kfree(tbl);
813 return;
814 }
815
816 iommu_debugfs_del(tbl);
817
818 /* verify that table contains no entries */
819 if (iommu_table_in_use(tbl))
820 pr_warn("%s: Unexpected TCEs\n", __func__);
821
822 /* free bitmap */
823 vfree(tbl->it_map);
824
825 /* free table */
826 kfree(tbl);
827}
828
829struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
830{
831 if (kref_get_unless_zero(&tbl->it_kref))
832 return tbl;
833
834 return NULL;
835}
836EXPORT_SYMBOL_GPL(iommu_tce_table_get);
837
838int iommu_tce_table_put(struct iommu_table *tbl)
839{
840 if (WARN_ON(!tbl))
841 return 0;
842
843 return kref_put(&tbl->it_kref, iommu_table_free);
844}
845EXPORT_SYMBOL_GPL(iommu_tce_table_put);
846
847/* Creates TCEs for a user provided buffer. The user buffer must be
848 * contiguous real kernel storage (not vmalloc). The address passed here
849 * comprises a page address and offset into that page. The dma_addr_t
850 * returned will point to the same byte within the page as was passed in.
851 */
852dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
853 struct page *page, unsigned long offset, size_t size,
854 unsigned long mask, enum dma_data_direction direction,
855 unsigned long attrs)
856{
857 dma_addr_t dma_handle = DMA_MAPPING_ERROR;
858 void *vaddr;
859 unsigned long uaddr;
860 unsigned int npages, align;
861
862 BUG_ON(direction == DMA_NONE);
863
864 vaddr = page_address(page) + offset;
865 uaddr = (unsigned long)vaddr;
866
867 if (tbl) {
868 npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
869 align = 0;
870 if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
871 ((unsigned long)vaddr & ~PAGE_MASK) == 0)
872 align = PAGE_SHIFT - tbl->it_page_shift;
873
874 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
875 mask >> tbl->it_page_shift, align,
876 attrs);
877 if (dma_handle == DMA_MAPPING_ERROR) {
878 if (!(attrs & DMA_ATTR_NO_WARN) &&
879 printk_ratelimit()) {
880 dev_info(dev, "iommu_alloc failed, tbl %p "
881 "vaddr %p npages %d\n", tbl, vaddr,
882 npages);
883 }
884 } else
885 dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
886 }
887
888 return dma_handle;
889}
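
/*
 * Illustrative sketch only (not used in this file): roughly how a DMA
 * mapping implementation calls iommu_map_page() and checks the result.
 * The helper name is made up for the example.
 */
static inline dma_addr_t example_iommu_map_single(struct device *dev,
						  struct iommu_table *tbl,
						  struct page *page,
						  unsigned long offset,
						  size_t size,
						  enum dma_data_direction dir)
{
	dma_addr_t addr;

	addr = iommu_map_page(dev, tbl, page, offset, size,
			      dma_get_mask(dev), dir, 0);
	if (addr == DMA_MAPPING_ERROR)
		dev_dbg(dev, "example: iommu_map_page() failed\n");

	return addr;
}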
890
891void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
892 size_t size, enum dma_data_direction direction,
893 unsigned long attrs)
894{
895 unsigned int npages;
896
897 BUG_ON(direction == DMA_NONE);
898
899 if (tbl) {
900 npages = iommu_num_pages(dma_handle, size,
901 IOMMU_PAGE_SIZE(tbl));
902 iommu_free(tbl, dma_handle, npages);
903 }
904}
905
906/* Allocates a contiguous real buffer and creates mappings over it.
907 * Returns the virtual address of the buffer and sets dma_handle
908 * to the dma address (mapping) of the first page.
909 */
910void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
911 size_t size, dma_addr_t *dma_handle,
912 unsigned long mask, gfp_t flag, int node)
913{
914 void *ret = NULL;
915 dma_addr_t mapping;
916 unsigned int order;
917 unsigned int nio_pages, io_order;
918 struct page *page;
919 int tcesize = (1 << tbl->it_page_shift);
920
921 size = PAGE_ALIGN(size);
922 order = get_order(size);
923
924 /*
925 * Client asked for way too much space. This is checked later
926 * anyway. It is easier to debug here for the drivers than in
927 * the tce tables.
928 */
929 if (order >= IOMAP_MAX_ORDER) {
930 dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
931 size);
932 return NULL;
933 }
934
935 if (!tbl)
936 return NULL;
937
938 /* Alloc enough pages (and possibly more) */
939 page = alloc_pages_node(node, flag, order);
940 if (!page)
941 return NULL;
942 ret = page_address(page);
943 memset(ret, 0, size);
944
945 /* Set up tces to cover the allocated range */
946 nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
947
948 io_order = get_iommu_order(size, tbl);
949 mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
950 mask >> tbl->it_page_shift, io_order, 0);
951 if (mapping == DMA_MAPPING_ERROR) {
952 free_pages((unsigned long)ret, order);
953 return NULL;
954 }
955
956 *dma_handle = mapping | ((u64)ret & (tcesize - 1));
957 return ret;
958}
959
960void iommu_free_coherent(struct iommu_table *tbl, size_t size,
961 void *vaddr, dma_addr_t dma_handle)
962{
963 if (tbl) {
964 unsigned int nio_pages;
965
966 size = PAGE_ALIGN(size);
967 nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
968 iommu_free(tbl, dma_handle, nio_pages);
969 size = PAGE_ALIGN(size);
970 free_pages((unsigned long)vaddr, get_order(size));
971 }
972}
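
/*
 * Illustrative sketch only (not used in this file): pairing
 * iommu_alloc_coherent() with iommu_free_coherent(). The helper name is
 * made up; real callers pass the device's coherent DMA mask and NUMA node.
 */
static inline int example_coherent_roundtrip(struct device *dev,
					     struct iommu_table *tbl)
{
	dma_addr_t handle;
	void *buf;

	buf = iommu_alloc_coherent(dev, tbl, PAGE_SIZE, &handle,
				   dev->coherent_dma_mask, GFP_KERNEL,
				   dev_to_node(dev));
	if (!buf)
		return -ENOMEM;

	/* ... program "handle" into the device, use "buf" from the CPU ... */

	iommu_free_coherent(tbl, PAGE_SIZE, buf, handle);
	return 0;
}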
973
974unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
975{
976 switch (dir) {
977 case DMA_BIDIRECTIONAL:
978 return TCE_PCI_READ | TCE_PCI_WRITE;
979 case DMA_FROM_DEVICE:
980 return TCE_PCI_WRITE;
981 case DMA_TO_DEVICE:
982 return TCE_PCI_READ;
983 default:
984 return 0;
985 }
986}
987EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
988
989#ifdef CONFIG_IOMMU_API
990/*
991 * SPAPR TCE API
992 */
993static void group_release(void *iommu_data)
994{
995 struct iommu_table_group *table_group = iommu_data;
996
997 table_group->group = NULL;
998}
999
1000void iommu_register_group(struct iommu_table_group *table_group,
1001 int pci_domain_number, unsigned long pe_num)
1002{
1003 struct iommu_group *grp;
1004 char *name;
1005
1006 grp = iommu_group_alloc();
1007 if (IS_ERR(grp)) {
1008 pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
1009 PTR_ERR(grp));
1010 return;
1011 }
1012 table_group->group = grp;
1013 iommu_group_set_iommudata(grp, table_group, group_release);
1014 name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
1015 pci_domain_number, pe_num);
1016 if (!name)
1017 return;
1018 iommu_group_set_name(grp, name);
1019 kfree(name);
1020}
1021
1022enum dma_data_direction iommu_tce_direction(unsigned long tce)
1023{
1024 if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
1025 return DMA_BIDIRECTIONAL;
1026 else if (tce & TCE_PCI_READ)
1027 return DMA_TO_DEVICE;
1028 else if (tce & TCE_PCI_WRITE)
1029 return DMA_FROM_DEVICE;
1030 else
1031 return DMA_NONE;
1032}
1033EXPORT_SYMBOL_GPL(iommu_tce_direction);
1034
1035void iommu_flush_tce(struct iommu_table *tbl)
1036{
1037 /* Flush/invalidate TLB caches if necessary */
1038 if (tbl->it_ops->flush)
1039 tbl->it_ops->flush(tbl);
1040
1041 /* Make sure updates are seen by hardware */
1042 mb();
1043}
1044EXPORT_SYMBOL_GPL(iommu_flush_tce);
1045
1046int iommu_tce_check_ioba(unsigned long page_shift,
1047 unsigned long offset, unsigned long size,
1048 unsigned long ioba, unsigned long npages)
1049{
1050 unsigned long mask = (1UL << page_shift) - 1;
1051
1052 if (ioba & mask)
1053 return -EINVAL;
1054
1055 ioba >>= page_shift;
1056 if (ioba < offset)
1057 return -EINVAL;
1058
	if ((ioba + npages) > (offset + size))
1060 return -EINVAL;
1061
1062 return 0;
1063}
1064EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
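
/*
 * Worked example (illustrative): with page_shift = 16 (64K pages), offset = 0
 * and size = 0x8000 pages, an ioba of 0x10000 with npages = 2 passes (64K
 * aligned, pages 1..2 fall inside the window), while ioba = 0x1000 fails the
 * alignment check.
 */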
1065
1066int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
1067{
1068 unsigned long mask = (1UL << page_shift) - 1;
1069
1070 if (gpa & mask)
1071 return -EINVAL;
1072
1073 return 0;
1074}
1075EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
1076
1077long iommu_tce_xchg_no_kill(struct mm_struct *mm,
1078 struct iommu_table *tbl,
1079 unsigned long entry, unsigned long *hpa,
1080 enum dma_data_direction *direction)
1081{
1082 long ret;
1083 unsigned long size = 0;
1084
1085 ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction);
1086 if (!ret && ((*direction == DMA_FROM_DEVICE) ||
1087 (*direction == DMA_BIDIRECTIONAL)) &&
1088 !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
1089 &size))
1090 SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));
1091
1092 return ret;
1093}
1094EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);
1095
1096void iommu_tce_kill(struct iommu_table *tbl,
1097 unsigned long entry, unsigned long pages)
1098{
1099 if (tbl->it_ops->tce_kill)
1100 tbl->it_ops->tce_kill(tbl, entry, pages);
1101}
1102EXPORT_SYMBOL_GPL(iommu_tce_kill);
1103
1104#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1105static int iommu_take_ownership(struct iommu_table *tbl)
1106{
1107 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1108 int ret = 0;
1109
1110 /*
1111 * VFIO does not control TCE entries allocation and the guest
1112 * can write new TCEs on top of existing ones so iommu_tce_build()
1113 * must be able to release old pages. This functionality
1114 * requires exchange() callback defined so if it is not
1115 * implemented, we disallow taking ownership over the table.
1116 */
1117 if (!tbl->it_ops->xchg_no_kill)
1118 return -EINVAL;
1119
1120 spin_lock_irqsave(&tbl->large_pool.lock, flags);
1121 for (i = 0; i < tbl->nr_pools; i++)
1122 spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
1123
1124 if (iommu_table_in_use(tbl)) {
1125 pr_err("iommu_tce: it_map is not empty");
1126 ret = -EBUSY;
1127 } else {
1128 memset(tbl->it_map, 0xff, sz);
1129 }
1130
1131 for (i = 0; i < tbl->nr_pools; i++)
1132 spin_unlock(&tbl->pools[i].lock);
1133 spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1134
1135 return ret;
1136}
1137
1138static void iommu_release_ownership(struct iommu_table *tbl)
1139{
1140 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1141
1142 spin_lock_irqsave(&tbl->large_pool.lock, flags);
1143 for (i = 0; i < tbl->nr_pools; i++)
1144 spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
1145
1146 memset(tbl->it_map, 0, sz);
1147
1148 iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
1149 tbl->it_reserved_end);
1150
1151 for (i = 0; i < tbl->nr_pools; i++)
1152 spin_unlock(&tbl->pools[i].lock);
1153 spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1154}
1155#endif
1156
1157int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
1158{
1159 /*
	 * The sysfs entries should be populated before
	 * binding the IOMMU group. If the sysfs entries aren't
	 * ready, we simply bail.
1163 */
1164 if (!device_is_registered(dev))
1165 return -ENOENT;
1166
1167 if (device_iommu_mapped(dev)) {
1168 pr_debug("%s: Skipping device %s with iommu group %d\n",
1169 __func__, dev_name(dev),
1170 iommu_group_id(dev->iommu_group));
1171 return -EBUSY;
1172 }
1173
1174 pr_debug("%s: Adding %s to iommu group %d\n",
1175 __func__, dev_name(dev), iommu_group_id(table_group->group));
1176 /*
1177 * This is still not adding devices via the IOMMU bus notifier because
1178 * of pcibios_init() from arch/powerpc/kernel/pci_64.c which calls
1179 * pcibios_scan_phb() first (and this guy adds devices and triggers
1180 * the notifier) and only then it calls pci_bus_add_devices() which
1181 * configures DMA for buses which also creates PEs and IOMMU groups.
1182 */
1183 return iommu_probe_device(dev);
1184}
1185EXPORT_SYMBOL_GPL(iommu_add_device);
1186
1187#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1188/*
1189 * A simple iommu_table_group_ops which only allows reusing the existing
1190 * iommu_table. This handles VFIO for POWER7 or the nested KVM.
1191 * The ops does not allow creating windows and only allows reusing the existing
1192 * one if it matches table_group->tce32_start/tce32_size/page_shift.
1193 */
1194static unsigned long spapr_tce_get_table_size(__u32 page_shift,
1195 __u64 window_size, __u32 levels)
1196{
1197 unsigned long size;
1198
1199 if (levels > 1)
1200 return ~0U;
1201 size = window_size >> (page_shift - 3);
1202 return size;
1203}
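
/*
 * Worked example (illustrative): each TCE is 8 bytes, hence the
 * "window_size >> (page_shift - 3)" above. A 2GB window with 64K IOMMU pages
 * (page_shift = 16) needs 2^31 >> 13 = 256KB of table.
 */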
1204
1205static long spapr_tce_create_table(struct iommu_table_group *table_group, int num,
1206 __u32 page_shift, __u64 window_size, __u32 levels,
1207 struct iommu_table **ptbl)
1208{
1209 struct iommu_table *tbl = table_group->tables[0];
1210
1211 if (num > 0)
1212 return -EPERM;
1213
1214 if (tbl->it_page_shift != page_shift ||
1215 tbl->it_size != (window_size >> page_shift) ||
1216 tbl->it_indirect_levels != levels - 1)
1217 return -EINVAL;
1218
1219 *ptbl = iommu_tce_table_get(tbl);
1220 return 0;
1221}
1222
1223static long spapr_tce_set_window(struct iommu_table_group *table_group,
1224 int num, struct iommu_table *tbl)
1225{
1226 return tbl == table_group->tables[num] ? 0 : -EPERM;
1227}
1228
1229static long spapr_tce_unset_window(struct iommu_table_group *table_group, int num)
1230{
1231 return 0;
1232}
1233
1234static long spapr_tce_take_ownership(struct iommu_table_group *table_group)
1235{
1236 int i, j, rc = 0;
1237
1238 for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
1239 struct iommu_table *tbl = table_group->tables[i];
1240
1241 if (!tbl || !tbl->it_map)
1242 continue;
1243
1244 rc = iommu_take_ownership(tbl);
1245 if (!rc)
1246 continue;
1247
1248 for (j = 0; j < i; ++j)
1249 iommu_release_ownership(table_group->tables[j]);
1250 return rc;
1251 }
1252 return 0;
1253}
1254
1255static void spapr_tce_release_ownership(struct iommu_table_group *table_group)
1256{
1257 int i;
1258
1259 for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
1260 struct iommu_table *tbl = table_group->tables[i];
1261
1262 if (!tbl)
1263 continue;
1264
1265 iommu_table_clear(tbl);
1266 if (tbl->it_map)
1267 iommu_release_ownership(tbl);
1268 }
1269}
1270
1271struct iommu_table_group_ops spapr_tce_table_group_ops = {
1272 .get_table_size = spapr_tce_get_table_size,
1273 .create_table = spapr_tce_create_table,
1274 .set_window = spapr_tce_set_window,
1275 .unset_window = spapr_tce_unset_window,
1276 .take_ownership = spapr_tce_take_ownership,
1277 .release_ownership = spapr_tce_release_ownership,
1278};
1279
1280/*
1281 * A simple iommu_ops to allow less cruft in generic VFIO code.
1282 */
1283static int
1284spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
1285 struct device *dev)
1286{
1287 struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1288 struct iommu_group *grp = iommu_group_get(dev);
1289 struct iommu_table_group *table_group;
1290
1291 /* At first attach the ownership is already set */
1292 if (!domain) {
1293 iommu_group_put(grp);
1294 return 0;
1295 }
1296
1297 table_group = iommu_group_get_iommudata(grp);
1298 /*
	 * The domain is being set to PLATFORM from the earlier BLOCKED one,
	 * so the table_group ownership has to be released.
1301 */
1302 table_group->ops->release_ownership(table_group);
1303 iommu_group_put(grp);
1304
1305 return 0;
1306}
1307
1308static const struct iommu_domain_ops spapr_tce_platform_domain_ops = {
1309 .attach_dev = spapr_tce_platform_iommu_attach_dev,
1310};
1311
1312static struct iommu_domain spapr_tce_platform_domain = {
1313 .type = IOMMU_DOMAIN_PLATFORM,
1314 .ops = &spapr_tce_platform_domain_ops,
1315};
1316
1317static int
1318spapr_tce_blocked_iommu_attach_dev(struct iommu_domain *platform_domain,
1319 struct device *dev)
1320{
1321 struct iommu_group *grp = iommu_group_get(dev);
1322 struct iommu_table_group *table_group;
1323 int ret = -EINVAL;
1324
1325 /*
1326 * FIXME: SPAPR mixes blocked and platform behaviors, the blocked domain
1327 * also sets the dma_api ops
1328 */
1329 table_group = iommu_group_get_iommudata(grp);
1330 ret = table_group->ops->take_ownership(table_group);
1331 iommu_group_put(grp);
1332
1333 return ret;
1334}
1335
1336static const struct iommu_domain_ops spapr_tce_blocked_domain_ops = {
1337 .attach_dev = spapr_tce_blocked_iommu_attach_dev,
1338};
1339
1340static struct iommu_domain spapr_tce_blocked_domain = {
1341 .type = IOMMU_DOMAIN_BLOCKED,
1342 .ops = &spapr_tce_blocked_domain_ops,
1343};
1344
1345static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
1346{
1347 switch (cap) {
1348 case IOMMU_CAP_CACHE_COHERENCY:
1349 return true;
1350 default:
1351 break;
1352 }
1353
1354 return false;
1355}
1356
1357static struct iommu_device *spapr_tce_iommu_probe_device(struct device *dev)
1358{
1359 struct pci_dev *pdev;
1360 struct pci_controller *hose;
1361
1362 if (!dev_is_pci(dev))
1363 return ERR_PTR(-ENODEV);
1364
1365 pdev = to_pci_dev(dev);
1366 hose = pdev->bus->sysdata;
1367
1368 return &hose->iommu;
1369}
1370
1371static void spapr_tce_iommu_release_device(struct device *dev)
1372{
1373}
1374
1375static struct iommu_group *spapr_tce_iommu_device_group(struct device *dev)
1376{
1377 struct pci_controller *hose;
1378 struct pci_dev *pdev;
1379
1380 pdev = to_pci_dev(dev);
1381 hose = pdev->bus->sysdata;
1382
1383 if (!hose->controller_ops.device_group)
1384 return ERR_PTR(-ENOENT);
1385
1386 return hose->controller_ops.device_group(hose, pdev);
1387}
1388
1389static const struct iommu_ops spapr_tce_iommu_ops = {
1390 .default_domain = &spapr_tce_platform_domain,
1391 .blocked_domain = &spapr_tce_blocked_domain,
1392 .capable = spapr_tce_iommu_capable,
1393 .probe_device = spapr_tce_iommu_probe_device,
1394 .release_device = spapr_tce_iommu_release_device,
1395 .device_group = spapr_tce_iommu_device_group,
1396};
1397
1398static struct attribute *spapr_tce_iommu_attrs[] = {
1399 NULL,
1400};
1401
1402static struct attribute_group spapr_tce_iommu_group = {
1403 .name = "spapr-tce-iommu",
1404 .attrs = spapr_tce_iommu_attrs,
1405};
1406
1407static const struct attribute_group *spapr_tce_iommu_groups[] = {
1408 &spapr_tce_iommu_group,
1409 NULL,
1410};
1411
1412void ppc_iommu_register_device(struct pci_controller *phb)
1413{
1414 iommu_device_sysfs_add(&phb->iommu, phb->parent,
1415 spapr_tce_iommu_groups, "iommu-phb%04x",
1416 phb->global_number);
1417 iommu_device_register(&phb->iommu, &spapr_tce_iommu_ops,
1418 phb->parent);
1419}
1420
1421void ppc_iommu_unregister_device(struct pci_controller *phb)
1422{
1423 iommu_device_unregister(&phb->iommu);
1424 iommu_device_sysfs_remove(&phb->iommu);
1425}
1426
1427/*
1428 * This registers IOMMU devices of PHBs. This needs to happen
1429 * after core_initcall(iommu_init) + postcore_initcall(pci_driver_init) and
1430 * before subsys_initcall(iommu_subsys_init).
1431 */
1432static int __init spapr_tce_setup_phb_iommus_initcall(void)
1433{
1434 struct pci_controller *hose;
1435
1436 list_for_each_entry(hose, &hose_list, list_node) {
1437 ppc_iommu_register_device(hose);
1438 }
1439 return 0;
1440}
1441postcore_initcall_sync(spapr_tce_setup_phb_iommus_initcall);
1442#endif
1443
1444#endif /* CONFIG_IOMMU_API */
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
4 *
5 * Rewrite, cleanup, new allocation schemes, virtual merging:
6 * Copyright (C) 2004 Olof Johansson, IBM Corporation
7 * and Ben. Herrenschmidt, IBM Corporation
8 *
9 * Dynamic DMA mapping support, bus-independent parts.
10 */
11
12
13#include <linux/init.h>
14#include <linux/types.h>
15#include <linux/slab.h>
16#include <linux/mm.h>
17#include <linux/spinlock.h>
18#include <linux/string.h>
19#include <linux/dma-mapping.h>
20#include <linux/bitmap.h>
21#include <linux/iommu-helper.h>
22#include <linux/crash_dump.h>
23#include <linux/hash.h>
24#include <linux/fault-inject.h>
25#include <linux/pci.h>
26#include <linux/iommu.h>
27#include <linux/sched.h>
28#include <linux/debugfs.h>
29#include <linux/vmalloc.h>
30#include <asm/io.h>
31#include <asm/iommu.h>
32#include <asm/pci-bridge.h>
33#include <asm/machdep.h>
34#include <asm/kdump.h>
35#include <asm/fadump.h>
36#include <asm/vio.h>
37#include <asm/tce.h>
38#include <asm/mmu_context.h>
39#include <asm/ppc-pci.h>
40
41#define DBG(...)
42
43#ifdef CONFIG_IOMMU_DEBUGFS
44static int iommu_debugfs_weight_get(void *data, u64 *val)
45{
46 struct iommu_table *tbl = data;
47 *val = bitmap_weight(tbl->it_map, tbl->it_size);
48 return 0;
49}
50DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n");
51
52static void iommu_debugfs_add(struct iommu_table *tbl)
53{
54 char name[10];
55 struct dentry *liobn_entry;
56
57 sprintf(name, "%08lx", tbl->it_index);
58 liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir);
59
60 debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
61 debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
62 debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
63 debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
64 debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
65 debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
66 debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
67}
68
69static void iommu_debugfs_del(struct iommu_table *tbl)
70{
71 char name[10];
72
73 sprintf(name, "%08lx", tbl->it_index);
74 debugfs_lookup_and_remove(name, iommu_debugfs_dir);
75}
76#else
77static void iommu_debugfs_add(struct iommu_table *tbl){}
78static void iommu_debugfs_del(struct iommu_table *tbl){}
79#endif
80
81static int novmerge;
82
83static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
84
85static int __init setup_iommu(char *str)
86{
87 if (!strcmp(str, "novmerge"))
88 novmerge = 1;
89 else if (!strcmp(str, "vmerge"))
90 novmerge = 0;
91 return 1;
92}
93
94__setup("iommu=", setup_iommu);
95
96static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
97
98/*
99 * We precalculate the hash to avoid doing it on every allocation.
100 *
101 * The hash is important to spread CPUs across all the pools. For example,
102 * on a POWER7 with 4 way SMT we want interrupts on the primary threads and
103 * with 4 pools all primary threads would map to the same pool.
104 */
105static int __init setup_iommu_pool_hash(void)
106{
107 unsigned int i;
108
109 for_each_possible_cpu(i)
110 per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
111
112 return 0;
113}
114subsys_initcall(setup_iommu_pool_hash);
115
116#ifdef CONFIG_FAIL_IOMMU
117
118static DECLARE_FAULT_ATTR(fail_iommu);
119
120static int __init setup_fail_iommu(char *str)
121{
122 return setup_fault_attr(&fail_iommu, str);
123}
124__setup("fail_iommu=", setup_fail_iommu);
125
126static bool should_fail_iommu(struct device *dev)
127{
128 return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
129}
130
131static int __init fail_iommu_debugfs(void)
132{
133 struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
134 NULL, &fail_iommu);
135
136 return PTR_ERR_OR_ZERO(dir);
137}
138late_initcall(fail_iommu_debugfs);
139
140static ssize_t fail_iommu_show(struct device *dev,
141 struct device_attribute *attr, char *buf)
142{
143 return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
144}
145
146static ssize_t fail_iommu_store(struct device *dev,
147 struct device_attribute *attr, const char *buf,
148 size_t count)
149{
150 int i;
151
152 if (count > 0 && sscanf(buf, "%d", &i) > 0)
153 dev->archdata.fail_iommu = (i == 0) ? 0 : 1;
154
155 return count;
156}
157
158static DEVICE_ATTR_RW(fail_iommu);
159
160static int fail_iommu_bus_notify(struct notifier_block *nb,
161 unsigned long action, void *data)
162{
163 struct device *dev = data;
164
165 if (action == BUS_NOTIFY_ADD_DEVICE) {
166 if (device_create_file(dev, &dev_attr_fail_iommu))
167 pr_warn("Unable to create IOMMU fault injection sysfs "
168 "entries\n");
169 } else if (action == BUS_NOTIFY_DEL_DEVICE) {
170 device_remove_file(dev, &dev_attr_fail_iommu);
171 }
172
173 return 0;
174}
175
176/*
177 * PCI and VIO buses need separate notifier_block structs, since they're linked
178 * list nodes. Sharing a notifier_block would mean that any notifiers later
179 * registered for PCI buses would also get called by VIO buses and vice versa.
180 */
181static struct notifier_block fail_iommu_pci_bus_notifier = {
182 .notifier_call = fail_iommu_bus_notify
183};
184
185#ifdef CONFIG_IBMVIO
186static struct notifier_block fail_iommu_vio_bus_notifier = {
187 .notifier_call = fail_iommu_bus_notify
188};
189#endif
190
191static int __init fail_iommu_setup(void)
192{
193#ifdef CONFIG_PCI
194 bus_register_notifier(&pci_bus_type, &fail_iommu_pci_bus_notifier);
195#endif
196#ifdef CONFIG_IBMVIO
197 bus_register_notifier(&vio_bus_type, &fail_iommu_vio_bus_notifier);
198#endif
199
200 return 0;
201}
202/*
203 * Must execute after PCI and VIO subsystem have initialised but before
204 * devices are probed.
205 */
206arch_initcall(fail_iommu_setup);
207#else
208static inline bool should_fail_iommu(struct device *dev)
209{
210 return false;
211}
212#endif
213
214static unsigned long iommu_range_alloc(struct device *dev,
215 struct iommu_table *tbl,
216 unsigned long npages,
217 unsigned long *handle,
218 unsigned long mask,
219 unsigned int align_order)
220{
221 unsigned long n, end, start;
222 unsigned long limit;
223 int largealloc = npages > 15;
224 int pass = 0;
225 unsigned long align_mask;
226 unsigned long flags;
227 unsigned int pool_nr;
228 struct iommu_pool *pool;
229
230 align_mask = (1ull << align_order) - 1;
231
232 /* This allocator was derived from x86_64's bit string search */
233
234 /* Sanity check */
235 if (unlikely(npages == 0)) {
236 if (printk_ratelimit())
237 WARN_ON(1);
238 return DMA_MAPPING_ERROR;
239 }
240
241 if (should_fail_iommu(dev))
242 return DMA_MAPPING_ERROR;
243
244 /*
245 * We don't need to disable preemption here because any CPU can
246 * safely use any IOMMU pool.
247 */
248 pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
249
250 if (largealloc)
251 pool = &(tbl->large_pool);
252 else
253 pool = &(tbl->pools[pool_nr]);
254
255 spin_lock_irqsave(&(pool->lock), flags);
256
257again:
258 if ((pass == 0) && handle && *handle &&
259 (*handle >= pool->start) && (*handle < pool->end))
260 start = *handle;
261 else
262 start = pool->hint;
263
264 limit = pool->end;
265
266 /* The case below can happen if we have a small segment appended
267 * to a large, or when the previous alloc was at the very end of
268 * the available space. If so, go back to the initial start.
269 */
270 if (start >= limit)
271 start = pool->start;
272
273 if (limit + tbl->it_offset > mask) {
274 limit = mask - tbl->it_offset + 1;
275 /* If we're constrained on address range, first try
276 * at the masked hint to avoid O(n) search complexity,
277 * but on second pass, start at 0 in pool 0.
278 */
279 if ((start & mask) >= limit || pass > 0) {
280 spin_unlock(&(pool->lock));
281 pool = &(tbl->pools[0]);
282 spin_lock(&(pool->lock));
283 start = pool->start;
284 } else {
285 start &= mask;
286 }
287 }
288
289 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
290 dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
291 align_mask);
292 if (n == -1) {
293 if (likely(pass == 0)) {
294 /* First try the pool from the start */
295 pool->hint = pool->start;
296 pass++;
297 goto again;
298
299 } else if (pass <= tbl->nr_pools) {
300 /* Now try scanning all the other pools */
301 spin_unlock(&(pool->lock));
302 pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
303 pool = &tbl->pools[pool_nr];
304 spin_lock(&(pool->lock));
305 pool->hint = pool->start;
306 pass++;
307 goto again;
308
309 } else if (pass == tbl->nr_pools + 1) {
310 /* Last resort: try largepool */
311 spin_unlock(&pool->lock);
312 pool = &tbl->large_pool;
313 spin_lock(&pool->lock);
314 pool->hint = pool->start;
315 pass++;
316 goto again;
317
318 } else {
319 /* Give up */
320 spin_unlock_irqrestore(&(pool->lock), flags);
321 return DMA_MAPPING_ERROR;
322 }
323 }
324
325 end = n + npages;
326
327 /* Bump the hint to a new block for small allocs. */
328 if (largealloc) {
329 /* Don't bump to new block to avoid fragmentation */
330 pool->hint = end;
331 } else {
332 /* Overflow will be taken care of at the next allocation */
333 pool->hint = (end + tbl->it_blocksize - 1) &
334 ~(tbl->it_blocksize - 1);
335 }
336
337 /* Update handle for SG allocations */
338 if (handle)
339 *handle = end;
340
341 spin_unlock_irqrestore(&(pool->lock), flags);
342
343 return n;
344}
345
346static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
347 void *page, unsigned int npages,
348 enum dma_data_direction direction,
349 unsigned long mask, unsigned int align_order,
350 unsigned long attrs)
351{
352 unsigned long entry;
353 dma_addr_t ret = DMA_MAPPING_ERROR;
354 int build_fail;
355
356 entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
357
358 if (unlikely(entry == DMA_MAPPING_ERROR))
359 return DMA_MAPPING_ERROR;
360
361 entry += tbl->it_offset; /* Offset into real TCE table */
362 ret = entry << tbl->it_page_shift; /* Set the return dma address */
363
364 /* Put the TCEs in the HW table */
365 build_fail = tbl->it_ops->set(tbl, entry, npages,
366 (unsigned long)page &
367 IOMMU_PAGE_MASK(tbl), direction, attrs);
368
369 /* tbl->it_ops->set() only returns non-zero for transient errors.
370 * Clean up the table bitmap in this case and return
371 * DMA_MAPPING_ERROR. For all other errors the functionality is
372 * not altered.
373 */
374 if (unlikely(build_fail)) {
375 __iommu_free(tbl, ret, npages);
376 return DMA_MAPPING_ERROR;
377 }
378
379 /* Flush/invalidate TLB caches if necessary */
380 if (tbl->it_ops->flush)
381 tbl->it_ops->flush(tbl);
382
383 /* Make sure updates are seen by hardware */
384 mb();
385
386 return ret;
387}
388
389static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
390 unsigned int npages)
391{
392 unsigned long entry, free_entry;
393
394 entry = dma_addr >> tbl->it_page_shift;
395 free_entry = entry - tbl->it_offset;
396
397 if (((free_entry + npages) > tbl->it_size) ||
398 (entry < tbl->it_offset)) {
399 if (printk_ratelimit()) {
400 printk(KERN_INFO "iommu_free: invalid entry\n");
401 printk(KERN_INFO "\tentry = 0x%lx\n", entry);
402 printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
403 printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl);
404 printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno);
405 printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size);
406 printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
407 printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index);
408 WARN_ON(1);
409 }
410
411 return false;
412 }
413
414 return true;
415}
416
417static struct iommu_pool *get_pool(struct iommu_table *tbl,
418 unsigned long entry)
419{
420 struct iommu_pool *p;
421 unsigned long largepool_start = tbl->large_pool.start;
422
423 /* The large pool is the last pool at the top of the table */
424 if (entry >= largepool_start) {
425 p = &tbl->large_pool;
426 } else {
427 unsigned int pool_nr = entry / tbl->poolsize;
428
429 BUG_ON(pool_nr > tbl->nr_pools);
430 p = &tbl->pools[pool_nr];
431 }
432
433 return p;
434}
435
436static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
437 unsigned int npages)
438{
439 unsigned long entry, free_entry;
440 unsigned long flags;
441 struct iommu_pool *pool;
442
443 entry = dma_addr >> tbl->it_page_shift;
444 free_entry = entry - tbl->it_offset;
445
446 pool = get_pool(tbl, free_entry);
447
448 if (!iommu_free_check(tbl, dma_addr, npages))
449 return;
450
451 tbl->it_ops->clear(tbl, entry, npages);
452
453 spin_lock_irqsave(&(pool->lock), flags);
454 bitmap_clear(tbl->it_map, free_entry, npages);
455 spin_unlock_irqrestore(&(pool->lock), flags);
456}
457
458static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
459 unsigned int npages)
460{
461 __iommu_free(tbl, dma_addr, npages);
462
463 /* Make sure TLB cache is flushed if the HW needs it. We do
464 * not do an mb() here on purpose, it is not needed on any of
465 * the current platforms.
466 */
467 if (tbl->it_ops->flush)
468 tbl->it_ops->flush(tbl);
469}
470
471int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
472 struct scatterlist *sglist, int nelems,
473 unsigned long mask, enum dma_data_direction direction,
474 unsigned long attrs)
475{
476 dma_addr_t dma_next = 0, dma_addr;
477 struct scatterlist *s, *outs, *segstart;
478 int outcount, incount, i, build_fail = 0;
479 unsigned int align;
480 unsigned long handle;
481 unsigned int max_seg_size;
482
483 BUG_ON(direction == DMA_NONE);
484
485 if ((nelems == 0) || !tbl)
486 return -EINVAL;
487
488 outs = s = segstart = &sglist[0];
489 outcount = 1;
490 incount = nelems;
491 handle = 0;
492
493 /* Init first segment length for backout at failure */
494 outs->dma_length = 0;
495
496 DBG("sg mapping %d elements:\n", nelems);
497
498 max_seg_size = dma_get_max_seg_size(dev);
499 for_each_sg(sglist, s, nelems, i) {
500 unsigned long vaddr, npages, entry, slen;
501
502 slen = s->length;
503 /* Sanity check */
504 if (slen == 0) {
505 dma_next = 0;
506 continue;
507 }
508 /* Allocate iommu entries for that segment */
509 vaddr = (unsigned long) sg_virt(s);
510 npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
511 align = 0;
512 if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
513 (vaddr & ~PAGE_MASK) == 0)
514 align = PAGE_SHIFT - tbl->it_page_shift;
515 entry = iommu_range_alloc(dev, tbl, npages, &handle,
516 mask >> tbl->it_page_shift, align);
517
518 DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
519
520 /* Handle failure */
521 if (unlikely(entry == DMA_MAPPING_ERROR)) {
522 if (!(attrs & DMA_ATTR_NO_WARN) &&
523 printk_ratelimit())
524 dev_info(dev, "iommu_alloc failed, tbl %p "
525 "vaddr %lx npages %lu\n", tbl, vaddr,
526 npages);
527 goto failure;
528 }
529
530 /* Convert entry to a dma_addr_t */
531 entry += tbl->it_offset;
532 dma_addr = entry << tbl->it_page_shift;
533 dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl));
534
535 DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
536 npages, entry, dma_addr);
537
538 /* Insert into HW table */
539 build_fail = tbl->it_ops->set(tbl, entry, npages,
540 vaddr & IOMMU_PAGE_MASK(tbl),
541 direction, attrs);
542 if(unlikely(build_fail))
543 goto failure;
544
545 /* If we are in an open segment, try merging */
546 if (segstart != s) {
547 DBG(" - trying merge...\n");
548 /* We cannot merge if:
549 * - allocated dma_addr isn't contiguous to previous allocation
550 */
551 if (novmerge || (dma_addr != dma_next) ||
552 (outs->dma_length + s->length > max_seg_size)) {
553 /* Can't merge: create a new segment */
554 segstart = s;
555 outcount++;
556 outs = sg_next(outs);
557 DBG(" can't merge, new segment.\n");
558 } else {
559 outs->dma_length += s->length;
560 DBG(" merged, new len: %ux\n", outs->dma_length);
561 }
562 }
563
564 if (segstart == s) {
565 /* This is a new segment, fill entries */
566 DBG(" - filling new segment.\n");
567 outs->dma_address = dma_addr;
568 outs->dma_length = slen;
569 }
570
571 /* Calculate next page pointer for contiguous check */
572 dma_next = dma_addr + slen;
573
574 DBG(" - dma next is: %lx\n", dma_next);
575 }
576
577 /* Flush/invalidate TLB caches if necessary */
578 if (tbl->it_ops->flush)
579 tbl->it_ops->flush(tbl);
580
581 DBG("mapped %d elements:\n", outcount);
582
583 /* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
584 * next entry of the sglist if we didn't fill the list completely
585 */
586 if (outcount < incount) {
587 outs = sg_next(outs);
588 outs->dma_length = 0;
589 }
590
591 /* Make sure updates are seen by hardware */
592 mb();
593
594 return outcount;
595
596 failure:
597 for_each_sg(sglist, s, nelems, i) {
598 if (s->dma_length != 0) {
599 unsigned long vaddr, npages;
600
601 vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
602 npages = iommu_num_pages(s->dma_address, s->dma_length,
603 IOMMU_PAGE_SIZE(tbl));
604 __iommu_free(tbl, vaddr, npages);
605 s->dma_length = 0;
606 }
607 if (s == outs)
608 break;
609 }
610 return -EIO;
611}
612
613
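/*
 * Undo ppc_iommu_map_sg(): free the TCE entries behind each mapped segment,
 * stopping at the first entry with a zero dma_length (the terminator that
 * ppc_iommu_map_sg() writes when it does not fill the list completely).
 */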
614void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
615 int nelems, enum dma_data_direction direction,
616 unsigned long attrs)
617{
618 struct scatterlist *sg;
619
620 BUG_ON(direction == DMA_NONE);
621
622 if (!tbl)
623 return;
624
625 sg = sglist;
626 while (nelems--) {
627 unsigned int npages;
628 dma_addr_t dma_handle = sg->dma_address;
629
630 if (sg->dma_length == 0)
631 break;
632 npages = iommu_num_pages(dma_handle, sg->dma_length,
633 IOMMU_PAGE_SIZE(tbl));
634 __iommu_free(tbl, dma_handle, npages);
635 sg = sg_next(sg);
636 }
637
638 /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
639 * do not do an mb() here, the affected platforms do not need it
640 * when freeing.
641 */
642 if (tbl->it_ops->flush)
643 tbl->it_ops->flush(tbl);
644}
645
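/*
 * Initialize the TCE entries of a table. On a normal boot (or when
 * firmware-assisted dump is active) all entries are simply cleared. In a
 * kdump kernel the mappings left by the first kernel are preserved by
 * marking them in the bitmap; if fewer than KDUMP_MIN_TCE_ENTRIES remain
 * free, that many entries at the end of the table are freed for the
 * kdump boot.
 */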
646void iommu_table_clear(struct iommu_table *tbl)
647{
648 /*
649	 * In case of firmware-assisted dump, the system goes through a clean
650	 * reboot process at the time of the crash. Hence it's safe to
651	 * clear the TCE entries if firmware-assisted dump is active.
652 */
653 if (!is_kdump_kernel() || is_fadump_active()) {
654 /* Clear the table in case firmware left allocations in it */
655 tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
656 return;
657 }
658
659#ifdef CONFIG_CRASH_DUMP
660 if (tbl->it_ops->get) {
661 unsigned long index, tceval, tcecount = 0;
662
663 /* Reserve the existing mappings left by the first kernel. */
664 for (index = 0; index < tbl->it_size; index++) {
665 tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
666 /*
667 * Freed TCE entry contains 0x7fffffffffffffff on JS20
668 */
669 if (tceval && (tceval != 0x7fffffffffffffffUL)) {
670 __set_bit(index, tbl->it_map);
671 tcecount++;
672 }
673 }
674
675 if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
676			printk(KERN_WARNING "TCE table is full; freeing %d entries for the kdump boot\n",
677			       KDUMP_MIN_TCE_ENTRIES);
679 for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
680 index < tbl->it_size; index++)
681 __clear_bit(index, tbl->it_map);
682 }
683 }
684#endif
685}
686
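/*
 * Mark a window of the table as unavailable for DMA allocations: page 0
 * (when the table starts at offset 0) and the res_start..res_end range,
 * clamped to the table's offset/size window. The resulting window is
 * remembered in it_reserved_start/it_reserved_end; a request that ends up
 * empty after clamping leaves no reserved window.
 */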
687void iommu_table_reserve_pages(struct iommu_table *tbl,
688 unsigned long res_start, unsigned long res_end)
689{
690 unsigned long i;
691
692 WARN_ON_ONCE(res_end < res_start);
693 /*
694 * Reserve page 0 so it will not be used for any mappings.
695	 * This prevents buggy drivers that consider page 0 to be invalid
696	 * from crashing the machine or even losing data.
697 */
698 if (tbl->it_offset == 0)
699 set_bit(0, tbl->it_map);
700
701 if (res_start < tbl->it_offset)
702 res_start = tbl->it_offset;
703
704 if (res_end > (tbl->it_offset + tbl->it_size))
705 res_end = tbl->it_offset + tbl->it_size;
706
707 /* Check if res_start..res_end is a valid range in the table */
708 if (res_start >= res_end) {
709 tbl->it_reserved_start = tbl->it_offset;
710 tbl->it_reserved_end = tbl->it_offset;
711 return;
712 }
713
714 tbl->it_reserved_start = res_start;
715 tbl->it_reserved_end = res_end;
716
717 for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
718 set_bit(i - tbl->it_offset, tbl->it_map);
719}
720
721/*
722 * Build an iommu_table structure. This contains a bitmap which
723 * is used to manage allocation of the TCE space.
724 */
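/*
 * Illustrative call sequence (a sketch, not code from this file): a
 * platform backend fills in it_size, it_offset, it_page_shift, it_index
 * and it_ops before handing the table to iommu_init_table(). The
 * "my_tce_ops" and "nid" names below are hypothetical placeholders.
 *
 *	tbl->it_ops = &my_tce_ops;
 *	if (!iommu_init_table(tbl, nid, 0, 0))
 *		return -ENOMEM;
 */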
725struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
726 unsigned long res_start, unsigned long res_end)
727{
728 unsigned long sz;
729 static int welcomed = 0;
730 unsigned int i;
731 struct iommu_pool *p;
732
733 BUG_ON(!tbl->it_ops);
734
735 /* number of bytes needed for the bitmap */
736 sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
737
738 tbl->it_map = vzalloc_node(sz, nid);
739 if (!tbl->it_map) {
740 pr_err("%s: Can't allocate %ld bytes\n", __func__, sz);
741 return NULL;
742 }
743
744 iommu_table_reserve_pages(tbl, res_start, res_end);
745
746 /* We only split the IOMMU table if we have 1GB or more of space */
747 if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
748 tbl->nr_pools = IOMMU_NR_POOLS;
749 else
750 tbl->nr_pools = 1;
751
752 /* We reserve the top 1/4 of the table for large allocations */
753 tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
754
755 for (i = 0; i < tbl->nr_pools; i++) {
756 p = &tbl->pools[i];
757 spin_lock_init(&(p->lock));
758 p->start = tbl->poolsize * i;
759 p->hint = p->start;
760 p->end = p->start + tbl->poolsize;
761 }
762
763 p = &tbl->large_pool;
764 spin_lock_init(&(p->lock));
765 p->start = tbl->poolsize * i;
766 p->hint = p->start;
767 p->end = tbl->it_size;
768
769 iommu_table_clear(tbl);
770
771 if (!welcomed) {
772 printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
773 novmerge ? "disabled" : "enabled");
774 welcomed = 1;
775 }
776
777 iommu_debugfs_add(tbl);
778
779 return tbl;
780}
781
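/*
 * Return true if any TCE other than the reserved entries (page 0 when the
 * table starts at offset 0, and the it_reserved_start..it_reserved_end
 * window) is currently set in the allocation bitmap.
 */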
782bool iommu_table_in_use(struct iommu_table *tbl)
783{
784 unsigned long start = 0, end;
785
786 /* ignore reserved bit0 */
787 if (tbl->it_offset == 0)
788 start = 1;
789
790 /* Simple case with no reserved MMIO32 region */
791 if (!tbl->it_reserved_start && !tbl->it_reserved_end)
792 return find_next_bit(tbl->it_map, tbl->it_size, start) != tbl->it_size;
793
794 end = tbl->it_reserved_start - tbl->it_offset;
795 if (find_next_bit(tbl->it_map, end, start) != end)
796 return true;
797
798 start = tbl->it_reserved_end - tbl->it_offset;
799 end = tbl->it_size;
800 return find_next_bit(tbl->it_map, end, start) != end;
801}
802
803static void iommu_table_free(struct kref *kref)
804{
805 struct iommu_table *tbl;
806
807 tbl = container_of(kref, struct iommu_table, it_kref);
808
809 if (tbl->it_ops->free)
810 tbl->it_ops->free(tbl);
811
812 if (!tbl->it_map) {
813 kfree(tbl);
814 return;
815 }
816
817 iommu_debugfs_del(tbl);
818
819 /* verify that table contains no entries */
820 if (iommu_table_in_use(tbl))
821 pr_warn("%s: Unexpected TCEs\n", __func__);
822
823 /* free bitmap */
824 vfree(tbl->it_map);
825
826 /* free table */
827 kfree(tbl);
828}
829
830struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
831{
832 if (kref_get_unless_zero(&tbl->it_kref))
833 return tbl;
834
835 return NULL;
836}
837EXPORT_SYMBOL_GPL(iommu_tce_table_get);
838
839int iommu_tce_table_put(struct iommu_table *tbl)
840{
841 if (WARN_ON(!tbl))
842 return 0;
843
844 return kref_put(&tbl->it_kref, iommu_table_free);
845}
846EXPORT_SYMBOL_GPL(iommu_tce_table_put);
847
848/* Creates TCEs for a user-provided buffer. The user buffer must be
849 * contiguous real kernel storage (not vmalloc). The address passed here
850 * comprises a page address and offset into that page. The dma_addr_t
851 * returned will point to the same byte within the page as was passed in.
852 */
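/*
 * Minimal illustrative pairing with iommu_unmap_page() (a sketch, not code
 * from this file); "dev", "tbl", "pg", "len" and "dma_mask" are assumed to
 * have been set up by the caller:
 *
 *	dma_addr_t handle = iommu_map_page(dev, tbl, pg, 0, len, dma_mask,
 *					   DMA_TO_DEVICE, 0);
 *	if (handle == DMA_MAPPING_ERROR)
 *		return -EIO;
 *	...
 *	iommu_unmap_page(tbl, handle, len, DMA_TO_DEVICE, 0);
 */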
853dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
854 struct page *page, unsigned long offset, size_t size,
855 unsigned long mask, enum dma_data_direction direction,
856 unsigned long attrs)
857{
858 dma_addr_t dma_handle = DMA_MAPPING_ERROR;
859 void *vaddr;
860 unsigned long uaddr;
861 unsigned int npages, align;
862
863 BUG_ON(direction == DMA_NONE);
864
865 vaddr = page_address(page) + offset;
866 uaddr = (unsigned long)vaddr;
867
868 if (tbl) {
869 npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
870 align = 0;
871 if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
872 ((unsigned long)vaddr & ~PAGE_MASK) == 0)
873 align = PAGE_SHIFT - tbl->it_page_shift;
874
875 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
876 mask >> tbl->it_page_shift, align,
877 attrs);
878 if (dma_handle == DMA_MAPPING_ERROR) {
879 if (!(attrs & DMA_ATTR_NO_WARN) &&
880 printk_ratelimit()) {
881 dev_info(dev, "iommu_alloc failed, tbl %p "
882 "vaddr %p npages %d\n", tbl, vaddr,
883 npages);
884 }
885 } else
886 dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
887 }
888
889 return dma_handle;
890}
891
892void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
893 size_t size, enum dma_data_direction direction,
894 unsigned long attrs)
895{
896 unsigned int npages;
897
898 BUG_ON(direction == DMA_NONE);
899
900 if (tbl) {
901 npages = iommu_num_pages(dma_handle, size,
902 IOMMU_PAGE_SIZE(tbl));
903 iommu_free(tbl, dma_handle, npages);
904 }
905}
906
907/* Allocates a contiguous real buffer and creates mappings over it.
908 * Returns the virtual address of the buffer and sets dma_handle
909 * to the dma address (mapping) of the first page.
910 */
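/*
 * Illustrative pairing with iommu_free_coherent() (a sketch, not code from
 * this file); "dev", "tbl" and "nid" are assumed to have been set up by
 * the caller:
 *
 *	dma_addr_t handle;
 *	void *buf = iommu_alloc_coherent(dev, tbl, size, &handle,
 *					 dev->coherent_dma_mask, GFP_KERNEL,
 *					 nid);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	iommu_free_coherent(tbl, size, buf, handle);
 */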
911void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
912 size_t size, dma_addr_t *dma_handle,
913 unsigned long mask, gfp_t flag, int node)
914{
915 void *ret = NULL;
916 dma_addr_t mapping;
917 unsigned int order;
918 unsigned int nio_pages, io_order;
919 struct page *page;
920 int tcesize = (1 << tbl->it_page_shift);
921
922 size = PAGE_ALIGN(size);
923 order = get_order(size);
924
925 /*
926 * Client asked for way too much space. This is checked later
927 * anyway. It is easier to debug here for the drivers than in
928 * the tce tables.
929 */
930 if (order >= IOMAP_MAX_ORDER) {
931		dev_info(dev, "iommu_alloc_coherent size too large: 0x%zx\n",
932 size);
933 return NULL;
934 }
935
936 if (!tbl)
937 return NULL;
938
939 /* Alloc enough pages (and possibly more) */
940 page = alloc_pages_node(node, flag, order);
941 if (!page)
942 return NULL;
943 ret = page_address(page);
944 memset(ret, 0, size);
945
946 /* Set up tces to cover the allocated range */
947 nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
948
949 io_order = get_iommu_order(size, tbl);
950 mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
951 mask >> tbl->it_page_shift, io_order, 0);
952 if (mapping == DMA_MAPPING_ERROR) {
953 free_pages((unsigned long)ret, order);
954 return NULL;
955 }
956
957 *dma_handle = mapping | ((u64)ret & (tcesize - 1));
958 return ret;
959}
960
961void iommu_free_coherent(struct iommu_table *tbl, size_t size,
962 void *vaddr, dma_addr_t dma_handle)
963{
964 if (tbl) {
965 unsigned int nio_pages;
966
967 size = PAGE_ALIGN(size);
968 nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
969 iommu_free(tbl, dma_handle, nio_pages);
971 free_pages((unsigned long)vaddr, get_order(size));
972 }
973}
974
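/*
 * Translate a DMA API direction into TCE permission bits: the device needs
 * TCE_PCI_READ to read memory (DMA_TO_DEVICE) and TCE_PCI_WRITE to write
 * it (DMA_FROM_DEVICE); bidirectional mappings get both.
 */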
975unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
976{
977 switch (dir) {
978 case DMA_BIDIRECTIONAL:
979 return TCE_PCI_READ | TCE_PCI_WRITE;
980 case DMA_FROM_DEVICE:
981 return TCE_PCI_WRITE;
982 case DMA_TO_DEVICE:
983 return TCE_PCI_READ;
984 default:
985 return 0;
986 }
987}
988EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
989
990#ifdef CONFIG_IOMMU_API
991
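/*
 * Device-iteration callback: report (via *data) the first PCI device that
 * is already attached to an IOMMU group and return 1, which stops
 * iteration in helpers such as device_for_each_child().
 */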
992int dev_has_iommu_table(struct device *dev, void *data)
993{
994 struct pci_dev *pdev = to_pci_dev(dev);
995 struct pci_dev **ppdev = data;
996
997 if (!dev)
998 return 0;
999
1000 if (device_iommu_mapped(dev)) {
1001 *ppdev = pdev;
1002 return 1;
1003 }
1004
1005 return 0;
1006}
1007
1008/*
1009 * SPAPR TCE API
1010 */
1011static void group_release(void *iommu_data)
1012{
1013 struct iommu_table_group *table_group = iommu_data;
1014
1015 table_group->group = NULL;
1016}
1017
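/*
 * Allocate an IOMMU group for a partitionable endpoint (PE) and name it
 * "domain<N>-pe<num>" so the PE can be identified from sysfs.
 */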
1018void iommu_register_group(struct iommu_table_group *table_group,
1019 int pci_domain_number, unsigned long pe_num)
1020{
1021 struct iommu_group *grp;
1022 char *name;
1023
1024 grp = iommu_group_alloc();
1025 if (IS_ERR(grp)) {
1026 pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
1027 PTR_ERR(grp));
1028 return;
1029 }
1030 table_group->group = grp;
1031 iommu_group_set_iommudata(grp, table_group, group_release);
1032 name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
1033 pci_domain_number, pe_num);
1034 if (!name)
1035 return;
1036 iommu_group_set_name(grp, name);
1037 kfree(name);
1038}
1039
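/*
 * Inverse of iommu_direction_to_tce_perm(): recover the DMA direction from
 * the permission bits of a TCE.
 */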
1040enum dma_data_direction iommu_tce_direction(unsigned long tce)
1041{
1042 if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
1043 return DMA_BIDIRECTIONAL;
1044 else if (tce & TCE_PCI_READ)
1045 return DMA_TO_DEVICE;
1046 else if (tce & TCE_PCI_WRITE)
1047 return DMA_FROM_DEVICE;
1048 else
1049 return DMA_NONE;
1050}
1051EXPORT_SYMBOL_GPL(iommu_tce_direction);
1052
1053void iommu_flush_tce(struct iommu_table *tbl)
1054{
1055 /* Flush/invalidate TLB caches if necessary */
1056 if (tbl->it_ops->flush)
1057 tbl->it_ops->flush(tbl);
1058
1059 /* Make sure updates are seen by hardware */
1060 mb();
1061}
1062EXPORT_SYMBOL_GPL(iommu_flush_tce);
1063
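/*
 * Validate a caller-supplied I/O bus address range: the address must be
 * aligned to the IOMMU page size and, once converted to a page number,
 * must start within the table window described by offset/size. For
 * example, with a 4K page shift an ioba of 0x1003 fails the alignment
 * check, while 0x1000 passes and is then checked against the window.
 */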
1064int iommu_tce_check_ioba(unsigned long page_shift,
1065 unsigned long offset, unsigned long size,
1066 unsigned long ioba, unsigned long npages)
1067{
1068 unsigned long mask = (1UL << page_shift) - 1;
1069
1070 if (ioba & mask)
1071 return -EINVAL;
1072
1073 ioba >>= page_shift;
1074 if (ioba < offset)
1075 return -EINVAL;
1076
1077 if ((ioba + 1) > (offset + size))
1078 return -EINVAL;
1079
1080 return 0;
1081}
1082EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
1083
1084int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
1085{
1086 unsigned long mask = (1UL << page_shift) - 1;
1087
1088 if (gpa & mask)
1089 return -EINVAL;
1090
1091 return 0;
1092}
1093EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
1094
1095long iommu_tce_xchg_no_kill(struct mm_struct *mm,
1096 struct iommu_table *tbl,
1097 unsigned long entry, unsigned long *hpa,
1098 enum dma_data_direction *direction)
1099{
1100 long ret;
1101 unsigned long size = 0;
1102
1103 ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction);
1104 if (!ret && ((*direction == DMA_FROM_DEVICE) ||
1105 (*direction == DMA_BIDIRECTIONAL)) &&
1106 !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
1107 &size))
1108 SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));
1109
1110 return ret;
1111}
1112EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);
1113
1114void iommu_tce_kill(struct iommu_table *tbl,
1115 unsigned long entry, unsigned long pages)
1116{
1117 if (tbl->it_ops->tce_kill)
1118 tbl->it_ops->tce_kill(tbl, entry, pages);
1119}
1120EXPORT_SYMBOL_GPL(iommu_tce_kill);
1121
1122int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
1123{
1124 /*
1125 * The sysfs entries should be populated before
1126	 * binding the IOMMU group. If the sysfs entries aren't
1127	 * ready, we simply bail.
1128 */
1129 if (!device_is_registered(dev))
1130 return -ENOENT;
1131
1132 if (device_iommu_mapped(dev)) {
1133 pr_debug("%s: Skipping device %s with iommu group %d\n",
1134 __func__, dev_name(dev),
1135 iommu_group_id(dev->iommu_group));
1136 return -EBUSY;
1137 }
1138
1139 pr_debug("%s: Adding %s to iommu group %d\n",
1140 __func__, dev_name(dev), iommu_group_id(table_group->group));
1141 /*
1142	 * This still does not add devices via the IOMMU bus notifier because
1143	 * pcibios_init() from arch/powerpc/kernel/pci_64.c calls
1144	 * pcibios_scan_phb() first (which adds devices and triggers the
1145	 * notifier), and only then calls pci_bus_add_devices(), which
1146	 * configures DMA for the buses and also creates PEs and IOMMU groups.
1147 */
1148 return iommu_probe_device(dev);
1149}
1150EXPORT_SYMBOL_GPL(iommu_add_device);
1151
1152#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1153/*
1154 * A simple iommu_ops to allow less cruft in generic VFIO code.
1155 */
1156static int
1157spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
1158 struct device *dev)
1159{
1160 struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1161 struct iommu_table_group *table_group;
1162 struct iommu_group *grp;
1163
1164	/* At the first attach, the ownership is already set */
1165 if (!domain)
1166 return 0;
1167
1168 grp = iommu_group_get(dev);
1169 table_group = iommu_group_get_iommudata(grp);
1170 /*
1171	 * The domain is being switched to PLATFORM from an earlier
1172	 * BLOCKED domain, so the table_group ownership has to be released.
1173 */
1174 table_group->ops->release_ownership(table_group, dev);
1175 iommu_group_put(grp);
1176
1177 return 0;
1178}
1179
1180static const struct iommu_domain_ops spapr_tce_platform_domain_ops = {
1181 .attach_dev = spapr_tce_platform_iommu_attach_dev,
1182};
1183
1184static struct iommu_domain spapr_tce_platform_domain = {
1185 .type = IOMMU_DOMAIN_PLATFORM,
1186 .ops = &spapr_tce_platform_domain_ops,
1187};
1188
1189static int
1190spapr_tce_blocked_iommu_attach_dev(struct iommu_domain *platform_domain,
1191 struct device *dev)
1192{
1193 struct iommu_group *grp = iommu_group_get(dev);
1194 struct iommu_table_group *table_group;
1195 int ret = -EINVAL;
1196
1197 /*
1198 * FIXME: SPAPR mixes blocked and platform behaviors, the blocked domain
1199 * also sets the dma_api ops
1200 */
1201 table_group = iommu_group_get_iommudata(grp);
1202 ret = table_group->ops->take_ownership(table_group, dev);
1203 iommu_group_put(grp);
1204
1205 return ret;
1206}
1207
1208static const struct iommu_domain_ops spapr_tce_blocked_domain_ops = {
1209 .attach_dev = spapr_tce_blocked_iommu_attach_dev,
1210};
1211
1212static struct iommu_domain spapr_tce_blocked_domain = {
1213 .type = IOMMU_DOMAIN_BLOCKED,
1214 .ops = &spapr_tce_blocked_domain_ops,
1215};
1216
1217static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
1218{
1219 switch (cap) {
1220 case IOMMU_CAP_CACHE_COHERENCY:
1221 return true;
1222 default:
1223 break;
1224 }
1225
1226 return false;
1227}
1228
1229static struct iommu_device *spapr_tce_iommu_probe_device(struct device *dev)
1230{
1231 struct pci_dev *pdev;
1232 struct pci_controller *hose;
1233
1234 if (!dev_is_pci(dev))
1235 return ERR_PTR(-ENODEV);
1236
1237 pdev = to_pci_dev(dev);
1238 hose = pdev->bus->sysdata;
1239
1240 return &hose->iommu;
1241}
1242
1243static void spapr_tce_iommu_release_device(struct device *dev)
1244{
1245}
1246
1247static struct iommu_group *spapr_tce_iommu_device_group(struct device *dev)
1248{
1249 struct pci_controller *hose;
1250 struct pci_dev *pdev;
1251
1252 pdev = to_pci_dev(dev);
1253 hose = pdev->bus->sysdata;
1254
1255 if (!hose->controller_ops.device_group)
1256 return ERR_PTR(-ENOENT);
1257
1258 return hose->controller_ops.device_group(hose, pdev);
1259}
1260
1261static const struct iommu_ops spapr_tce_iommu_ops = {
1262 .default_domain = &spapr_tce_platform_domain,
1263 .blocked_domain = &spapr_tce_blocked_domain,
1264 .capable = spapr_tce_iommu_capable,
1265 .probe_device = spapr_tce_iommu_probe_device,
1266 .release_device = spapr_tce_iommu_release_device,
1267 .device_group = spapr_tce_iommu_device_group,
1268};
1269
1270static struct attribute *spapr_tce_iommu_attrs[] = {
1271 NULL,
1272};
1273
1274static struct attribute_group spapr_tce_iommu_group = {
1275 .name = "spapr-tce-iommu",
1276 .attrs = spapr_tce_iommu_attrs,
1277};
1278
1279static const struct attribute_group *spapr_tce_iommu_groups[] = {
1280 &spapr_tce_iommu_group,
1281 NULL,
1282};
1283
1284void ppc_iommu_register_device(struct pci_controller *phb)
1285{
1286 iommu_device_sysfs_add(&phb->iommu, phb->parent,
1287 spapr_tce_iommu_groups, "iommu-phb%04x",
1288 phb->global_number);
1289 iommu_device_register(&phb->iommu, &spapr_tce_iommu_ops,
1290 phb->parent);
1291}
1292
1293void ppc_iommu_unregister_device(struct pci_controller *phb)
1294{
1295 iommu_device_unregister(&phb->iommu);
1296 iommu_device_sysfs_remove(&phb->iommu);
1297}
1298
1299/*
1300 * This registers IOMMU devices of PHBs. This needs to happen
1301 * after core_initcall(iommu_init) + postcore_initcall(pci_driver_init) and
1302 * before subsys_initcall(iommu_subsys_init).
1303 */
1304static int __init spapr_tce_setup_phb_iommus_initcall(void)
1305{
1306 struct pci_controller *hose;
1307
1308 list_for_each_entry(hose, &hose_list, list_node) {
1309 ppc_iommu_register_device(hose);
1310 }
1311 return 0;
1312}
1313postcore_initcall_sync(spapr_tce_setup_phb_iommus_initcall);
1314#endif
1315
1316#endif /* CONFIG_IOMMU_API */