// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;
static u64 s390_iommu_aperture;
static u32 s390_iommu_aperture_factor = 1;

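/* Refresh (rpcit) the device's DMA translations for the entire aperture */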
static int zpci_refresh_global(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
				  zdev->iommu_pages * PAGE_SIZE);
}

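/*
 * Allocate a region/segment table from its kmem cache and mark every entry
 * invalid; returns NULL if the (atomic) allocation fails.
 */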
unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

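/*
 * Return the segment table origin referenced by a region-table entry,
 * allocating and installing a new segment table with cmpxchg() if the entry
 * is still invalid. Losing the cmpxchg() race simply means another CPU
 * installed a table first, so the local allocation is freed and the winner's
 * table is used. dma_get_page_table_origin() below follows the same pattern
 * one level down.
 */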
static unsigned long *dma_get_seg_table_origin(unsigned long *rtep)
{
	unsigned long old_rte, rte;
	unsigned long *sto;

	rte = READ_ONCE(*rtep);
	if (reg_entry_isvalid(rte)) {
		sto = get_rt_sto(rte);
	} else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(&rte, virt_to_phys(sto));
		validate_rt_entry(&rte);
		entry_clr_protected(&rte);

		old_rte = cmpxchg(rtep, ZPCI_TABLE_INVALID, rte);
		if (old_rte != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(sto);
			sto = get_rt_sto(old_rte);
		}
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *step)
{
	unsigned long old_ste, ste;
	unsigned long *pto;

	ste = READ_ONCE(*step);
	if (reg_entry_isvalid(ste)) {
		pto = get_st_pto(ste);
	} else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(&ste, virt_to_phys(pto));
		validate_st_entry(&ste);
		entry_clr_protected(&ste);

		old_ste = cmpxchg(step, ZPCI_TABLE_INVALID, ste);
		if (old_ste != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_page_table(pto);
			pto = get_st_pto(old_ste);
		}
	}
	return pto;
}

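/*
 * Walk the three-level translation table (region table -> segment table ->
 * page table) for a DMA address and return a pointer to its page-table
 * entry, creating intermediate tables on demand.
 */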
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

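/*
 * Build the new page-table entry in a local copy and publish it with a
 * single xchg(): either invalidate it or point it at page_addr, and set or
 * clear write protection according to flags.
 */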
void dma_update_cpu_trans(unsigned long *ptep, phys_addr_t page_addr, int flags)
{
	unsigned long pte;

	pte = READ_ONCE(*ptep);
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(&pte);
	} else {
		set_pt_pfaa(&pte, page_addr);
		validate_pt_entry(&pte);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(&pte);
	else
		entry_clr_protected(&pte);

	xchg(ptep, pte);
}

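/*
 * Update the CPU-side translation tables for a physically contiguous range,
 * one page at a time. If a table allocation fails midway while establishing
 * valid entries, the pages mapped so far are rolled back to the invalid
 * state.
 */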
static int __dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
			      dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	phys_addr_t page_addr = (pa & PAGE_MASK);
	unsigned long *entry;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	if (!zdev->dma_table)
		return -EINVAL;

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
		if (!entry) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

undo_cpu_trans:
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
		flags = ZPCI_PTE_INVALID;
		while (i-- > 0) {
			page_addr -= PAGE_SIZE;
			dma_addr -= PAGE_SIZE;
			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
			if (!entry)
				break;
			dma_update_cpu_trans(entry, page_addr, flags);
		}
	}
	return rc;
}

static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
			   size_t size, int flags)
{
	unsigned long irqflags;
	int ret;

	/*
	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
	 * translations when previously invalid translation-table entries are
	 * validated. With lazy unmap, rpcit is skipped for previously valid
	 * entries, but a global rpcit is then required before any address can
	 * be re-used, i.e. after each iommu bitmap wrap-around.
	 */
	if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
		if (!zdev->tlb_refresh)
			return 0;
	} else {
		if (!s390_iommu_strict)
			return 0;
	}

	ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
				 PAGE_ALIGN(size));
	if (ret == -ENOMEM && !s390_iommu_strict) {
		/* enable the hypervisor to free some resources */
		if (zpci_refresh_global(zdev))
			goto out;

		spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
		bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
			      zdev->lazy_bitmap, zdev->iommu_pages);
		bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
		spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
		ret = 0;
	}
out:
	return ret;
}

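/*
 * Combine a translation-table update with the required TLB purge. If the
 * purge fails while mapping, the just-created entries are invalidated again
 * so that no stale mapping is left behind.
 */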
static int dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	int rc;

	rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (rc)
		return rc;

	rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		__dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);

	return rc;
}

void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

void dma_cleanup_tables(unsigned long *table)
{
	int rtx;

	if (!table)
		return;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
}

static unsigned long __dma_alloc_iommu(struct device *dev,
				       unsigned long start, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, zdev->start_dma >> PAGE_SHIFT,
				dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT),
				0);
}

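/*
 * Allocate a DMA address range of 'size' pages from the per-device iommu
 * bitmap. Allocation starts at the position of the previous allocation; on
 * failure the allocator wraps around to the start of the aperture, after
 * flushing the device TLB and recycling lazily freed addresses when running
 * in the default (lazy) mode.
 */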
static dma_addr_t dma_alloc_address(struct device *dev, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
	if (offset == -1) {
		if (!s390_iommu_strict) {
			/* global flush before DMA addresses are reused */
			if (zpci_refresh_global(zdev))
				goto out_error;

			bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
				      zdev->lazy_bitmap, zdev->iommu_pages);
			bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
		}
		/* wrap-around */
		offset = __dma_alloc_iommu(dev, 0, size);
		if (offset == -1)
			goto out_error;
	}
	zdev->next_bit = offset + size;
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);

	return zdev->start_dma + offset * PAGE_SIZE;

out_error:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return DMA_MAPPING_ERROR;
}

static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long flags, offset;

	offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;

	if (s390_iommu_strict)
		bitmap_clear(zdev->iommu_bitmap, offset, size);
	else
		bitmap_set(zdev->lazy_bitmap, offset, size);

out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
{
	struct {
		unsigned long rc;
		unsigned long addr;
	} __packed data = {rc, addr};

	zpci_err_hex(&data, sizeof(data));
}

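/*
 * dma_map_ops map_page callback: allocate a DMA address range covering the
 * (page, offset, size) region, establish the translations and return the
 * DMA address with the sub-page offset added back in. DMA_NONE and
 * DMA_TO_DEVICE mappings are write-protected.
 */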
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	unsigned long nr_pages;
	dma_addr_t dma_addr;
	int ret;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	dma_addr = dma_alloc_address(dev, nr_pages);
	if (dma_addr == DMA_MAPPING_ERROR) {
		ret = -ENOSPC;
		goto out_err;
	}

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (ret)
		goto out_free;

	atomic64_add(nr_pages, &zdev->mapped_pages);
	return dma_addr + (offset & ~PAGE_MASK);

out_free:
	dma_free_address(dev, dma_addr, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return DMA_MAPPING_ERROR;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	int npages, ret;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			       ZPCI_PTE_INVALID);
	if (ret) {
		zpci_err("unmap error:\n");
		zpci_err_dma(ret, dma_addr);
		return;
	}

	atomic64_add(npages, &zdev->unmapped_pages);
	dma_free_address(dev, dma_addr, npages);
}

static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	struct page *page;
	phys_addr_t pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag | __GFP_ZERO, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, map)) {
		__free_pages(page, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return phys_to_virt(pa);
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *vaddr, dma_addr_t dma_handle,
			  unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
	free_pages((unsigned long)vaddr, get_order(size));
}

/* Map a segment into a contiguous dma address area */
static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			     size_t size, dma_addr_t *handle,
			     enum dma_data_direction dir)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	dma_addr_t dma_addr_base, dma_addr;
	int flags = ZPCI_PTE_VALID;
	struct scatterlist *s;
	phys_addr_t pa = 0;
	int ret;

	dma_addr_base = dma_alloc_address(dev, nr_pages);
	if (dma_addr_base == DMA_MAPPING_ERROR)
		return -ENOMEM;

	dma_addr = dma_addr_base;
	if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
		pa = page_to_phys(sg_page(s));
		ret = __dma_update_trans(zdev, pa, dma_addr,
					 s->offset + s->length, flags);
		if (ret)
			goto unmap;

		dma_addr += s->offset + s->length;
	}
	ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
	if (ret)
		goto unmap;

	*handle = dma_addr_base;
	atomic64_add(nr_pages, &zdev->mapped_pages);

	return ret;

unmap:
	dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
			 ZPCI_PTE_INVALID);
	dma_free_address(dev, dma_addr_base, nr_pages);
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return ret;
}

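/*
 * dma_map_ops map_sg callback: merge as many consecutive scatterlist
 * elements as possible into one contiguous DMA address range. A new range
 * is started whenever an element does not start at a page boundary, the
 * accumulated size is not page aligned, or the merged size would exceed the
 * device's maximum segment size. Returns the number of DMA segments created,
 * or a negative errno on failure.
 */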
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *s = sg, *start = sg, *dma = sg;
	unsigned int max = dma_get_max_seg_size(dev);
	unsigned int size = s->offset + s->length;
	unsigned int offset = s->offset;
	int count = 0, i, ret;

	for (i = 1; i < nr_elements; i++) {
		s = sg_next(s);

		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) ||
		    size + s->length > max) {
			ret = __s390_dma_map_sg(dev, start, size,
						&dma->dma_address, dir);
			if (ret)
				goto unmap;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count++;
		}
		size += s->length;
	}
	ret = __s390_dma_map_sg(dev, start, size, &dma->dma_address, dir);
	if (ret)
		goto unmap;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count + 1;
unmap:
	for_each_sg(sg, s, count, i)
		s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
				     dir, attrs);

	return ret;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		if (s->dma_length)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, attrs);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

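/*
 * Set up the per-device DMA translation state: allocate the root translation
 * table and the iommu (and, in lazy mode, lazy) bitmaps, then register the
 * table with the hardware via zpci_register_ioat().
 */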
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	u8 status;
	int rc;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	spin_lock_init(&zdev->iommu_bitmap_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Restrict the iommu bitmap size to the minimum of the following:
	 * - s390_iommu_aperture which defaults to high_memory
	 * - 3-level pagetable address limit minus start_dma offset
	 * - DMA address range allowed by the hardware (clp query pci fn)
	 *
	 * Also set zdev->end_dma to the actual end address of the usable
	 * range, instead of the theoretical maximum as reported by hardware.
	 *
	 * This limits the number of concurrently usable DMA mappings since
	 * for each DMA mapped memory address we need a DMA address including
	 * extra DMA addresses for multiple mappings of the same memory address.
	 */
	zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
	zdev->iommu_size = min3(s390_iommu_aperture,
				ZPCI_TABLE_SIZE_RT - zdev->start_dma,
				zdev->end_dma - zdev->start_dma + 1);
	zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto free_dma_table;
	}
	if (!s390_iommu_strict) {
		zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
		if (!zdev->lazy_bitmap) {
			rc = -ENOMEM;
			goto free_bitmap;
		}

	}
	if (zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
			       virt_to_phys(zdev->dma_table), &status)) {
		rc = -EIO;
		goto free_bitmap;
	}

	return 0;
free_bitmap:
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;
free_dma_table:
	dma_free_cpu_table(zdev->dma_table);
	zdev->dma_table = NULL;
out:
	return rc;
}

int zpci_dma_exit_device(struct zpci_dev *zdev)
{
	int cc = 0;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);
	if (zdev_enabled(zdev))
		cc = zpci_unregister_ioat(zdev, 0);
	/*
	 * cc == 3 indicates the function is gone already. This can happen
	 * if the function was deconfigured/disabled suddenly and we have not
	 * received a new handle yet.
	 */
	if (cc && cc != 3)
		return -EIO;

	dma_cleanup_tables(zdev->dma_table);
	zdev->dma_table = NULL;
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;
	zdev->next_bit = 0;
	return 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

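/*
 * Compute the global aperture limit from the amount of usable memory
 * (high_memory) scaled by the s390_iommu_aperture command-line factor; a
 * factor of 0 removes the limit. Then create the kmem caches for the
 * translation tables.
 */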
int __init zpci_dma_init(void)
{
	s390_iommu_aperture = (u64)virt_to_phys(high_memory);
	if (!s390_iommu_aperture_factor)
		s390_iommu_aperture = ULONG_MAX;
	else
		s390_iommu_aperture *= s390_iommu_aperture_factor;

	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

const struct dma_map_ops s390_pci_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	.mmap		= dma_common_mmap,
	.get_sgtable	= dma_common_get_sgtable,
	.alloc_pages	= dma_common_alloc_pages,
	.free_pages	= dma_common_free_pages,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);

static int __init s390_iommu_setup(char *str)
{
	if (!strcmp(str, "strict"))
		s390_iommu_strict = 1;
	return 1;
}

__setup("s390_iommu=", s390_iommu_setup);

static int __init s390_iommu_aperture_setup(char *str)
{
	if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
		s390_iommu_aperture_factor = 1;
	return 1;
}

__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);