// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

#include <linux/dma-direct.h>
#include <linux/pci.h>
#include <asm/iommu.h>

/*
 * Generic iommu implementation
 */

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag,
				      unsigned long attrs)
{
	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				    dma_handle, dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle,
				    unsigned long attrs)
{
	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
			      size, dma_get_mask(dev), direction, attrs);
}


static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
			 attrs);
}


static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
				dma_get_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
			       int nelems, enum dma_data_direction direction,
			       unsigned long attrs)
{
	ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
			   direction, attrs);
}

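/*
 * Ask the PCI host bridge whether this device may bypass the TCE table and
 * DMA directly to any memory for the given mask; weak-ordered fixed
 * mappings rule bypass out.
 */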
static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_controller *phb = pci_bus_to_host(pdev->bus);

	if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
		return false;
	return phb->controller_ops.iommu_bypass_supported(pdev, mask);
}

/* We support DMA to/from any memory page via the iommu */
int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
		dev->dma_ops_bypass = true;
		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
		return 1;
	}

	if (!tbl) {
		dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask);
		return 0;
	}

	if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
		dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
		dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
			 mask, tbl->it_offset << tbl->it_page_shift);
		return 0;
	}

	dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
	dev->dma_ops_bypass = false;
	return 1;
}

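/*
 * The smallest mask a driver must support is one covering the whole DMA
 * window: (it_offset + it_size) IOMMU pages, scaled by the IOMMU page
 * size. For example, a 2 GiB window at offset 0 with 4 KiB pages yields
 * a required mask of 0xffffffff.
 */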
u64 dma_iommu_get_required_mask(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);
	u64 mask;

	if (!tbl)
		return 0;

	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
			tbl->it_page_shift - 1);
	mask += mask - 1;

	return mask;
}

const struct dma_map_ops dma_iommu_ops = {
	.alloc			= dma_iommu_alloc_coherent,
	.free			= dma_iommu_free_coherent,
	.map_sg			= dma_iommu_map_sg,
	.unmap_sg		= dma_iommu_unmap_sg,
	.dma_supported		= dma_iommu_dma_supported,
	.map_page		= dma_iommu_map_page,
	.unmap_page		= dma_iommu_unmap_page,
	.get_required_mask	= dma_iommu_get_required_mask,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
};
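/*
 * Illustrative sketch only (not part of this file): platform code typically
 * points the device at its TCE table and installs these ops, along the
 * lines of
 *
 *	set_iommu_table_base(&pdev->dev, tbl);
 *	set_dma_ops(&pdev->dev, &dma_iommu_ops);
 */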
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

#include <linux/dma-direct.h>
#include <linux/pci.h>
#include <asm/iommu.h>

/*
 * Generic iommu implementation
 */

/*
 * The coherent mask may be smaller than the real mask, check if we can
 * really use a direct window.
 */
static inline bool dma_iommu_alloc_bypass(struct device *dev)
{
	return dev->archdata.iommu_bypass && !iommu_fixed_is_weak &&
		dma_direct_supported(dev, dev->coherent_dma_mask);
}

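/*
 * Streaming mappings may bypass the TCE table when the platform allows it;
 * with weak-ordered fixed mappings the caller must explicitly request weak
 * ordering via DMA_ATTR_WEAK_ORDERING.
 */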
static inline bool dma_iommu_map_bypass(struct device *dev,
		unsigned long attrs)
{
	return dev->archdata.iommu_bypass &&
		(!iommu_fixed_is_weak || (attrs & DMA_ATTR_WEAK_ORDERING));
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag,
				      unsigned long attrs)
{
	if (dma_iommu_alloc_bypass(dev))
		return dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				    dma_handle, dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle,
				    unsigned long attrs)
{
	if (dma_iommu_alloc_bypass(dev))
		dma_direct_free(dev, size, vaddr, dma_handle, attrs);
	else
		iommu_free_coherent(get_iommu_table_base(dev), size, vaddr,
				    dma_handle);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	if (dma_iommu_map_bypass(dev, attrs))
		return dma_direct_map_page(dev, page, offset, size, direction,
					   attrs);
	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
			      size, dma_get_mask(dev), direction, attrs);
}


static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	if (!dma_iommu_map_bypass(dev, attrs))
		iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size,
				 direction, attrs);
	else
		dma_direct_unmap_page(dev, dma_handle, size, direction, attrs);
}


static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	if (dma_iommu_map_bypass(dev, attrs))
		return dma_direct_map_sg(dev, sglist, nelems, direction, attrs);
	return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
				dma_get_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
			       int nelems, enum dma_data_direction direction,
			       unsigned long attrs)
{
	if (!dma_iommu_map_bypass(dev, attrs))
		ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
				   direction, attrs);
	else
		dma_direct_unmap_sg(dev, sglist, nelems, direction, attrs);
}

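/*
 * Ask the PCI host bridge whether this device can use a direct (bypass)
 * window for the given DMA mask.
 */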
static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_controller *phb = pci_bus_to_host(pdev->bus);

	return phb->controller_ops.iommu_bypass_supported &&
		phb->controller_ops.iommu_bypass_supported(pdev, mask);
}

/* We support DMA to/from any memory page via the iommu */
int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
		dev->archdata.iommu_bypass = true;
		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
		return 1;
	}

	if (!tbl) {
		dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask);
		return 0;
	}

	if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
		dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
		dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
			 mask, tbl->it_offset << tbl->it_page_shift);
		return 0;
	}

	dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
	dev->archdata.iommu_bypass = false;
	return 1;
}

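/*
 * If the device can bypass the TCE table, report the mask required for
 * direct mapping; otherwise report a mask that just covers the DMA window.
 */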
u64 dma_iommu_get_required_mask(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);
	u64 mask;

	if (!tbl)
		return 0;

	if (dev_is_pci(dev)) {
		u64 bypass_mask = dma_direct_get_required_mask(dev);

		if (dma_iommu_bypass_supported(dev, bypass_mask))
			return bypass_mask;
	}

	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
	mask += mask - 1;

	return mask;
}

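/*
 * Cache maintenance is only needed on the direct-mapping bypass path;
 * mappings that go through the TCE table need no extra synchronization
 * here.
 */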
static void dma_iommu_sync_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	if (dma_iommu_alloc_bypass(dev))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
}

static void dma_iommu_sync_for_device(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	if (dma_iommu_alloc_bypass(dev))
		dma_direct_sync_single_for_device(dev, addr, sz, dir);
}

static void dma_iommu_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	if (dma_iommu_alloc_bypass(dev))
		dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
}

static void dma_iommu_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	if (dma_iommu_alloc_bypass(dev))
		dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
}

const struct dma_map_ops dma_iommu_ops = {
	.alloc			= dma_iommu_alloc_coherent,
	.free			= dma_iommu_free_coherent,
	.map_sg			= dma_iommu_map_sg,
	.unmap_sg		= dma_iommu_unmap_sg,
	.dma_supported		= dma_iommu_dma_supported,
	.map_page		= dma_iommu_map_page,
	.unmap_page		= dma_iommu_unmap_page,
	.get_required_mask	= dma_iommu_get_required_mask,
	.sync_single_for_cpu	= dma_iommu_sync_for_cpu,
	.sync_single_for_device	= dma_iommu_sync_for_device,
	.sync_sg_for_cpu	= dma_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= dma_iommu_sync_sg_for_device,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
};