1/*
2 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/bug.h>
20#include <linux/types.h>
21#include <linux/module.h>
22#include <linux/slab.h>
23#include <linux/errno.h>
24#include <linux/iommu.h>
25
26static struct iommu_ops *iommu_ops;
27
28void register_iommu(struct iommu_ops *ops)
29{
30 if (iommu_ops)
31 BUG();
32
33 iommu_ops = ops;
34}
35
/*
 * iommu_found - report whether an IOMMU driver has registered its ops
 *
 * Returns true once register_iommu() has run, false before that.
 */
bool iommu_found(void)
{
	return iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_found);
41
42struct iommu_domain *iommu_domain_alloc(void)
43{
44 struct iommu_domain *domain;
45 int ret;
46
47 domain = kmalloc(sizeof(*domain), GFP_KERNEL);
48 if (!domain)
49 return NULL;
50
51 ret = iommu_ops->domain_init(domain);
52 if (ret)
53 goto out_free;
54
55 return domain;
56
57out_free:
58 kfree(domain);
59
60 return NULL;
61}
62EXPORT_SYMBOL_GPL(iommu_domain_alloc);
63
/*
 * iommu_domain_free - tear down a domain from iommu_domain_alloc()
 *
 * The driver callback runs first because it may still dereference
 * fields of @domain; only then is the generic struct freed.
 */
void iommu_domain_free(struct iommu_domain *domain)
{
	iommu_ops->domain_destroy(domain);
	kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
70
/*
 * iommu_attach_device - bind @dev to @domain through the registered driver
 *
 * Returns the driver's result: 0 on success, negative errno otherwise.
 * NOTE(review): iommu_ops is dereferenced unchecked — callers must not
 * use this API before a driver has registered.
 */
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	return iommu_ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
76
/*
 * iommu_detach_device - undo iommu_attach_device() for @dev on @domain
 *
 * Pure delegation to the registered driver; no return value.
 */
void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	iommu_ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
82
/*
 * iommu_iova_to_phys - translate an IO virtual address to a physical one
 * @domain: domain whose page tables to walk
 * @iova:   IO virtual address to look up
 *
 * Returns whatever the driver reports for the translation (drivers
 * conventionally return 0 for an unmapped address).
 */
phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
			       unsigned long iova)
{
	return iommu_ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
89
/*
 * iommu_domain_has_cap - query a driver capability flag for @domain
 * @cap: IOMMU_CAP_* value to test
 *
 * Returns non-zero if the driver reports the capability, 0 otherwise.
 */
int iommu_domain_has_cap(struct iommu_domain *domain,
			 unsigned long cap)
{
	return iommu_ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
96
97int iommu_map(struct iommu_domain *domain, unsigned long iova,
98 phys_addr_t paddr, int gfp_order, int prot)
99{
100 unsigned long invalid_mask;
101 size_t size;
102
103 size = 0x1000UL << gfp_order;
104 invalid_mask = size - 1;
105
106 BUG_ON((iova | paddr) & invalid_mask);
107
108 return iommu_ops->map(domain, iova, paddr, gfp_order, prot);
109}
110EXPORT_SYMBOL_GPL(iommu_map);
111
112int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
113{
114 unsigned long invalid_mask;
115 size_t size;
116
117 size = 0x1000UL << gfp_order;
118 invalid_mask = size - 1;
119
120 BUG_ON(iova & invalid_mask);
121
122 return iommu_ops->unmap(domain, iova, gfp_order);
123}
124EXPORT_SYMBOL_GPL(iommu_unmap);
1/*
2 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#define pr_fmt(fmt) "%s: " fmt, __func__
20
21#include <linux/device.h>
22#include <linux/kernel.h>
23#include <linux/bug.h>
24#include <linux/types.h>
25#include <linux/module.h>
26#include <linux/slab.h>
27#include <linux/errno.h>
28#include <linux/iommu.h>
29
30static ssize_t show_iommu_group(struct device *dev,
31 struct device_attribute *attr, char *buf)
32{
33 unsigned int groupid;
34
35 if (iommu_device_group(dev, &groupid))
36 return 0;
37
38 return sprintf(buf, "%u", groupid);
39}
40static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
41
42static int add_iommu_group(struct device *dev, void *data)
43{
44 unsigned int groupid;
45
46 if (iommu_device_group(dev, &groupid) == 0)
47 return device_create_file(dev, &dev_attr_iommu_group);
48
49 return 0;
50}
51
52static int remove_iommu_group(struct device *dev)
53{
54 unsigned int groupid;
55
56 if (iommu_device_group(dev, &groupid) == 0)
57 device_remove_file(dev, &dev_attr_iommu_group);
58
59 return 0;
60}
61
62static int iommu_device_notifier(struct notifier_block *nb,
63 unsigned long action, void *data)
64{
65 struct device *dev = data;
66
67 if (action == BUS_NOTIFY_ADD_DEVICE)
68 return add_iommu_group(dev, NULL);
69 else if (action == BUS_NOTIFY_DEL_DEVICE)
70 return remove_iommu_group(dev);
71
72 return 0;
73}
74
/* Notifier block registered on every IOMMU-managed bus (iommu_bus_init). */
static struct notifier_block iommu_device_nb = {
	.notifier_call = iommu_device_notifier,
};
78
/*
 * Per-bus IOMMU setup: watch for future device add/remove events and
 * tag all devices already present on the bus with their group file.
 *
 * NOTE(review): the bus_register_notifier() return value is ignored;
 * if registration ever failed, hotplug events would be silently lost.
 * @ops is currently unused here — the caller stored it in bus->iommu_ops.
 */
static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
	bus_register_notifier(bus, &iommu_device_nb);
	bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
}
84
/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 *
 * Returns 0 on success, -EBUSY if another driver already claimed @bus.
 *
 * NOTE(review): the check-then-set on bus->iommu_ops is not atomic;
 * this relies on IOMMU drivers registering serially at init time.
 */
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	iommu_bus_init(bus, ops);

	return 0;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
111
/*
 * iommu_present - report whether @bus has IOMMU ops registered
 *
 * True once a driver has successfully called bus_set_iommu() for @bus.
 */
bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);
117
/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 *
 * Passing a NULL @handler removes a previously installed handler.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
140
141struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
142{
143 struct iommu_domain *domain;
144 int ret;
145
146 if (bus == NULL || bus->iommu_ops == NULL)
147 return NULL;
148
149 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
150 if (!domain)
151 return NULL;
152
153 domain->ops = bus->iommu_ops;
154
155 ret = domain->ops->domain_init(domain);
156 if (ret)
157 goto out_free;
158
159 return domain;
160
161out_free:
162 kfree(domain);
163
164 return NULL;
165}
166EXPORT_SYMBOL_GPL(iommu_domain_alloc);
167
/*
 * iommu_domain_free - tear down a domain from iommu_domain_alloc()
 *
 * Drivers may omit domain_destroy; the generic struct is freed either
 * way. The driver callback runs first because it may still read
 * fields of @domain.
 */
void iommu_domain_free(struct iommu_domain *domain)
{
	if (likely(domain->ops->domain_destroy != NULL))
		domain->ops->domain_destroy(domain);

	kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
176
177int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
178{
179 if (unlikely(domain->ops->attach_dev == NULL))
180 return -ENODEV;
181
182 return domain->ops->attach_dev(domain, dev);
183}
184EXPORT_SYMBOL_GPL(iommu_attach_device);
185
186void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
187{
188 if (unlikely(domain->ops->detach_dev == NULL))
189 return;
190
191 domain->ops->detach_dev(domain, dev);
192}
193EXPORT_SYMBOL_GPL(iommu_detach_device);
194
195phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
196 unsigned long iova)
197{
198 if (unlikely(domain->ops->iova_to_phys == NULL))
199 return 0;
200
201 return domain->ops->iova_to_phys(domain, iova);
202}
203EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
204
205int iommu_domain_has_cap(struct iommu_domain *domain,
206 unsigned long cap)
207{
208 if (unlikely(domain->ops->domain_has_cap == NULL))
209 return 0;
210
211 return domain->ops->domain_has_cap(domain, cap);
212}
213EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
214
/*
 * iommu_map - map a physically contiguous region into @domain
 * @domain: domain to map into
 * @iova:   target IO virtual address
 * @paddr:  physical address to map
 * @size:   size of the region in bytes
 * @prot:   IOMMU_READ/IOMMU_WRITE protection flags
 *
 * Splits the region into the largest hardware-supported pages that the
 * alignment of @iova/@paddr and the remaining @size allow, calling the
 * driver's ->map() once per chunk. On any driver failure, everything
 * mapped so far is unmapped again and the error is returned.
 *
 * Returns 0 on success (including @size == 0, which skips the loop),
 * -ENODEV without a ->map() callback, -EINVAL on misalignment, or the
 * driver's error.
 */
int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
			"0x%x\n", iova, (unsigned long)paddr,
			(unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
				(unsigned long)paddr, (unsigned long)size);

	while (size) {
		unsigned long pgsize, addr_merge = iova | paddr;
		unsigned int pgsize_idx;

		/* Max page size that still fits into 'size' */
		pgsize_idx = __fls(size);

		/* need to consider alignment requirements ? */
		if (likely(addr_merge)) {
			/* Max page size allowed by both iova and paddr */
			unsigned int align_pgsize_idx = __ffs(addr_merge);

			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
		}

		/* build a mask of acceptable page sizes */
		pgsize = (1UL << (pgsize_idx + 1)) - 1;

		/* throw away page sizes not supported by the hardware */
		pgsize &= domain->ops->pgsize_bitmap;

		/*
		 * make sure we're still sane: the alignment check above
		 * guarantees at least min_pagesz survives the mask
		 */
		BUG_ON(!pgsize);

		/* pick the biggest page */
		pgsize_idx = __fls(pgsize);
		pgsize = 1UL << pgsize_idx;

		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
					(unsigned long)paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
291
/*
 * iommu_unmap - unmap a region previously mapped into @domain
 * @domain: domain to unmap from
 * @iova:   IO virtual address the region starts at
 * @size:   size of the region in bytes
 *
 * Repeatedly asks the driver to unmap from @iova, advancing by whatever
 * page size the driver actually removed, until @size bytes are gone or
 * the driver reports an unmapped area (returns 0).
 *
 * Returns the number of bytes actually unmapped.
 *
 * NOTE(review): the -ENODEV and -EINVAL error paths return a negative
 * value through the unsigned size_t return type, which callers see as
 * a huge byte count; callers must check against their requested size,
 * not for a negative result.
 */
size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;

	if (unlikely(domain->ops->unmap == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
			iova, (unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
				(unsigned long)size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t left = size - unmapped;

		unmapped_page = domain->ops->unmap(domain, iova, left);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
					(unsigned long)unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
338
339int iommu_device_group(struct device *dev, unsigned int *groupid)
340{
341 if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
342 return dev->bus->iommu_ops->device_group(dev, groupid);
343
344 return -ENODEV;
345}
346EXPORT_SYMBOL_GPL(iommu_device_group);