1/*
2 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/bug.h>
20#include <linux/types.h>
21#include <linux/module.h>
22#include <linux/slab.h>
23#include <linux/errno.h>
24#include <linux/iommu.h>
25
26static struct iommu_ops *iommu_ops;
27
28void register_iommu(struct iommu_ops *ops)
29{
30 if (iommu_ops)
31 BUG();
32
33 iommu_ops = ops;
34}
35
36bool iommu_found(void)
37{
38 return iommu_ops != NULL;
39}
40EXPORT_SYMBOL_GPL(iommu_found);
41
42struct iommu_domain *iommu_domain_alloc(void)
43{
44 struct iommu_domain *domain;
45 int ret;
46
47 domain = kmalloc(sizeof(*domain), GFP_KERNEL);
48 if (!domain)
49 return NULL;
50
51 ret = iommu_ops->domain_init(domain);
52 if (ret)
53 goto out_free;
54
55 return domain;
56
57out_free:
58 kfree(domain);
59
60 return NULL;
61}
62EXPORT_SYMBOL_GPL(iommu_domain_alloc);
63
64void iommu_domain_free(struct iommu_domain *domain)
65{
66 iommu_ops->domain_destroy(domain);
67 kfree(domain);
68}
69EXPORT_SYMBOL_GPL(iommu_domain_free);
70
71int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
72{
73 return iommu_ops->attach_dev(domain, dev);
74}
75EXPORT_SYMBOL_GPL(iommu_attach_device);
76
77void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
78{
79 iommu_ops->detach_dev(domain, dev);
80}
81EXPORT_SYMBOL_GPL(iommu_detach_device);
82
83phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
84 unsigned long iova)
85{
86 return iommu_ops->iova_to_phys(domain, iova);
87}
88EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
89
90int iommu_domain_has_cap(struct iommu_domain *domain,
91 unsigned long cap)
92{
93 return iommu_ops->domain_has_cap(domain, cap);
94}
95EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
96
97int iommu_map(struct iommu_domain *domain, unsigned long iova,
98 phys_addr_t paddr, int gfp_order, int prot)
99{
100 unsigned long invalid_mask;
101 size_t size;
102
103 size = 0x1000UL << gfp_order;
104 invalid_mask = size - 1;
105
106 BUG_ON((iova | paddr) & invalid_mask);
107
108 return iommu_ops->map(domain, iova, paddr, gfp_order, prot);
109}
110EXPORT_SYMBOL_GPL(iommu_map);
111
112int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
113{
114 unsigned long invalid_mask;
115 size_t size;
116
117 size = 0x1000UL << gfp_order;
118 invalid_mask = size - 1;
119
120 BUG_ON(iova & invalid_mask);
121
122 return iommu_ops->unmap(domain, iova, gfp_order);
123}
124EXPORT_SYMBOL_GPL(iommu_unmap);
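/*
 * Illustrative sketch, not part of the original file: how a caller of this
 * legacy API might map and unmap one 2MiB region. gfp_order expresses the
 * size as a power-of-two number of 4KiB pages, so order 9 is 2MiB, and both
 * addresses must be aligned to that size. The addresses below are
 * hypothetical.
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *	int ret;
 *
 *	if (!dom)
 *		return -ENOMEM;
 *	ret = iommu_map(dom, 0x100000000UL, 0x80000000UL, 9,
 *			IOMMU_READ | IOMMU_WRITE);
 *	if (!ret)
 *		iommu_unmap(dom, 0x100000000UL, 9);
 *	iommu_domain_free(dom);
 */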
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
4 * Author: Joerg Roedel <jroedel@suse.de>
5 */
6
7#define pr_fmt(fmt) "iommu: " fmt
8
9#include <linux/device.h>
10#include <linux/kernel.h>
11#include <linux/bug.h>
12#include <linux/types.h>
13#include <linux/init.h>
14#include <linux/export.h>
15#include <linux/slab.h>
16#include <linux/errno.h>
17#include <linux/iommu.h>
18#include <linux/idr.h>
19#include <linux/notifier.h>
20#include <linux/err.h>
21#include <linux/pci.h>
22#include <linux/bitops.h>
23#include <linux/property.h>
24#include <linux/fsl/mc.h>
25#include <trace/events/iommu.h>
26
27static struct kset *iommu_group_kset;
28static DEFINE_IDA(iommu_group_ida);
29
30static unsigned int iommu_def_domain_type __read_mostly;
31static bool iommu_dma_strict __read_mostly = true;
32static u32 iommu_cmd_line __read_mostly;
33
34struct iommu_group {
35 struct kobject kobj;
36 struct kobject *devices_kobj;
37 struct list_head devices;
38 struct mutex mutex;
39 struct blocking_notifier_head notifier;
40 void *iommu_data;
41 void (*iommu_data_release)(void *iommu_data);
42 char *name;
43 int id;
44 struct iommu_domain *default_domain;
45 struct iommu_domain *domain;
46};
47
48struct group_device {
49 struct list_head list;
50 struct device *dev;
51 char *name;
52};
53
54struct iommu_group_attribute {
55 struct attribute attr;
56 ssize_t (*show)(struct iommu_group *group, char *buf);
57 ssize_t (*store)(struct iommu_group *group,
58 const char *buf, size_t count);
59};
60
61static const char * const iommu_group_resv_type_string[] = {
62 [IOMMU_RESV_DIRECT] = "direct",
63 [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable",
64 [IOMMU_RESV_RESERVED] = "reserved",
65 [IOMMU_RESV_MSI] = "msi",
66 [IOMMU_RESV_SW_MSI] = "msi",
67};
68
69#define IOMMU_CMD_LINE_DMA_API BIT(0)
70
71static void iommu_set_cmd_line_dma_api(void)
72{
73 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
74}
75
76static bool iommu_cmd_line_dma_api(void)
77{
78 return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
79}
80
81#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
82struct iommu_group_attribute iommu_group_attr_##_name = \
83 __ATTR(_name, _mode, _show, _store)
84
85#define to_iommu_group_attr(_attr) \
86 container_of(_attr, struct iommu_group_attribute, attr)
87#define to_iommu_group(_kobj) \
88 container_of(_kobj, struct iommu_group, kobj)
89
90static LIST_HEAD(iommu_device_list);
91static DEFINE_SPINLOCK(iommu_device_lock);
92
93/*
94 * Use a function instead of an array here because the domain-type is a
95 * bit-field, so an array would waste memory.
96 */
97static const char *iommu_domain_type_str(unsigned int t)
98{
99 switch (t) {
100 case IOMMU_DOMAIN_BLOCKED:
101 return "Blocked";
102 case IOMMU_DOMAIN_IDENTITY:
103 return "Passthrough";
104 case IOMMU_DOMAIN_UNMANAGED:
105 return "Unmanaged";
106 case IOMMU_DOMAIN_DMA:
107 return "Translated";
108 default:
109 return "Unknown";
110 }
111}
112
113static int __init iommu_subsys_init(void)
114{
115 bool cmd_line = iommu_cmd_line_dma_api();
116
117 if (!cmd_line) {
118 if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
119 iommu_set_default_passthrough(false);
120 else
121 iommu_set_default_translated(false);
122
123 if (iommu_default_passthrough() && mem_encrypt_active()) {
124 pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
125 iommu_set_default_translated(false);
126 }
127 }
128
129 pr_info("Default domain type: %s %s\n",
130 iommu_domain_type_str(iommu_def_domain_type),
131 cmd_line ? "(set via kernel command line)" : "");
132
133 return 0;
134}
135subsys_initcall(iommu_subsys_init);
136
137int iommu_device_register(struct iommu_device *iommu)
138{
139 spin_lock(&iommu_device_lock);
140 list_add_tail(&iommu->list, &iommu_device_list);
141 spin_unlock(&iommu_device_lock);
142 return 0;
143}
144
145void iommu_device_unregister(struct iommu_device *iommu)
146{
147 spin_lock(&iommu_device_lock);
148 list_del(&iommu->list);
149 spin_unlock(&iommu_device_lock);
150}
151
152static struct iommu_param *iommu_get_dev_param(struct device *dev)
153{
154 struct iommu_param *param = dev->iommu_param;
155
156 if (param)
157 return param;
158
159 param = kzalloc(sizeof(*param), GFP_KERNEL);
160 if (!param)
161 return NULL;
162
163 mutex_init(&param->lock);
164 dev->iommu_param = param;
165 return param;
166}
167
168static void iommu_free_dev_param(struct device *dev)
169{
170 kfree(dev->iommu_param);
171 dev->iommu_param = NULL;
172}
173
174int iommu_probe_device(struct device *dev)
175{
176 const struct iommu_ops *ops = dev->bus->iommu_ops;
177 int ret;
178
179 WARN_ON(dev->iommu_group);
180 if (!ops)
181 return -EINVAL;
182
183 if (!iommu_get_dev_param(dev))
184 return -ENOMEM;
185
186 ret = ops->add_device(dev);
187 if (ret)
188 iommu_free_dev_param(dev);
189
190 return ret;
191}
192
193void iommu_release_device(struct device *dev)
194{
195 const struct iommu_ops *ops = dev->bus->iommu_ops;
196
197 if (dev->iommu_group)
198 ops->remove_device(dev);
199
200 iommu_free_dev_param(dev);
201}
202
203static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
204 unsigned type);
205static int __iommu_attach_device(struct iommu_domain *domain,
206 struct device *dev);
207static int __iommu_attach_group(struct iommu_domain *domain,
208 struct iommu_group *group);
209static void __iommu_detach_group(struct iommu_domain *domain,
210 struct iommu_group *group);
211
212static int __init iommu_set_def_domain_type(char *str)
213{
214 bool pt;
215 int ret;
216
217 ret = kstrtobool(str, &pt);
218 if (ret)
219 return ret;
220
221 if (pt)
222 iommu_set_default_passthrough(true);
223 else
224 iommu_set_default_translated(true);
225
226 return 0;
227}
228early_param("iommu.passthrough", iommu_set_def_domain_type);
229
230static int __init iommu_dma_setup(char *str)
231{
232 return kstrtobool(str, &iommu_dma_strict);
233}
234early_param("iommu.strict", iommu_dma_setup);
235
236static ssize_t iommu_group_attr_show(struct kobject *kobj,
237 struct attribute *__attr, char *buf)
238{
239 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
240 struct iommu_group *group = to_iommu_group(kobj);
241 ssize_t ret = -EIO;
242
243 if (attr->show)
244 ret = attr->show(group, buf);
245 return ret;
246}
247
248static ssize_t iommu_group_attr_store(struct kobject *kobj,
249 struct attribute *__attr,
250 const char *buf, size_t count)
251{
252 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
253 struct iommu_group *group = to_iommu_group(kobj);
254 ssize_t ret = -EIO;
255
256 if (attr->store)
257 ret = attr->store(group, buf, count);
258 return ret;
259}
260
261static const struct sysfs_ops iommu_group_sysfs_ops = {
262 .show = iommu_group_attr_show,
263 .store = iommu_group_attr_store,
264};
265
266static int iommu_group_create_file(struct iommu_group *group,
267 struct iommu_group_attribute *attr)
268{
269 return sysfs_create_file(&group->kobj, &attr->attr);
270}
271
272static void iommu_group_remove_file(struct iommu_group *group,
273 struct iommu_group_attribute *attr)
274{
275 sysfs_remove_file(&group->kobj, &attr->attr);
276}
277
278static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
279{
280 return sprintf(buf, "%s\n", group->name);
281}
282
283/**
284 * iommu_insert_resv_region - Insert a new region in the
285 * list of reserved regions.
286 * @new: new region to insert
287 * @regions: list of regions
288 *
289 * Elements are sorted by start address and overlapping segments
290 * of the same type are merged.
291 */
292int iommu_insert_resv_region(struct iommu_resv_region *new,
293 struct list_head *regions)
294{
295 struct iommu_resv_region *iter, *tmp, *nr, *top;
296 LIST_HEAD(stack);
297
298 nr = iommu_alloc_resv_region(new->start, new->length,
299 new->prot, new->type);
300 if (!nr)
301 return -ENOMEM;
302
303 /* First add the new element based on start address sorting */
304 list_for_each_entry(iter, regions, list) {
305 if (nr->start < iter->start ||
306 (nr->start == iter->start && nr->type <= iter->type))
307 break;
308 }
309 list_add_tail(&nr->list, &iter->list);
310
311 /* Merge overlapping segments of type nr->type in @regions, if any */
312 list_for_each_entry_safe(iter, tmp, regions, list) {
313 phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
314
315 /* no merge needed on elements of different types than @nr */
316 if (iter->type != nr->type) {
317 list_move_tail(&iter->list, &stack);
318 continue;
319 }
320
321 /* look for the last stack element of same type as @iter */
322 list_for_each_entry_reverse(top, &stack, list)
323 if (top->type == iter->type)
324 goto check_overlap;
325
326 list_move_tail(&iter->list, &stack);
327 continue;
328
329check_overlap:
330 top_end = top->start + top->length - 1;
331
332 if (iter->start > top_end + 1) {
333 list_move_tail(&iter->list, &stack);
334 } else {
335 top->length = max(top_end, iter_end) - top->start + 1;
336 list_del(&iter->list);
337 kfree(iter);
338 }
339 }
340 list_splice(&stack, regions);
341 return 0;
342}
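/*
 * Worked example (illustrative, not from the original source): inserting a
 * direct region covering [0x1000, 0x2fff] into a list that already holds a
 * direct region covering [0x2000, 0x3fff] leaves one merged direct entry for
 * [0x1000, 0x3fff]; a reserved-type region at the same addresses would stay
 * separate, since only overlapping regions of the same type are merged.
 */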
343
344static int
345iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
346 struct list_head *group_resv_regions)
347{
348 struct iommu_resv_region *entry;
349 int ret = 0;
350
351 list_for_each_entry(entry, dev_resv_regions, list) {
352 ret = iommu_insert_resv_region(entry, group_resv_regions);
353 if (ret)
354 break;
355 }
356 return ret;
357}
358
359int iommu_get_group_resv_regions(struct iommu_group *group,
360 struct list_head *head)
361{
362 struct group_device *device;
363 int ret = 0;
364
365 mutex_lock(&group->mutex);
366 list_for_each_entry(device, &group->devices, list) {
367 struct list_head dev_resv_regions;
368
369 INIT_LIST_HEAD(&dev_resv_regions);
370 iommu_get_resv_regions(device->dev, &dev_resv_regions);
371 ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
372 iommu_put_resv_regions(device->dev, &dev_resv_regions);
373 if (ret)
374 break;
375 }
376 mutex_unlock(&group->mutex);
377 return ret;
378}
379EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
380
381static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
382 char *buf)
383{
384 struct iommu_resv_region *region, *next;
385 struct list_head group_resv_regions;
386 char *str = buf;
387
388 INIT_LIST_HEAD(&group_resv_regions);
389 iommu_get_group_resv_regions(group, &group_resv_regions);
390
391 list_for_each_entry_safe(region, next, &group_resv_regions, list) {
392 str += sprintf(str, "0x%016llx 0x%016llx %s\n",
393 (long long int)region->start,
394 (long long int)(region->start +
395 region->length - 1),
396 iommu_group_resv_type_string[region->type]);
397 kfree(region);
398 }
399
400 return (str - buf);
401}
402
403static ssize_t iommu_group_show_type(struct iommu_group *group,
404 char *buf)
405{
406 char *type = "unknown\n";
407
408 if (group->default_domain) {
409 switch (group->default_domain->type) {
410 case IOMMU_DOMAIN_BLOCKED:
411 type = "blocked\n";
412 break;
413 case IOMMU_DOMAIN_IDENTITY:
414 type = "identity\n";
415 break;
416 case IOMMU_DOMAIN_UNMANAGED:
417 type = "unmanaged\n";
418 break;
419 case IOMMU_DOMAIN_DMA:
420 type = "DMA\n";
421 break;
422 }
423 }
424 strcpy(buf, type);
425
426 return strlen(type);
427}
428
429static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
430
431static IOMMU_GROUP_ATTR(reserved_regions, 0444,
432 iommu_group_show_resv_regions, NULL);
433
434static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);
435
436static void iommu_group_release(struct kobject *kobj)
437{
438 struct iommu_group *group = to_iommu_group(kobj);
439
440 pr_debug("Releasing group %d\n", group->id);
441
442 if (group->iommu_data_release)
443 group->iommu_data_release(group->iommu_data);
444
445 ida_simple_remove(&iommu_group_ida, group->id);
446
447 if (group->default_domain)
448 iommu_domain_free(group->default_domain);
449
450 kfree(group->name);
451 kfree(group);
452}
453
454static struct kobj_type iommu_group_ktype = {
455 .sysfs_ops = &iommu_group_sysfs_ops,
456 .release = iommu_group_release,
457};
458
459/**
460 * iommu_group_alloc - Allocate a new group
461 *
462 * This function is called by an iommu driver to allocate a new iommu
463 * group. The iommu group represents the minimum granularity of the iommu.
464 * Upon successful return, the caller holds a reference to the newly
465 * allocated group in order to hold the group until devices are added. Use
466 * iommu_group_put() to release this extra reference count, allowing the
467 * group to be automatically reclaimed once it has no devices or external
468 * references.
469 */
470struct iommu_group *iommu_group_alloc(void)
471{
472 struct iommu_group *group;
473 int ret;
474
475 group = kzalloc(sizeof(*group), GFP_KERNEL);
476 if (!group)
477 return ERR_PTR(-ENOMEM);
478
479 group->kobj.kset = iommu_group_kset;
480 mutex_init(&group->mutex);
481 INIT_LIST_HEAD(&group->devices);
482 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
483
484 ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
485 if (ret < 0) {
486 kfree(group);
487 return ERR_PTR(ret);
488 }
489 group->id = ret;
490
491 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
492 NULL, "%d", group->id);
493 if (ret) {
494 ida_simple_remove(&iommu_group_ida, group->id);
495 kfree(group);
496 return ERR_PTR(ret);
497 }
498
499 group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
500 if (!group->devices_kobj) {
501 kobject_put(&group->kobj); /* triggers .release & free */
502 return ERR_PTR(-ENOMEM);
503 }
504
505 /*
506 * The devices_kobj holds a reference on the group kobject, so
507 * as long as that exists so will the group. We can therefore
508 * use the devices_kobj for reference counting.
509 */
510 kobject_put(&group->kobj);
511
512 ret = iommu_group_create_file(group,
513 &iommu_group_attr_reserved_regions);
514 if (ret)
515 return ERR_PTR(ret);
516
517 ret = iommu_group_create_file(group, &iommu_group_attr_type);
518 if (ret)
519 return ERR_PTR(ret);
520
521 pr_debug("Allocated group %d\n", group->id);
522
523 return group;
524}
525EXPORT_SYMBOL_GPL(iommu_group_alloc);
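/*
 * Illustrative sketch of the usual calling pattern (the foo_* names are
 * hypothetical and not part of this file): an IOMMU driver allocates a
 * group, names it, adds the device and then drops the reference taken here.
 *
 *	static int foo_add_device(struct device *dev)
 *	{
 *		struct iommu_group *group;
 *		int ret;
 *
 *		group = iommu_group_alloc();
 *		if (IS_ERR(group))
 *			return PTR_ERR(group);
 *
 *		ret = iommu_group_set_name(group, "foo");
 *		if (!ret)
 *			ret = iommu_group_add_device(group, dev);
 *
 *		iommu_group_put(group);
 *		return ret;
 *	}
 */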
526
527struct iommu_group *iommu_group_get_by_id(int id)
528{
529 struct kobject *group_kobj;
530 struct iommu_group *group;
531 const char *name;
532
533 if (!iommu_group_kset)
534 return NULL;
535
536 name = kasprintf(GFP_KERNEL, "%d", id);
537 if (!name)
538 return NULL;
539
540 group_kobj = kset_find_obj(iommu_group_kset, name);
541 kfree(name);
542
543 if (!group_kobj)
544 return NULL;
545
546 group = container_of(group_kobj, struct iommu_group, kobj);
547 BUG_ON(group->id != id);
548
549 kobject_get(group->devices_kobj);
550 kobject_put(&group->kobj);
551
552 return group;
553}
554EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
555
556/**
557 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
558 * @group: the group
559 *
560 * iommu drivers can store data in the group for use when doing iommu
561 * operations. This function provides a way to retrieve it. Caller
562 * should hold a group reference.
563 */
564void *iommu_group_get_iommudata(struct iommu_group *group)
565{
566 return group->iommu_data;
567}
568EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
569
570/**
571 * iommu_group_set_iommudata - set iommu_data for a group
572 * @group: the group
573 * @iommu_data: new data
574 * @release: release function for iommu_data
575 *
576 * iommu drivers can store data in the group for use when doing iommu
577 * operations. This function provides a way to set the data after
578 * the group has been allocated. Caller should hold a group reference.
579 */
580void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
581 void (*release)(void *iommu_data))
582{
583 group->iommu_data = iommu_data;
584 group->iommu_data_release = release;
585}
586EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
587
588/**
589 * iommu_group_set_name - set name for a group
590 * @group: the group
591 * @name: name
592 *
593 * Allow iommu driver to set a name for a group. When set it will
594 * appear in a name attribute file under the group in sysfs.
595 */
596int iommu_group_set_name(struct iommu_group *group, const char *name)
597{
598 int ret;
599
600 if (group->name) {
601 iommu_group_remove_file(group, &iommu_group_attr_name);
602 kfree(group->name);
603 group->name = NULL;
604 if (!name)
605 return 0;
606 }
607
608 group->name = kstrdup(name, GFP_KERNEL);
609 if (!group->name)
610 return -ENOMEM;
611
612 ret = iommu_group_create_file(group, &iommu_group_attr_name);
613 if (ret) {
614 kfree(group->name);
615 group->name = NULL;
616 return ret;
617 }
618
619 return 0;
620}
621EXPORT_SYMBOL_GPL(iommu_group_set_name);
622
623static int iommu_group_create_direct_mappings(struct iommu_group *group,
624 struct device *dev)
625{
626 struct iommu_domain *domain = group->default_domain;
627 struct iommu_resv_region *entry;
628 struct list_head mappings;
629 unsigned long pg_size;
630 int ret = 0;
631
632 if (!domain || domain->type != IOMMU_DOMAIN_DMA)
633 return 0;
634
635 BUG_ON(!domain->pgsize_bitmap);
636
637 pg_size = 1UL << __ffs(domain->pgsize_bitmap);
638 INIT_LIST_HEAD(&mappings);
639
640 iommu_get_resv_regions(dev, &mappings);
641
642 /* We need to consider overlapping regions for different devices */
643 list_for_each_entry(entry, &mappings, list) {
644 dma_addr_t start, end, addr;
645
646 if (domain->ops->apply_resv_region)
647 domain->ops->apply_resv_region(dev, domain, entry);
648
649 start = ALIGN(entry->start, pg_size);
650 end = ALIGN(entry->start + entry->length, pg_size);
651
652 if (entry->type != IOMMU_RESV_DIRECT &&
653 entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
654 continue;
655
656 for (addr = start; addr < end; addr += pg_size) {
657 phys_addr_t phys_addr;
658
659 phys_addr = iommu_iova_to_phys(domain, addr);
660 if (phys_addr)
661 continue;
662
663 ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
664 if (ret)
665 goto out;
666 }
667
668 }
669
670 iommu_flush_tlb_all(domain);
671
672out:
673 iommu_put_resv_regions(dev, &mappings);
674
675 return ret;
676}
677
678/**
679 * iommu_group_add_device - add a device to an iommu group
680 * @group: the group into which to add the device (reference should be held)
681 * @dev: the device
682 *
683 * This function is called by an iommu driver to add a device into a
684 * group. Adding a device increments the group reference count.
685 */
686int iommu_group_add_device(struct iommu_group *group, struct device *dev)
687{
688 int ret, i = 0;
689 struct group_device *device;
690
691 device = kzalloc(sizeof(*device), GFP_KERNEL);
692 if (!device)
693 return -ENOMEM;
694
695 device->dev = dev;
696
697 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
698 if (ret)
699 goto err_free_device;
700
701 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
702rename:
703 if (!device->name) {
704 ret = -ENOMEM;
705 goto err_remove_link;
706 }
707
708 ret = sysfs_create_link_nowarn(group->devices_kobj,
709 &dev->kobj, device->name);
710 if (ret) {
711 if (ret == -EEXIST && i >= 0) {
712 /*
713 * Account for the slim chance of collision
714 * and append an instance to the name.
715 */
716 kfree(device->name);
717 device->name = kasprintf(GFP_KERNEL, "%s.%d",
718 kobject_name(&dev->kobj), i++);
719 goto rename;
720 }
721 goto err_free_name;
722 }
723
724 kobject_get(group->devices_kobj);
725
726 dev->iommu_group = group;
727
728 iommu_group_create_direct_mappings(group, dev);
729
730 mutex_lock(&group->mutex);
731 list_add_tail(&device->list, &group->devices);
732 if (group->domain)
733 ret = __iommu_attach_device(group->domain, dev);
734 mutex_unlock(&group->mutex);
735 if (ret)
736 goto err_put_group;
737
738 /* Notify any listeners about change to group. */
739 blocking_notifier_call_chain(&group->notifier,
740 IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
741
742 trace_add_device_to_group(group->id, dev);
743
744 dev_info(dev, "Adding to iommu group %d\n", group->id);
745
746 return 0;
747
748err_put_group:
749 mutex_lock(&group->mutex);
750 list_del(&device->list);
751 mutex_unlock(&group->mutex);
752 dev->iommu_group = NULL;
753 kobject_put(group->devices_kobj);
754err_free_name:
755 kfree(device->name);
756err_remove_link:
757 sysfs_remove_link(&dev->kobj, "iommu_group");
758err_free_device:
759 kfree(device);
760 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
761 return ret;
762}
763EXPORT_SYMBOL_GPL(iommu_group_add_device);
764
765/**
766 * iommu_group_remove_device - remove a device from its current group
767 * @dev: device to be removed
768 *
769 * This function is called by an iommu driver to remove the device from
770 * its current group. This decrements the iommu group reference count.
771 */
772void iommu_group_remove_device(struct device *dev)
773{
774 struct iommu_group *group = dev->iommu_group;
775 struct group_device *tmp_device, *device = NULL;
776
777 dev_info(dev, "Removing from iommu group %d\n", group->id);
778
779 /* Pre-notify listeners that a device is being removed. */
780 blocking_notifier_call_chain(&group->notifier,
781 IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
782
783 mutex_lock(&group->mutex);
784 list_for_each_entry(tmp_device, &group->devices, list) {
785 if (tmp_device->dev == dev) {
786 device = tmp_device;
787 list_del(&device->list);
788 break;
789 }
790 }
791 mutex_unlock(&group->mutex);
792
793 if (!device)
794 return;
795
796 sysfs_remove_link(group->devices_kobj, device->name);
797 sysfs_remove_link(&dev->kobj, "iommu_group");
798
799 trace_remove_device_from_group(group->id, dev);
800
801 kfree(device->name);
802 kfree(device);
803 dev->iommu_group = NULL;
804 kobject_put(group->devices_kobj);
805}
806EXPORT_SYMBOL_GPL(iommu_group_remove_device);
807
808static int iommu_group_device_count(struct iommu_group *group)
809{
810 struct group_device *entry;
811 int ret = 0;
812
813 list_for_each_entry(entry, &group->devices, list)
814 ret++;
815
816 return ret;
817}
818
819/**
820 * iommu_group_for_each_dev - iterate over each device in the group
821 * @group: the group
822 * @data: caller opaque data to be passed to callback function
823 * @fn: caller supplied callback function
824 *
825 * This function is called by group users to iterate over group devices.
826 * Callers should hold a reference count to the group during callback.
827 * The group->mutex is held across callbacks, which will block calls to
828 * iommu_group_add/remove_device.
829 */
830static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
831 int (*fn)(struct device *, void *))
832{
833 struct group_device *device;
834 int ret = 0;
835
836 list_for_each_entry(device, &group->devices, list) {
837 ret = fn(device->dev, data);
838 if (ret)
839 break;
840 }
841 return ret;
842}
843
844
845int iommu_group_for_each_dev(struct iommu_group *group, void *data,
846 int (*fn)(struct device *, void *))
847{
848 int ret;
849
850 mutex_lock(&group->mutex);
851 ret = __iommu_group_for_each_dev(group, data, fn);
852 mutex_unlock(&group->mutex);
853
854 return ret;
855}
856EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
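/*
 * Illustrative sketch (hypothetical callback, not part of this file):
 * counting the devices in a group via iommu_group_for_each_dev().
 *
 *	static int foo_count_one(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	...
 *		int count = 0;
 *
 *		iommu_group_for_each_dev(group, &count, foo_count_one);
 */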
857
858/**
859 * iommu_group_get - Return the group for a device and increment reference
860 * @dev: get the group that this device belongs to
861 *
862 * This function is called by iommu drivers and users to get the group
863 * for the specified device. If found, the group is returned and the group
864 * reference is incremented, else NULL.
865 */
866struct iommu_group *iommu_group_get(struct device *dev)
867{
868 struct iommu_group *group = dev->iommu_group;
869
870 if (group)
871 kobject_get(group->devices_kobj);
872
873 return group;
874}
875EXPORT_SYMBOL_GPL(iommu_group_get);
876
877/**
878 * iommu_group_ref_get - Increment reference on a group
879 * @group: the group to use, must not be NULL
880 *
881 * This function is called by iommu drivers to take additional references on an
882 * existing group. Returns the given group for convenience.
883 */
884struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
885{
886 kobject_get(group->devices_kobj);
887 return group;
888}
889
890/**
891 * iommu_group_put - Decrement group reference
892 * @group: the group to use
893 *
894 * This function is called by iommu drivers and users to release the
895 * iommu group. Once the reference count is zero, the group is released.
896 */
897void iommu_group_put(struct iommu_group *group)
898{
899 if (group)
900 kobject_put(group->devices_kobj);
901}
902EXPORT_SYMBOL_GPL(iommu_group_put);
903
904/**
905 * iommu_group_register_notifier - Register a notifier for group changes
906 * @group: the group to watch
907 * @nb: notifier block to signal
908 *
909 * This function allows iommu group users to track changes in a group.
910 * See include/linux/iommu.h for actions sent via this notifier. Caller
911 * should hold a reference to the group throughout notifier registration.
912 */
913int iommu_group_register_notifier(struct iommu_group *group,
914 struct notifier_block *nb)
915{
916 return blocking_notifier_chain_register(&group->notifier, nb);
917}
918EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
919
920/**
921 * iommu_group_unregister_notifier - Unregister a notifier
922 * @group: the group to watch
923 * @nb: notifier block to signal
924 *
925 * Unregister a previously registered group notifier block.
926 */
927int iommu_group_unregister_notifier(struct iommu_group *group,
928 struct notifier_block *nb)
929{
930 return blocking_notifier_chain_unregister(&group->notifier, nb);
931}
932EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
933
934/**
935 * iommu_register_device_fault_handler() - Register a device fault handler
936 * @dev: the device
937 * @handler: the fault handler
938 * @data: private data passed as argument to the handler
939 *
940 * When an IOMMU fault event is received, this handler gets called with the
941 * fault event and data as argument. The handler should return 0 on success. If
942 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
943 * complete the fault by calling iommu_page_response() with one of the following
944 * response codes:
945 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
946 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
947 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
948 * page faults if possible.
949 *
950 * Return 0 if the fault handler was installed successfully, or an error.
951 */
952int iommu_register_device_fault_handler(struct device *dev,
953 iommu_dev_fault_handler_t handler,
954 void *data)
955{
956 struct iommu_param *param = dev->iommu_param;
957 int ret = 0;
958
959 if (!param)
960 return -EINVAL;
961
962 mutex_lock(&param->lock);
963 /* Only allow one fault handler registered for each device */
964 if (param->fault_param) {
965 ret = -EBUSY;
966 goto done_unlock;
967 }
968
969 get_device(dev);
970 param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
971 if (!param->fault_param) {
972 put_device(dev);
973 ret = -ENOMEM;
974 goto done_unlock;
975 }
976 param->fault_param->handler = handler;
977 param->fault_param->data = data;
978 mutex_init(&param->fault_param->lock);
979 INIT_LIST_HEAD(&param->fault_param->faults);
980
981done_unlock:
982 mutex_unlock(&param->lock);
983
984 return ret;
985}
986EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
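/*
 * Illustrative sketch (hypothetical foo_* consumer, not part of this file):
 * a device driver registering a fault handler so it can service recoverable
 * page requests from its own workqueue.
 *
 *	static int foo_iommu_fault(struct iommu_fault *fault, void *data)
 *	{
 *		struct foo_device *fdev = data;
 *
 *		if (fault->type != IOMMU_FAULT_PAGE_REQ)
 *			return -EOPNOTSUPP;
 *
 *		queue_work(fdev->wq, &fdev->prq_work);
 *		return 0;
 *	}
 *
 *	...
 *		ret = iommu_register_device_fault_handler(dev, foo_iommu_fault,
 *							   fdev);
 */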
987
988/**
989 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
990 * @dev: the device
991 *
992 * Remove the device fault handler installed with
993 * iommu_register_device_fault_handler().
994 *
995 * Return 0 on success, or an error.
996 */
997int iommu_unregister_device_fault_handler(struct device *dev)
998{
999 struct iommu_param *param = dev->iommu_param;
1000 int ret = 0;
1001
1002 if (!param)
1003 return -EINVAL;
1004
1005 mutex_lock(&param->lock);
1006
1007 if (!param->fault_param)
1008 goto unlock;
1009
1010 /* we cannot unregister handler if there are pending faults */
1011 if (!list_empty(&param->fault_param->faults)) {
1012 ret = -EBUSY;
1013 goto unlock;
1014 }
1015
1016 kfree(param->fault_param);
1017 param->fault_param = NULL;
1018 put_device(dev);
1019unlock:
1020 mutex_unlock(&param->lock);
1021
1022 return ret;
1023}
1024EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
1025
1026/**
1027 * iommu_report_device_fault() - Report fault event to device driver
1028 * @dev: the device
1029 * @evt: fault event data
1030 *
1031 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
1032 * handler. When this function fails and the fault is recoverable, it is the
1033 * caller's responsibility to complete the fault.
1034 *
1035 * Return 0 on success, or an error.
1036 */
1037int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
1038{
1039 struct iommu_param *param = dev->iommu_param;
1040 struct iommu_fault_event *evt_pending = NULL;
1041 struct iommu_fault_param *fparam;
1042 int ret = 0;
1043
1044 if (!param || !evt)
1045 return -EINVAL;
1046
1047 /* we only report device fault if there is a handler registered */
1048 mutex_lock(&param->lock);
1049 fparam = param->fault_param;
1050 if (!fparam || !fparam->handler) {
1051 ret = -EINVAL;
1052 goto done_unlock;
1053 }
1054
1055 if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
1056 (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
1057 evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
1058 GFP_KERNEL);
1059 if (!evt_pending) {
1060 ret = -ENOMEM;
1061 goto done_unlock;
1062 }
1063 mutex_lock(&fparam->lock);
1064 list_add_tail(&evt_pending->list, &fparam->faults);
1065 mutex_unlock(&fparam->lock);
1066 }
1067
1068 ret = fparam->handler(&evt->fault, fparam->data);
1069 if (ret && evt_pending) {
1070 mutex_lock(&fparam->lock);
1071 list_del(&evt_pending->list);
1072 mutex_unlock(&fparam->lock);
1073 kfree(evt_pending);
1074 }
1075done_unlock:
1076 mutex_unlock(&param->lock);
1077 return ret;
1078}
1079EXPORT_SYMBOL_GPL(iommu_report_device_fault);
1080
1081int iommu_page_response(struct device *dev,
1082 struct iommu_page_response *msg)
1083{
1084 bool pasid_valid;
1085 int ret = -EINVAL;
1086 struct iommu_fault_event *evt;
1087 struct iommu_fault_page_request *prm;
1088 struct iommu_param *param = dev->iommu_param;
1089 struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1090
1091 if (!domain || !domain->ops->page_response)
1092 return -ENODEV;
1093
1094 if (!param || !param->fault_param)
1095 return -EINVAL;
1096
1097 if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
1098 msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
1099 return -EINVAL;
1100
1101 /* Only send response if there is a fault report pending */
1102 mutex_lock(&param->fault_param->lock);
1103 if (list_empty(&param->fault_param->faults)) {
1104 dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
1105 goto done_unlock;
1106 }
1107 /*
1108 * Check if we have a matching page request pending to respond,
1109 * otherwise return -EINVAL
1110 */
1111 list_for_each_entry(evt, &param->fault_param->faults, list) {
1112 prm = &evt->fault.prm;
1113 pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1114
1115 if ((pasid_valid && prm->pasid != msg->pasid) ||
1116 prm->grpid != msg->grpid)
1117 continue;
1118
1119 /* Sanitize the reply */
1120 msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
1121
1122 ret = domain->ops->page_response(dev, evt, msg);
1123 list_del(&evt->list);
1124 kfree(evt);
1125 break;
1126 }
1127
1128done_unlock:
1129 mutex_unlock(&param->fault_param->lock);
1130 return ret;
1131}
1132EXPORT_SYMBOL_GPL(iommu_page_response);
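/*
 * Illustrative sketch (hypothetical pasid/grpid values, not part of this
 * file): completing a page request that was previously delivered to the
 * registered fault handler.
 *
 *	struct iommu_page_response resp = {
 *		.version = IOMMU_PAGE_RESP_VERSION_1,
 *		.flags	 = IOMMU_PAGE_RESP_PASID_VALID,
 *		.pasid	 = pasid,
 *		.grpid	 = grpid,
 *		.code	 = IOMMU_PAGE_RESP_SUCCESS,
 *	};
 *
 *	ret = iommu_page_response(dev, &resp);
 */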
1133
1134/**
1135 * iommu_group_id - Return ID for a group
1136 * @group: the group to ID
1137 *
1138 * Return the unique ID for the group matching the sysfs group number.
1139 */
1140int iommu_group_id(struct iommu_group *group)
1141{
1142 return group->id;
1143}
1144EXPORT_SYMBOL_GPL(iommu_group_id);
1145
1146static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1147 unsigned long *devfns);
1148
1149/*
1150 * To consider a PCI device isolated, we require ACS to support Source
1151 * Validation, Request Redirection, Completer Redirection, and Upstream
1152 * Forwarding. This effectively means that devices cannot spoof their
1153 * requester ID, requests and completions cannot be redirected, and all
1154 * transactions are forwarded upstream, even as they pass through a
1155 * bridge where the target device is downstream.
1156 */
1157#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
1158
1159/*
1160 * For multifunction devices which are not isolated from each other, find
1161 * all the other non-isolated functions and look for existing groups. For
1162 * each function, we also need to look for aliases to or from other devices
1163 * that may already have a group.
1164 */
1165static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
1166 unsigned long *devfns)
1167{
1168 struct pci_dev *tmp = NULL;
1169 struct iommu_group *group;
1170
1171 if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
1172 return NULL;
1173
1174 for_each_pci_dev(tmp) {
1175 if (tmp == pdev || tmp->bus != pdev->bus ||
1176 PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
1177 pci_acs_enabled(tmp, REQ_ACS_FLAGS))
1178 continue;
1179
1180 group = get_pci_alias_group(tmp, devfns);
1181 if (group) {
1182 pci_dev_put(tmp);
1183 return group;
1184 }
1185 }
1186
1187 return NULL;
1188}
1189
1190/*
1191 * Look for aliases to or from the given device for existing groups. DMA
1192 * aliases are only supported on the same bus, therefore the search
1193 * space is quite small (especially since we're really only looking at PCIe
1194 * devices, and therefore only expect multiple slots on the root complex or
1195 * downstream switch ports). It's conceivable though that a pair of
1196 * multifunction devices could have aliases between them that would cause a
1197 * loop. To prevent this, we use a bitmap to track where we've been.
1198 */
1199static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1200 unsigned long *devfns)
1201{
1202 struct pci_dev *tmp = NULL;
1203 struct iommu_group *group;
1204
1205 if (test_and_set_bit(pdev->devfn & 0xff, devfns))
1206 return NULL;
1207
1208 group = iommu_group_get(&pdev->dev);
1209 if (group)
1210 return group;
1211
1212 for_each_pci_dev(tmp) {
1213 if (tmp == pdev || tmp->bus != pdev->bus)
1214 continue;
1215
1216 /* We alias them or they alias us */
1217 if (pci_devs_are_dma_aliases(pdev, tmp)) {
1218 group = get_pci_alias_group(tmp, devfns);
1219 if (group) {
1220 pci_dev_put(tmp);
1221 return group;
1222 }
1223
1224 group = get_pci_function_alias_group(tmp, devfns);
1225 if (group) {
1226 pci_dev_put(tmp);
1227 return group;
1228 }
1229 }
1230 }
1231
1232 return NULL;
1233}
1234
1235struct group_for_pci_data {
1236 struct pci_dev *pdev;
1237 struct iommu_group *group;
1238};
1239
1240/*
1241 * DMA alias iterator callback, return the last seen device. Stop and return
1242 * the IOMMU group if we find one along the way.
1243 */
1244static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
1245{
1246 struct group_for_pci_data *data = opaque;
1247
1248 data->pdev = pdev;
1249 data->group = iommu_group_get(&pdev->dev);
1250
1251 return data->group != NULL;
1252}
1253
1254/*
1255 * Generic device_group call-back function. It just allocates one
1256 * iommu-group per device.
1257 */
1258struct iommu_group *generic_device_group(struct device *dev)
1259{
1260 return iommu_group_alloc();
1261}
1262
1263/*
1264 * Use standard PCI bus topology, isolation features, and DMA alias quirks
1265 * to find or create an IOMMU group for a device.
1266 */
1267struct iommu_group *pci_device_group(struct device *dev)
1268{
1269 struct pci_dev *pdev = to_pci_dev(dev);
1270 struct group_for_pci_data data;
1271 struct pci_bus *bus;
1272 struct iommu_group *group = NULL;
1273 u64 devfns[4] = { 0 };
1274
1275 if (WARN_ON(!dev_is_pci(dev)))
1276 return ERR_PTR(-EINVAL);
1277
1278 /*
1279 * Find the upstream DMA alias for the device. A device must not
1280 * be aliased due to topology in order to have its own IOMMU group.
1281 * If we find an alias along the way that already belongs to a
1282 * group, use it.
1283 */
1284 if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
1285 return data.group;
1286
1287 pdev = data.pdev;
1288
1289 /*
1290 * Continue upstream from the point of minimum IOMMU granularity
1291 * due to aliases to the point where devices are protected from
1292 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
1293 * group, use it.
1294 */
1295 for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
1296 if (!bus->self)
1297 continue;
1298
1299 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
1300 break;
1301
1302 pdev = bus->self;
1303
1304 group = iommu_group_get(&pdev->dev);
1305 if (group)
1306 return group;
1307 }
1308
1309 /*
1310 * Look for existing groups on device aliases. If we alias another
1311 * device or another device aliases us, use the same group.
1312 */
1313 group = get_pci_alias_group(pdev, (unsigned long *)devfns);
1314 if (group)
1315 return group;
1316
1317 /*
1318 * Look for existing groups on non-isolated functions on the same
1319 * slot and aliases of those functions, if any. No need to clear
1320 * the search bitmap, the tested devfns are still valid.
1321 */
1322 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
1323 if (group)
1324 return group;
1325
1326 /* No shared group found, allocate new */
1327 return iommu_group_alloc();
1328}
1329
1330/* Get the IOMMU group for device on fsl-mc bus */
1331struct iommu_group *fsl_mc_device_group(struct device *dev)
1332{
1333 struct device *cont_dev = fsl_mc_cont_dev(dev);
1334 struct iommu_group *group;
1335
1336 group = iommu_group_get(cont_dev);
1337 if (!group)
1338 group = iommu_group_alloc();
1339 return group;
1340}
1341
1342/**
1343 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
1344 * @dev: target device
1345 *
1346 * This function is intended to be called by IOMMU drivers and extended to
1347 * support common, bus-defined algorithms when determining or creating the
1348 * IOMMU group for a device. On success, the caller will hold a reference
1349 * to the returned IOMMU group, which will already include the provided
1350 * device. The reference should be released with iommu_group_put().
1351 */
1352struct iommu_group *iommu_group_get_for_dev(struct device *dev)
1353{
1354 const struct iommu_ops *ops = dev->bus->iommu_ops;
1355 struct iommu_group *group;
1356 int ret;
1357
1358 group = iommu_group_get(dev);
1359 if (group)
1360 return group;
1361
1362 if (!ops)
1363 return ERR_PTR(-EINVAL);
1364
1365 group = ops->device_group(dev);
1366 if (WARN_ON_ONCE(group == NULL))
1367 return ERR_PTR(-EINVAL);
1368
1369 if (IS_ERR(group))
1370 return group;
1371
1372 /*
1373 * Try to allocate a default domain - needs support from the
1374 * IOMMU driver.
1375 */
1376 if (!group->default_domain) {
1377 struct iommu_domain *dom;
1378
1379 dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
1380 if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
1381 dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
1382 if (dom) {
1383 dev_warn(dev,
1384 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
1385 iommu_def_domain_type);
1386 }
1387 }
1388
1389 group->default_domain = dom;
1390 if (!group->domain)
1391 group->domain = dom;
1392
1393 if (dom && !iommu_dma_strict) {
1394 int attr = 1;
1395 iommu_domain_set_attr(dom,
1396 DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
1397 &attr);
1398 }
1399 }
1400
1401 ret = iommu_group_add_device(group, dev);
1402 if (ret) {
1403 iommu_group_put(group);
1404 return ERR_PTR(ret);
1405 }
1406
1407 return group;
1408}
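/*
 * Illustrative sketch (hypothetical foo_* driver, not part of this file):
 * most add_device implementations simply call iommu_group_get_for_dev() and
 * let the bus-specific device_group callback decide how devices are grouped,
 * as an alternative to open-coding the group allocation shown earlier.
 *
 *	static int foo_add_device(struct device *dev)
 *	{
 *		struct iommu_group *group;
 *
 *		group = iommu_group_get_for_dev(dev);
 *		if (IS_ERR(group))
 *			return PTR_ERR(group);
 *
 *		iommu_group_put(group);
 *		return 0;
 *	}
 */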
1409
1410struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
1411{
1412 return group->default_domain;
1413}
1414
1415static int add_iommu_group(struct device *dev, void *data)
1416{
1417 int ret = iommu_probe_device(dev);
1418
1419 /*
1420 * We ignore -ENODEV errors for now, as they just mean that the
1421 * device is not translated by an IOMMU. We still care about
1422 * other errors and fail to initialize when they happen.
1423 */
1424 if (ret == -ENODEV)
1425 ret = 0;
1426
1427 return ret;
1428}
1429
1430static int remove_iommu_group(struct device *dev, void *data)
1431{
1432 iommu_release_device(dev);
1433
1434 return 0;
1435}
1436
1437static int iommu_bus_notifier(struct notifier_block *nb,
1438 unsigned long action, void *data)
1439{
1440 unsigned long group_action = 0;
1441 struct device *dev = data;
1442 struct iommu_group *group;
1443
1444 /*
1445 * ADD/DEL call into iommu driver ops if provided, which may
1446 * result in ADD/DEL notifiers to group->notifier
1447 */
1448 if (action == BUS_NOTIFY_ADD_DEVICE) {
1449 int ret;
1450
1451 ret = iommu_probe_device(dev);
1452 return (ret) ? NOTIFY_DONE : NOTIFY_OK;
1453 } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
1454 iommu_release_device(dev);
1455 return NOTIFY_OK;
1456 }
1457
1458 /*
1459 * Remaining BUS_NOTIFYs get filtered and republished to the
1460 * group, if anyone is listening
1461 */
1462 group = iommu_group_get(dev);
1463 if (!group)
1464 return 0;
1465
1466 switch (action) {
1467 case BUS_NOTIFY_BIND_DRIVER:
1468 group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
1469 break;
1470 case BUS_NOTIFY_BOUND_DRIVER:
1471 group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
1472 break;
1473 case BUS_NOTIFY_UNBIND_DRIVER:
1474 group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
1475 break;
1476 case BUS_NOTIFY_UNBOUND_DRIVER:
1477 group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
1478 break;
1479 }
1480
1481 if (group_action)
1482 blocking_notifier_call_chain(&group->notifier,
1483 group_action, dev);
1484
1485 iommu_group_put(group);
1486 return 0;
1487}
1488
1489static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
1490{
1491 int err;
1492 struct notifier_block *nb;
1493
1494 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
1495 if (!nb)
1496 return -ENOMEM;
1497
1498 nb->notifier_call = iommu_bus_notifier;
1499
1500 err = bus_register_notifier(bus, nb);
1501 if (err)
1502 goto out_free;
1503
1504 err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
1505 if (err)
1506 goto out_err;
1507
1508
1509 return 0;
1510
1511out_err:
1512 /* Clean up */
1513 bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
1514 bus_unregister_notifier(bus, nb);
1515
1516out_free:
1517 kfree(nb);
1518
1519 return err;
1520}
1521
1522/**
1523 * bus_set_iommu - set iommu-callbacks for the bus
1524 * @bus: bus.
1525 * @ops: the callbacks provided by the iommu-driver
1526 *
1527 * This function is called by an iommu driver to set the iommu methods
1528 * used for a particular bus. Drivers for devices on that bus can use
1529 * the iommu-api after these ops are registered.
1530 * This special function is needed because IOMMUs are usually devices on
1531 * the bus itself, so the iommu drivers are not initialized when the bus
1532 * is set up. With this function the iommu-driver can set the iommu-ops
1533 * afterwards.
1534 */
1535int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
1536{
1537 int err;
1538
1539 if (bus->iommu_ops != NULL)
1540 return -EBUSY;
1541
1542 bus->iommu_ops = ops;
1543
1544 /* Do IOMMU specific setup for this bus-type */
1545 err = iommu_bus_init(bus, ops);
1546 if (err)
1547 bus->iommu_ops = NULL;
1548
1549 return err;
1550}
1551EXPORT_SYMBOL_GPL(bus_set_iommu);
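/*
 * Illustrative sketch (hypothetical foo_* driver, not part of this file): an
 * IOMMU driver publishing its callbacks for the platform bus once its
 * hardware has probed successfully.
 *
 *	static const struct iommu_ops foo_iommu_ops = {
 *		.capable	= foo_capable,
 *		.domain_alloc	= foo_domain_alloc,
 *		.domain_free	= foo_domain_free,
 *		.attach_dev	= foo_attach_dev,
 *		.detach_dev	= foo_detach_dev,
 *		.map		= foo_map,
 *		.unmap		= foo_unmap,
 *		.iova_to_phys	= foo_iova_to_phys,
 *		.add_device	= foo_add_device,
 *		.remove_device	= foo_remove_device,
 *		.device_group	= generic_device_group,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M,
 *	};
 *
 *	...
 *		ret = bus_set_iommu(&platform_bus_type, &foo_iommu_ops);
 */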
1552
1553bool iommu_present(struct bus_type *bus)
1554{
1555 return bus->iommu_ops != NULL;
1556}
1557EXPORT_SYMBOL_GPL(iommu_present);
1558
1559bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
1560{
1561 if (!bus->iommu_ops || !bus->iommu_ops->capable)
1562 return false;
1563
1564 return bus->iommu_ops->capable(cap);
1565}
1566EXPORT_SYMBOL_GPL(iommu_capable);
1567
1568/**
1569 * iommu_set_fault_handler() - set a fault handler for an iommu domain
1570 * @domain: iommu domain
1571 * @handler: fault handler
1572 * @token: user data, will be passed back to the fault handler
1573 *
1574 * This function should be used by IOMMU users which want to be notified
1575 * whenever an IOMMU fault happens.
1576 *
1577 * The fault handler itself should return 0 on success, and an appropriate
1578 * error code otherwise.
1579 */
1580void iommu_set_fault_handler(struct iommu_domain *domain,
1581 iommu_fault_handler_t handler,
1582 void *token)
1583{
1584 BUG_ON(!domain);
1585
1586 domain->handler = handler;
1587 domain->handler_token = token;
1588}
1589EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
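/*
 * Illustrative sketch (hypothetical handler, not part of this file): a domain
 * owner installing its own handler so unexpected faults are reported against
 * the device rather than through the core's default reporting path.
 *
 *	static int foo_fault(struct iommu_domain *domain, struct device *dev,
 *			     unsigned long iova, int flags, void *token)
 *	{
 *		dev_err(dev, "unexpected %s fault at 0x%lx\n",
 *			(flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);
 *		return 0;
 *	}
 *
 *	...
 *		iommu_set_fault_handler(domain, foo_fault, NULL);
 */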
1590
1591static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1592 unsigned type)
1593{
1594 struct iommu_domain *domain;
1595
1596 if (bus == NULL || bus->iommu_ops == NULL)
1597 return NULL;
1598
1599 domain = bus->iommu_ops->domain_alloc(type);
1600 if (!domain)
1601 return NULL;
1602
1603 domain->ops = bus->iommu_ops;
1604 domain->type = type;
1605 /* Assume all sizes by default; the driver may override this later */
1606 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
1607
1608 return domain;
1609}
1610
1611struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1612{
1613 return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
1614}
1615EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1616
1617void iommu_domain_free(struct iommu_domain *domain)
1618{
1619 domain->ops->domain_free(domain);
1620}
1621EXPORT_SYMBOL_GPL(iommu_domain_free);
1622
1623static int __iommu_attach_device(struct iommu_domain *domain,
1624 struct device *dev)
1625{
1626 int ret;
1627 if ((domain->ops->is_attach_deferred != NULL) &&
1628 domain->ops->is_attach_deferred(domain, dev))
1629 return 0;
1630
1631 if (unlikely(domain->ops->attach_dev == NULL))
1632 return -ENODEV;
1633
1634 ret = domain->ops->attach_dev(domain, dev);
1635 if (!ret)
1636 trace_attach_device_to_domain(dev);
1637 return ret;
1638}
1639
1640int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
1641{
1642 struct iommu_group *group;
1643 int ret;
1644
1645 group = iommu_group_get(dev);
1646 if (!group)
1647 return -ENODEV;
1648
1649 /*
1650 * Lock the group to make sure the device-count doesn't
1651 * change while we are attaching
1652 */
1653 mutex_lock(&group->mutex);
1654 ret = -EINVAL;
1655 if (iommu_group_device_count(group) != 1)
1656 goto out_unlock;
1657
1658 ret = __iommu_attach_group(domain, group);
1659
1660out_unlock:
1661 mutex_unlock(&group->mutex);
1662 iommu_group_put(group);
1663
1664 return ret;
1665}
1666EXPORT_SYMBOL_GPL(iommu_attach_device);
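/*
 * Illustrative sketch (hypothetical iova/phys values, not part of this file):
 * a consumer taking direct control of translation by allocating an unmanaged
 * domain, attaching its device and mapping a single page.
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(dev->bus);
 *
 *	if (!dom)
 *		return -ENOMEM;
 *	if (iommu_attach_device(dom, dev))
 *		goto err_free;
 *	if (iommu_map(dom, iova, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE))
 *		goto err_detach;
 *	...
 *	iommu_unmap(dom, iova, SZ_4K);
 *	iommu_detach_device(dom, dev);
 *	iommu_domain_free(dom);
 */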
1667
1668static void __iommu_detach_device(struct iommu_domain *domain,
1669 struct device *dev)
1670{
1671 if ((domain->ops->is_attach_deferred != NULL) &&
1672 domain->ops->is_attach_deferred(domain, dev))
1673 return;
1674
1675 if (unlikely(domain->ops->detach_dev == NULL))
1676 return;
1677
1678 domain->ops->detach_dev(domain, dev);
1679 trace_detach_device_from_domain(dev);
1680}
1681
1682void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
1683{
1684 struct iommu_group *group;
1685
1686 group = iommu_group_get(dev);
1687 if (!group)
1688 return;
1689
1690 mutex_lock(&group->mutex);
1691 if (iommu_group_device_count(group) != 1) {
1692 WARN_ON(1);
1693 goto out_unlock;
1694 }
1695
1696 __iommu_detach_group(domain, group);
1697
1698out_unlock:
1699 mutex_unlock(&group->mutex);
1700 iommu_group_put(group);
1701}
1702EXPORT_SYMBOL_GPL(iommu_detach_device);
1703
1704struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
1705{
1706 struct iommu_domain *domain;
1707 struct iommu_group *group;
1708
1709 group = iommu_group_get(dev);
1710 if (!group)
1711 return NULL;
1712
1713 domain = group->domain;
1714
1715 iommu_group_put(group);
1716
1717 return domain;
1718}
1719EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
1720
1721/*
1722 * For use by IOMMU_DOMAIN_DMA implementations, which already provide their
1723 * own guarantee that the group and its default domain are valid and correct.
1724 */
1725struct iommu_domain *iommu_get_dma_domain(struct device *dev)
1726{
1727 return dev->iommu_group->default_domain;
1728}
1729
1730/*
1731 * IOMMU groups are really the natural working unit of the IOMMU, but
1732 * the IOMMU API works on domains and devices. Bridge that gap by
1733 * iterating over the devices in a group. Ideally we'd have a single
1734 * device which represents the requestor ID of the group, but we also
1735 * allow IOMMU drivers to create policy defined minimum sets, where
1736 * the physical hardware may be able to distinguish members, but we
1737 * wish to group them at a higher level (ex. untrusted multi-function
1738 * PCI devices). Thus we attach each device.
1739 */
1740static int iommu_group_do_attach_device(struct device *dev, void *data)
1741{
1742 struct iommu_domain *domain = data;
1743
1744 return __iommu_attach_device(domain, dev);
1745}
1746
1747static int __iommu_attach_group(struct iommu_domain *domain,
1748 struct iommu_group *group)
1749{
1750 int ret;
1751
1752 if (group->default_domain && group->domain != group->default_domain)
1753 return -EBUSY;
1754
1755 ret = __iommu_group_for_each_dev(group, domain,
1756 iommu_group_do_attach_device);
1757 if (ret == 0)
1758 group->domain = domain;
1759
1760 return ret;
1761}
1762
1763int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
1764{
1765 int ret;
1766
1767 mutex_lock(&group->mutex);
1768 ret = __iommu_attach_group(domain, group);
1769 mutex_unlock(&group->mutex);
1770
1771 return ret;
1772}
1773EXPORT_SYMBOL_GPL(iommu_attach_group);
1774
1775static int iommu_group_do_detach_device(struct device *dev, void *data)
1776{
1777 struct iommu_domain *domain = data;
1778
1779 __iommu_detach_device(domain, dev);
1780
1781 return 0;
1782}
1783
1784static void __iommu_detach_group(struct iommu_domain *domain,
1785 struct iommu_group *group)
1786{
1787 int ret;
1788
1789 if (!group->default_domain) {
1790 __iommu_group_for_each_dev(group, domain,
1791 iommu_group_do_detach_device);
1792 group->domain = NULL;
1793 return;
1794 }
1795
1796 if (group->domain == group->default_domain)
1797 return;
1798
1799 /* Detach by re-attaching to the default domain */
1800 ret = __iommu_group_for_each_dev(group, group->default_domain,
1801 iommu_group_do_attach_device);
1802 if (ret != 0)
1803 WARN_ON(1);
1804 else
1805 group->domain = group->default_domain;
1806}
1807
1808void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
1809{
1810 mutex_lock(&group->mutex);
1811 __iommu_detach_group(domain, group);
1812 mutex_unlock(&group->mutex);
1813}
1814EXPORT_SYMBOL_GPL(iommu_detach_group);
1815
1816phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
1817{
1818 if (unlikely(domain->ops->iova_to_phys == NULL))
1819 return 0;
1820
1821 return domain->ops->iova_to_phys(domain, iova);
1822}
1823EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
1824
1825static size_t iommu_pgsize(struct iommu_domain *domain,
1826 unsigned long addr_merge, size_t size)
1827{
1828 unsigned int pgsize_idx;
1829 size_t pgsize;
1830
1831 /* Max page size that still fits into 'size' */
1832 pgsize_idx = __fls(size);
1833
1834 /* need to consider alignment requirements ? */
1835 if (likely(addr_merge)) {
1836 /* Max page size allowed by address */
1837 unsigned int align_pgsize_idx = __ffs(addr_merge);
1838 pgsize_idx = min(pgsize_idx, align_pgsize_idx);
1839 }
1840
1841 /* build a mask of acceptable page sizes */
1842 pgsize = (1UL << (pgsize_idx + 1)) - 1;
1843
1844 /* throw away page sizes not supported by the hardware */
1845 pgsize &= domain->pgsize_bitmap;
1846
1847 /* make sure we're still sane */
1848 BUG_ON(!pgsize);
1849
1850 /* pick the biggest page */
1851 pgsize_idx = __fls(pgsize);
1852 pgsize = 1UL << pgsize_idx;
1853
1854 return pgsize;
1855}
1856
1857int iommu_map(struct iommu_domain *domain, unsigned long iova,
1858 phys_addr_t paddr, size_t size, int prot)
1859{
1860 const struct iommu_ops *ops = domain->ops;
1861 unsigned long orig_iova = iova;
1862 unsigned int min_pagesz;
1863 size_t orig_size = size;
1864 phys_addr_t orig_paddr = paddr;
1865 int ret = 0;
1866
1867 if (unlikely(ops->map == NULL ||
1868 domain->pgsize_bitmap == 0UL))
1869 return -ENODEV;
1870
1871 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
1872 return -EINVAL;
1873
1874 /* find out the minimum page size supported */
1875 min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
1876
1877 /*
1878 * both the virtual address and the physical one, as well as
1879 * the size of the mapping, must be aligned (at least) to the
1880 * size of the smallest page supported by the hardware
1881 */
1882 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
1883 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
1884 iova, &paddr, size, min_pagesz);
1885 return -EINVAL;
1886 }
1887
1888 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
1889
1890 while (size) {
1891 size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
1892
1893 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
1894 iova, &paddr, pgsize);
1895
1896 ret = ops->map(domain, iova, paddr, pgsize, prot);
1897 if (ret)
1898 break;
1899
1900 iova += pgsize;
1901 paddr += pgsize;
1902 size -= pgsize;
1903 }
1904
1905 if (ops->iotlb_sync_map)
1906 ops->iotlb_sync_map(domain);
1907
1908 /* unroll mapping in case something went wrong */
1909 if (ret)
1910 iommu_unmap(domain, orig_iova, orig_size - size);
1911 else
1912 trace_map(orig_iova, orig_paddr, orig_size);
1913
1914 return ret;
1915}
1916EXPORT_SYMBOL_GPL(iommu_map);
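
/*
 * Illustrative use only (not part of this file): a typical caller allocates
 * an unmanaged domain, attaches a device and maps a physically contiguous
 * buffer.  Names such as 'dom', 'dev', 'iova' and 'buf_phys' are
 * hypothetical:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(dev->bus);
 *
 *	if (!dom)
 *		return -ENOMEM;
 *	if (iommu_attach_device(dom, dev))
 *		goto err_free;
 *	if (iommu_map(dom, iova, buf_phys, SZ_2M, IOMMU_READ | IOMMU_WRITE))
 *		goto err_detach;
 *	...
 *	iommu_unmap(dom, iova, SZ_2M);
 */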
1917
1918static size_t __iommu_unmap(struct iommu_domain *domain,
1919 unsigned long iova, size_t size,
1920 struct iommu_iotlb_gather *iotlb_gather)
1921{
1922 const struct iommu_ops *ops = domain->ops;
1923 size_t unmapped_page, unmapped = 0;
1924 unsigned long orig_iova = iova;
1925 unsigned int min_pagesz;
1926
1927 if (unlikely(ops->unmap == NULL ||
1928 domain->pgsize_bitmap == 0UL))
1929 return 0;
1930
1931 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
1932 return 0;
1933
1934 /* find out the minimum page size supported */
1935 min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
1936
1937 /*
1938 * The virtual address, as well as the size of the mapping, must be
1939 * aligned (at least) to the size of the smallest page supported
1940 * by the hardware
1941 */
1942 if (!IS_ALIGNED(iova | size, min_pagesz)) {
1943 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
1944 iova, size, min_pagesz);
1945 return 0;
1946 }
1947
1948 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
1949
1950 /*
1951 * Keep iterating until we either unmap 'size' bytes (or more)
1952 * or we hit an area that isn't mapped.
1953 */
1954 while (unmapped < size) {
1955 size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
1956
1957 unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
1958 if (!unmapped_page)
1959 break;
1960
1961 pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
1962 iova, unmapped_page);
1963
1964 iova += unmapped_page;
1965 unmapped += unmapped_page;
1966 }
1967
1968 trace_unmap(orig_iova, size, unmapped);
1969 return unmapped;
1970}
1971
1972size_t iommu_unmap(struct iommu_domain *domain,
1973 unsigned long iova, size_t size)
1974{
1975 struct iommu_iotlb_gather iotlb_gather;
1976 size_t ret;
1977
1978 iommu_iotlb_gather_init(&iotlb_gather);
1979 ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
1980 iommu_tlb_sync(domain, &iotlb_gather);
1981
1982 return ret;
1983}
1984EXPORT_SYMBOL_GPL(iommu_unmap);
1985
1986size_t iommu_unmap_fast(struct iommu_domain *domain,
1987 unsigned long iova, size_t size,
1988 struct iommu_iotlb_gather *iotlb_gather)
1989{
1990 return __iommu_unmap(domain, iova, size, iotlb_gather);
1991}
1992EXPORT_SYMBOL_GPL(iommu_unmap_fast);
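
/*
 * Illustrative use only: unlike iommu_unmap(), the _fast variant leaves the
 * IOTLB flush to the caller, which lets several unmaps share a single sync.
 * A hypothetical caller batching two unmaps:
 *
 *	struct iommu_iotlb_gather gather;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	iommu_unmap_fast(domain, iova_a, SZ_4K, &gather);
 *	iommu_unmap_fast(domain, iova_b, SZ_4K, &gather);
 *	iommu_tlb_sync(domain, &gather);
 */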
1993
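/*
 * Map a scatterlist by coalescing physically contiguous entries into as few
 * iommu_map() calls as possible.  The loop deliberately runs one step past
 * the last entry (i == nents) so that the final contiguous run is flushed.
 * Returns the number of bytes mapped, or 0 on error (in which case any
 * partial mappings are torn down again).
 */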
1994size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
1995 struct scatterlist *sg, unsigned int nents, int prot)
1996{
1997 size_t len = 0, mapped = 0;
1998 phys_addr_t start;
1999 unsigned int i = 0;
2000 int ret;
2001
2002 while (i <= nents) {
2003 phys_addr_t s_phys = sg_phys(sg);
2004
2005 if (len && s_phys != start + len) {
2006 ret = iommu_map(domain, iova + mapped, start, len, prot);
2007 if (ret)
2008 goto out_err;
2009
2010 mapped += len;
2011 len = 0;
2012 }
2013
2014 if (len) {
2015 len += sg->length;
2016 } else {
2017 len = sg->length;
2018 start = s_phys;
2019 }
2020
2021 if (++i < nents)
2022 sg = sg_next(sg);
2023 }
2024
2025 return mapped;
2026
2027out_err:
2028 /* undo mappings already done */
2029 iommu_unmap(domain, iova, mapped);
2030
	return 0;
}
2034EXPORT_SYMBOL_GPL(iommu_map_sg);
2035
2036int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
2037 phys_addr_t paddr, u64 size, int prot)
2038{
2039 if (unlikely(domain->ops->domain_window_enable == NULL))
2040 return -ENODEV;
2041
2042 return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
2043 prot);
2044}
2045EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
2046
2047void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
2048{
2049 if (unlikely(domain->ops->domain_window_disable == NULL))
2050 return;
2051
2052 return domain->ops->domain_window_disable(domain, wnd_nr);
2053}
2054EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
2055
2056/**
2057 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
2058 * @domain: the iommu domain where the fault has happened
2059 * @dev: the device where the fault has happened
2060 * @iova: the faulting address
2061 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
2062 *
2063 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users that are
 * interested in such events to know about them.
2066 *
2067 * This event may be useful for several possible use cases:
2068 * - mere logging of the event
2069 * - dynamic TLB/PTE loading
 * - restarting the faulting device, if required
2071 *
2072 * Returns 0 on success and an appropriate error code otherwise (if dynamic
2073 * PTE/TLB loading will one day be supported, implementations will be able
2074 * to tell whether it succeeded or not according to this return value).
2075 *
2076 * Specifically, -ENOSYS is returned if a fault handler isn't installed
2077 * (though fault handlers can also return -ENOSYS, in case they want to
2078 * elicit the default behavior of the IOMMU drivers).
2079 */
2080int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
2081 unsigned long iova, int flags)
2082{
2083 int ret = -ENOSYS;
2084
2085 /*
2086 * if upper layers showed interest and installed a fault handler,
2087 * invoke it.
2088 */
2089 if (domain->handler)
2090 ret = domain->handler(domain, dev, iova, flags,
2091 domain->handler_token);
2092
2093 trace_io_page_fault(dev, iova, flags);
2094 return ret;
2095}
2096EXPORT_SYMBOL_GPL(report_iommu_fault);
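
/*
 * Illustrative use only: a domain owner interested in faults installs a
 * handler with iommu_set_fault_handler().  The handler below is hypothetical:
 *
 *	static int my_fault_handler(struct iommu_domain *dom, struct device *dev,
 *				    unsigned long iova, int flags, void *token)
 *	{
 *		dev_err(dev, "iommu fault at 0x%lx, flags 0x%x\n", iova, flags);
 *		return -ENOSYS;	(keep the IOMMU driver's default behaviour)
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);
 */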
2097
2098static int __init iommu_init(void)
2099{
2100 iommu_group_kset = kset_create_and_add("iommu_groups",
2101 NULL, kernel_kobj);
2102 BUG_ON(!iommu_group_kset);
2103
2104 iommu_debugfs_setup();
2105
2106 return 0;
2107}
2108core_initcall(iommu_init);
2109
2110int iommu_domain_get_attr(struct iommu_domain *domain,
2111 enum iommu_attr attr, void *data)
2112{
2113 struct iommu_domain_geometry *geometry;
2114 bool *paging;
2115 int ret = 0;
2116
2117 switch (attr) {
2118 case DOMAIN_ATTR_GEOMETRY:
2119 geometry = data;
2120 *geometry = domain->geometry;
2121
2122 break;
2123 case DOMAIN_ATTR_PAGING:
2124 paging = data;
2125 *paging = (domain->pgsize_bitmap != 0UL);
2126 break;
2127 default:
2128 if (!domain->ops->domain_get_attr)
2129 return -EINVAL;
2130
2131 ret = domain->ops->domain_get_attr(domain, attr, data);
2132 }
2133
2134 return ret;
2135}
2136EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
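
/*
 * Illustrative use only: query the addressable range of a domain via the
 * generic geometry attribute (handled above without driver involvement):
 *
 *	struct iommu_domain_geometry geo;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
 *		pr_info("aperture %pad..%pad\n",
 *			&geo.aperture_start, &geo.aperture_end);
 */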
2137
2138int iommu_domain_set_attr(struct iommu_domain *domain,
2139 enum iommu_attr attr, void *data)
2140{
2141 int ret = 0;
2142
2143 switch (attr) {
2144 default:
2145 if (domain->ops->domain_set_attr == NULL)
2146 return -EINVAL;
2147
2148 ret = domain->ops->domain_set_attr(domain, attr, data);
2149 }
2150
2151 return ret;
2152}
2153EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
2154
2155void iommu_get_resv_regions(struct device *dev, struct list_head *list)
2156{
2157 const struct iommu_ops *ops = dev->bus->iommu_ops;
2158
2159 if (ops && ops->get_resv_regions)
2160 ops->get_resv_regions(dev, list);
2161}
2162
2163void iommu_put_resv_regions(struct device *dev, struct list_head *list)
2164{
2165 const struct iommu_ops *ops = dev->bus->iommu_ops;
2166
2167 if (ops && ops->put_resv_regions)
2168 ops->put_resv_regions(dev, list);
2169}
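
/*
 * Illustrative use only: callers typically build a temporary list of the
 * regions reserved for a device, walk it, and hand it back:
 *
 *	struct iommu_resv_region *region;
 *	LIST_HEAD(resv_regions);
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list)
 *		pr_info("reserved: start %pa length 0x%zx\n",
 *			&region->start, region->length);
 *	iommu_put_resv_regions(dev, &resv_regions);
 */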
2170
2171struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
2172 size_t length, int prot,
2173 enum iommu_resv_type type)
2174{
2175 struct iommu_resv_region *region;
2176
2177 region = kzalloc(sizeof(*region), GFP_KERNEL);
2178 if (!region)
2179 return NULL;
2180
	INIT_LIST_HEAD(&region->list);
2182 region->start = start;
2183 region->length = length;
2184 region->prot = prot;
2185 region->type = type;
2186 return region;
2187}
2188
2189static int
2190request_default_domain_for_dev(struct device *dev, unsigned long type)
2191{
2192 struct iommu_domain *domain;
2193 struct iommu_group *group;
2194 int ret;
2195
2196 /* Device must already be in a group before calling this function */
2197 group = iommu_group_get(dev);
2198 if (!group)
2199 return -EINVAL;
2200
2201 mutex_lock(&group->mutex);
2202
2203 ret = 0;
2204 if (group->default_domain && group->default_domain->type == type)
2205 goto out;
2206
2207 /* Don't change mappings of existing devices */
2208 ret = -EBUSY;
2209 if (iommu_group_device_count(group) != 1)
2210 goto out;
2211
2212 ret = -ENOMEM;
2213 domain = __iommu_domain_alloc(dev->bus, type);
2214 if (!domain)
2215 goto out;
2216
2217 /* Attach the device to the domain */
2218 ret = __iommu_attach_group(domain, group);
2219 if (ret) {
2220 iommu_domain_free(domain);
2221 goto out;
2222 }
2223
2224 iommu_group_create_direct_mappings(group, dev);
2225
2226 /* Make the domain the default for this group */
2227 if (group->default_domain)
2228 iommu_domain_free(group->default_domain);
2229 group->default_domain = domain;
2230
2231 dev_info(dev, "Using iommu %s mapping\n",
2232 type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
2233
2234 ret = 0;
2235out:
2236 mutex_unlock(&group->mutex);
2237 iommu_group_put(group);
2238
2239 return ret;
2240}
2241
2242/* Request that a device is direct mapped by the IOMMU */
2243int iommu_request_dm_for_dev(struct device *dev)
2244{
2245 return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
2246}
2247
2248/* Request that a device can't be direct mapped by the IOMMU */
2249int iommu_request_dma_domain_for_dev(struct device *dev)
2250{
2251 return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
2252}
2253
2254void iommu_set_default_passthrough(bool cmd_line)
2255{
2256 if (cmd_line)
2257 iommu_set_cmd_line_dma_api();
2258
2259 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
2260}
2261
2262void iommu_set_default_translated(bool cmd_line)
2263{
2264 if (cmd_line)
2265 iommu_set_cmd_line_dma_api();
2266
2267 iommu_def_domain_type = IOMMU_DOMAIN_DMA;
2268}
2269
2270bool iommu_default_passthrough(void)
2271{
2272 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
2273}
2274EXPORT_SYMBOL_GPL(iommu_default_passthrough);
2275
2276const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
2277{
2278 const struct iommu_ops *ops = NULL;
2279 struct iommu_device *iommu;
2280
2281 spin_lock(&iommu_device_lock);
2282 list_for_each_entry(iommu, &iommu_device_list, list)
2283 if (iommu->fwnode == fwnode) {
2284 ops = iommu->ops;
2285 break;
2286 }
2287 spin_unlock(&iommu_device_lock);
2288 return ops;
2289}
2290
2291int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
2292 const struct iommu_ops *ops)
2293{
2294 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2295
2296 if (fwspec)
2297 return ops == fwspec->ops ? 0 : -EINVAL;
2298
2299 fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
2300 if (!fwspec)
2301 return -ENOMEM;
2302
2303 of_node_get(to_of_node(iommu_fwnode));
2304 fwspec->iommu_fwnode = iommu_fwnode;
2305 fwspec->ops = ops;
2306 dev_iommu_fwspec_set(dev, fwspec);
2307 return 0;
2308}
2309EXPORT_SYMBOL_GPL(iommu_fwspec_init);
2310
2311void iommu_fwspec_free(struct device *dev)
2312{
2313 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2314
2315 if (fwspec) {
2316 fwnode_handle_put(fwspec->iommu_fwnode);
2317 kfree(fwspec);
2318 dev_iommu_fwspec_set(dev, NULL);
2319 }
2320}
2321EXPORT_SYMBOL_GPL(iommu_fwspec_free);
2322
2323int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
2324{
2325 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2326 size_t size;
2327 int i;
2328
2329 if (!fwspec)
2330 return -EINVAL;
2331
2332 size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
2333 if (size > sizeof(*fwspec)) {
2334 fwspec = krealloc(fwspec, size, GFP_KERNEL);
2335 if (!fwspec)
2336 return -ENOMEM;
2337
2338 dev_iommu_fwspec_set(dev, fwspec);
2339 }
2340
2341 for (i = 0; i < num_ids; i++)
2342 fwspec->ids[fwspec->num_ids + i] = ids[i];
2343
2344 fwspec->num_ids += num_ids;
2345 return 0;
2346}
2347EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
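
/*
 * Illustrative use only: firmware glue code (e.g. an of_xlate() path) would
 * typically initialise the fwspec once and then append one ID per phandle
 * argument.  'iommu_fwnode', 'ops' and 'sid' are hypothetical here:
 *
 *	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
 *	if (!ret)
 *		ret = iommu_fwspec_add_ids(dev, &sid, 1);
 */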
2348
2349/*
2350 * Per device IOMMU features.
2351 */
2352bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
2353{
2354 const struct iommu_ops *ops = dev->bus->iommu_ops;
2355
2356 if (ops && ops->dev_has_feat)
2357 return ops->dev_has_feat(dev, feat);
2358
2359 return false;
2360}
2361EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
2362
2363int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
2364{
2365 const struct iommu_ops *ops = dev->bus->iommu_ops;
2366
2367 if (ops && ops->dev_enable_feat)
2368 return ops->dev_enable_feat(dev, feat);
2369
2370 return -ENODEV;
2371}
2372EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
2373
2374/*
 * Device drivers should do the necessary cleanups before calling this.
2376 * For example, before disabling the aux-domain feature, the device driver
2377 * should detach all aux-domains. Otherwise, this will return -EBUSY.
2378 */
2379int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
2380{
2381 const struct iommu_ops *ops = dev->bus->iommu_ops;
2382
2383 if (ops && ops->dev_disable_feat)
2384 return ops->dev_disable_feat(dev, feat);
2385
2386 return -EBUSY;
2387}
2388EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
2389
2390bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
2391{
2392 const struct iommu_ops *ops = dev->bus->iommu_ops;
2393
2394 if (ops && ops->dev_feat_enabled)
2395 return ops->dev_feat_enabled(dev, feat);
2396
2397 return false;
2398}
2399EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
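
/*
 * Illustrative use only: a driver checks for and enables a feature before
 * using the corresponding interface, e.g. for auxiliary domains:
 *
 *	if (iommu_dev_has_feature(dev, IOMMU_DEV_FEAT_AUX) &&
 *	    !iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX))
 *		ret = iommu_aux_attach_device(domain, dev);
 */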
2400
2401/*
2402 * Aux-domain specific attach/detach.
2403 *
2404 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
2405 * true. Also, as long as domains are attached to a device through this
2406 * interface, any tries to call iommu_attach_device() should fail
 * interface, any attempt to call iommu_attach_device() should fail
 * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
 * This should make us safe against a device being attached to a guest as a
 * whole while there are still PASID users on it (aux and SVA).
2411int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
2412{
2413 int ret = -ENODEV;
2414
2415 if (domain->ops->aux_attach_dev)
2416 ret = domain->ops->aux_attach_dev(domain, dev);
2417
2418 if (!ret)
2419 trace_attach_device_to_domain(dev);
2420
2421 return ret;
2422}
2423EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
2424
2425void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
2426{
2427 if (domain->ops->aux_detach_dev) {
2428 domain->ops->aux_detach_dev(domain, dev);
2429 trace_detach_device_from_domain(dev);
2430 }
2431}
2432EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
2433
2434int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
2435{
2436 int ret = -ENODEV;
2437
2438 if (domain->ops->aux_get_pasid)
2439 ret = domain->ops->aux_get_pasid(domain, dev);
2440
2441 return ret;
2442}
2443EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
2444
2445/**
2446 * iommu_sva_bind_device() - Bind a process address space to a device
2447 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque driver data, passed through to the IOMMU driver's sva_bind() callback
2449 *
2450 * Create a bond between device and address space, allowing the device to access
 * the mm using the returned PASID. If a bond already exists between @dev and
2452 * @mm, it is returned and an additional reference is taken. Caller must call
2453 * iommu_sva_unbind_device() to release each reference.
2454 *
2455 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
2456 * initialize the required SVA features.
2457 *
2458 * On error, returns an ERR_PTR value.
2459 */
2460struct iommu_sva *
2461iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
2462{
2463 struct iommu_group *group;
2464 struct iommu_sva *handle = ERR_PTR(-EINVAL);
2465 const struct iommu_ops *ops = dev->bus->iommu_ops;
2466
2467 if (!ops || !ops->sva_bind)
2468 return ERR_PTR(-ENODEV);
2469
2470 group = iommu_group_get(dev);
2471 if (!group)
2472 return ERR_PTR(-ENODEV);
2473
2474 /* Ensure device count and domain don't change while we're binding */
2475 mutex_lock(&group->mutex);
2476
2477 /*
2478 * To keep things simple, SVA currently doesn't support IOMMU groups
2479 * with more than one device. Existing SVA-capable systems are not
2480 * affected by the problems that required IOMMU groups (lack of ACS
2481 * isolation, device ID aliasing and other hardware issues).
2482 */
2483 if (iommu_group_device_count(group) != 1)
2484 goto out_unlock;
2485
2486 handle = ops->sva_bind(dev, mm, drvdata);
2487
2488out_unlock:
2489 mutex_unlock(&group->mutex);
2490 iommu_group_put(group);
2491
2492 return handle;
2493}
2494EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
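
/*
 * Illustrative use only (hypothetical driver code): bind the current
 * process, program the PASID into the device, and drop the bond when done:
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *	...
 *	iommu_sva_unbind_device(handle);
 */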
2495
2496/**
2497 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
2498 * @handle: the handle returned by iommu_sva_bind_device()
2499 *
2500 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transactions for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
2505 */
2506void iommu_sva_unbind_device(struct iommu_sva *handle)
2507{
2508 struct iommu_group *group;
2509 struct device *dev = handle->dev;
2510 const struct iommu_ops *ops = dev->bus->iommu_ops;
2511
2512 if (!ops || !ops->sva_unbind)
2513 return;
2514
2515 group = iommu_group_get(dev);
2516 if (!group)
2517 return;
2518
2519 mutex_lock(&group->mutex);
2520 ops->sva_unbind(handle);
2521 mutex_unlock(&group->mutex);
2522
2523 iommu_group_put(group);
2524}
2525EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
2526
2527int iommu_sva_set_ops(struct iommu_sva *handle,
2528 const struct iommu_sva_ops *sva_ops)
2529{
2530 if (handle->ops && handle->ops != sva_ops)
2531 return -EEXIST;
2532
2533 handle->ops = sva_ops;
2534 return 0;
2535}
2536EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
2537
2538int iommu_sva_get_pasid(struct iommu_sva *handle)
2539{
2540 const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
2541
2542 if (!ops || !ops->sva_get_pasid)
2543 return IOMMU_PASID_INVALID;
2544
2545 return ops->sva_get_pasid(handle);
2546}
2547EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);