v3.1
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>

static struct iommu_ops *iommu_ops;

void register_iommu(struct iommu_ops *ops)
{
	if (iommu_ops)
		BUG();

	iommu_ops = ops;
}

bool iommu_found(void)
{
	return iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_found);
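The following sketch is editorial, not part of the kernel source: it shows how this single-registration scheme is plausibly used on v3.1, with an IOMMU driver handing its ops to register_iommu() at init time and a consumer probing with iommu_found() before relying on the API. All my_* names are hypothetical.

/* Illustrative only: hypothetical driver and consumer for the v3.1 API. */
static int my_domain_init(struct iommu_domain *domain)
{
	/* a real driver would set up its page tables here */
	return 0;
}

static struct iommu_ops my_iommu_ops = {
	.domain_init = my_domain_init,
	/* .domain_destroy, .attach_dev, .detach_dev, .map, .unmap, ... */
};

static int __init my_iommu_driver_init(void)
{
	register_iommu(&my_iommu_ops);	/* BUG()s if ops were already set */
	return 0;
}

static int my_consumer_probe(void)
{
	if (!iommu_found())	/* no hardware IOMMU driver registered */
		return -ENODEV;
	return 0;
}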
struct iommu_domain *iommu_domain_alloc(void)
{
	struct iommu_domain *domain;
	int ret;

	domain = kmalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	ret = iommu_ops->domain_init(domain);
	if (ret)
		goto out_free;

	return domain;

out_free:
	kfree(domain);

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	iommu_ops->domain_destroy(domain);
	kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	return iommu_ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	iommu_ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
			       unsigned long iova)
{
	return iommu_ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

int iommu_domain_has_cap(struct iommu_domain *domain,
			 unsigned long cap)
{
	return iommu_ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, int gfp_order, int prot)
{
	unsigned long invalid_mask;
	size_t size;

	size         = 0x1000UL << gfp_order;
	invalid_mask = size - 1;

	BUG_ON((iova | paddr) & invalid_mask);

	return iommu_ops->map(domain, iova, paddr, gfp_order, prot);
}
EXPORT_SYMBOL_GPL(iommu_map);

int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
{
	unsigned long invalid_mask;
	size_t size;

	size         = 0x1000UL << gfp_order;
	invalid_mask = size - 1;

	BUG_ON(iova & invalid_mask);

	return iommu_ops->unmap(domain, iova, gfp_order);
}
EXPORT_SYMBOL_GPL(iommu_unmap);
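A sketch of the whole v3.1 consumer flow, again editorial and hypothetical (demo_* names are invented). Note that iommu_map()/iommu_unmap() take a page order here, not a byte count: the mapped size is 0x1000UL << gfp_order, and iova/paddr must be aligned to it.

/* Illustrative only: allocate a domain, attach, map one 4K page, tear down. */
static int demo_v31_flow(struct device *dev, phys_addr_t paddr)
{
	struct iommu_domain *domain;
	unsigned long iova = 0x100000;	/* aligned to the mapping size */
	int ret;

	domain = iommu_domain_alloc();
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* gfp_order 0 == a single 4K page */
	ret = iommu_map(domain, iova, paddr, 0, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* ... device DMA through iova happens here ... */

	iommu_unmap(domain, iova, 0);
out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}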
v3.15
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static struct ida iommu_group_ida;
static struct mutex iommu_group_mutex;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
};

struct iommu_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	mutex_lock(&iommu_group_mutex);
	ida_remove(&iommu_group_ida, group->id);
	mutex_unlock(&iommu_group_mutex);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	mutex_lock(&iommu_group_mutex);

again:
	if (unlikely(0 == ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
		kfree(group);
		mutex_unlock(&iommu_group_mutex);
		return ERR_PTR(-ENOMEM);
	}

	if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
		goto again;

	mutex_unlock(&iommu_group_mutex);

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		mutex_lock(&iommu_group_mutex);
		ida_remove(&iommu_group_ida, group->id);
		mutex_unlock(&iommu_group_mutex);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
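An editorial sketch of the reference dance the comment above describes: the pointer returned by iommu_group_alloc() carries one reference, which the driver drops with iommu_group_put() once devices have been added (or on any error path). demo_create_group() is a hypothetical helper.

/* Illustrative only: hypothetical group creation in an IOMMU driver. */
static struct iommu_group *demo_create_group(void)
{
	struct iommu_group *group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return group;	/* e.g. ERR_PTR(-ENOMEM) */

	/*
	 * The caller now holds the allocation reference; it is dropped
	 * with iommu_group_put() after devices have been added.
	 */
	return group;
}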
struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct iommu_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret) {
		kfree(device);
		return ret;
	}

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		sysfs_remove_link(&dev->kobj, "iommu_group");
		kfree(device);
		return -ENOMEM;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		kfree(device->name);
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}

		sysfs_remove_link(&dev->kobj, "iommu_group");
		kfree(device);
		return ret;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	mutex_unlock(&group->mutex);

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
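An editorial sketch of the add path as an IOMMU driver's ->add_device() callback might use it; the policy helper and demo_* names are invented, with the simplest possible one-group-per-device policy standing in for real isolation analysis.

/* Illustrative only: typical ->add_device() flow in an IOMMU driver. */
static struct iommu_group *demo_find_group(struct device *dev)
{
	/* simplest policy: one group per device; returns a referenced group */
	return iommu_group_alloc();
}

static int demo_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = demo_find_group(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = iommu_group_add_device(group, dev);

	/* drop the reference returned by demo_find_group() */
	iommu_group_put(group);

	return ret;
}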
/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct iommu_device *tmp_device, *device = NULL;

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	struct iommu_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
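A small editorial sketch of the iterator: a callback that counts group members, exploiting the documented contract that a non-zero return stops the walk. demo_* names are hypothetical.

/* Illustrative only: count the devices in a group via the iterator. */
static int demo_count_one(struct device *dev, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* non-zero would stop the walk and be returned */
}

static int demo_count_devices(struct iommu_group *group)
{
	int count = 0;

	iommu_group_for_each_dev(group, &count, demo_count_one);
	return count;
}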
/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented; otherwise NULL is returned.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
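An editorial sketch of the notifier pair above, watching a group for device arrival; demo_* names are hypothetical and the caller is assumed to hold a group reference while registered, per the documentation.

/* Illustrative only: watching a group for device hotplug. */
static int demo_group_notify(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
		dev_info(dev, "joined the iommu group\n");

	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_group_notify,
};

static int demo_watch_group(struct iommu_group *group)
{
	/* hold a group reference for as long as demo_nb stays registered */
	return iommu_group_register_notifier(group, &demo_nb);
}

static void demo_unwatch_group(struct iommu_group *group)
{
	iommu_group_unregister_notifier(group, &demo_nb);
}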
/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static int add_iommu_group(struct device *dev, void *data)
{
	struct iommu_ops *ops = data;

	if (!ops->add_device)
		return -ENODEV;

	WARN_ON(dev->iommu_group);

	ops->add_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	unsigned long group_action = 0;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (ops->add_device)
			return ops->add_device(dev);
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		if (ops->remove_device && dev->iommu_group) {
			ops->remove_device(dev);
			return 0;
		}
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static struct notifier_block iommu_bus_nb = {
	.notifier_call = iommu_bus_notifier,
};

static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
	bus_register_notifier(bus, &iommu_bus_nb);
	bus_for_each_dev(bus, NULL, ops, add_iommu_group);
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	iommu_bus_init(bus, ops);

	return 0;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
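By v3.15 registration has become per-bus; an editorial sketch of how a driver would publish its ops for PCI (demo_* names are invented, and pci_bus_type assumes <linux/pci.h>):

/* Illustrative only: how an IOMMU driver would publish its ops for PCI. */
static struct iommu_ops demo_iommu_ops = {
	/* .add_device, .remove_device, .domain_init, .map, .unmap, ... */
};

static int __init demo_iommu_setup(void)
{
	/* fails with -EBUSY if another driver already claimed the bus */
	return bus_set_iommu(&pci_bus_type, &demo_iommu_ops);
}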
bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
					iommu_fault_handler_t handler,
					void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
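An editorial sketch of a fault handler matching the iommu_fault_handler_t signature (domain, device, faulting iova, flags, token); the demo_* names are hypothetical.

/* Illustrative only: a fault handler that logs and declines to handle. */
static int demo_fault_handler(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags, void *token)
{
	dev_err(dev, "iommu fault at iova 0x%lx (flags 0x%x)\n", iova, flags);
	return -ENOSYS;	/* non-zero: not handled, let the driver report it */
}

static void demo_arm_fault_handler(struct iommu_domain *domain)
{
	iommu_set_fault_handler(domain, demo_fault_handler, NULL);
}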
struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	struct iommu_domain *domain;
	int ret;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->ops = bus->iommu_ops;

	ret = domain->ops->domain_init(domain);
	if (ret)
		goto out_free;

	return domain;

out_free:
	kfree(domain);

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	if (likely(domain->ops->domain_destroy != NULL))
		domain->ops->domain_destroy(domain);

	kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return iommu_attach_device(domain, dev);
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	return iommu_group_for_each_dev(group, domain,
					iommu_group_do_attach_device);
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	iommu_detach_device(domain, dev);

	return 0;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	iommu_group_for_each_dev(group, domain, iommu_group_do_detach_device);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);
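An editorial sketch of group-level attach as a VFIO-style user might drive it, looking up the group from a device; demo_* names are hypothetical.

/* Illustrative only: attach a whole group, then tear it back down. */
static int demo_use_group(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);	/* takes a group reference */
	if (!group)
		return -ENODEV;

	ret = iommu_attach_group(domain, group);
	if (ret)	/* note: members attached before the failure remain so */
		goto out_put;

	/* ... use the domain; all group members are now attached ... */

	iommu_detach_group(domain, group);
out_put:
	iommu_group_put(group);
	return ret;
}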
phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

int iommu_domain_has_cap(struct iommu_domain *domain,
			 unsigned long cap)
{
	if (unlikely(domain->ops->domain_has_cap == NULL))
		return 0;

	return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements ? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);
		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->ops->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}
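To make the selection in iommu_pgsize() concrete, a worked example with invented numbers: assume a pgsize_bitmap of 4K | 2M | 1G (0x40201000), addr_merge = iova | paddr = 0x201000 and size = 0x5000. Then __fls(size) yields index 14, but __ffs(addr_merge) yields 12, so pgsize_idx becomes 12; the mask (1UL << 13) - 1 = 0x1fff intersected with the bitmap leaves only 0x1000, and a single 4K page is chosen for this iteration. Larger pages get picked up automatically once the advancing addresses and remaining size permit.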
int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL ||
		     domain->ops->pgsize_bitmap == 0UL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(iova, paddr, size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;

	if (unlikely(domain->ops->unmap == NULL ||
		     domain->ops->pgsize_bitmap == 0UL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(iova, 0, size);
	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
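A sketch of the byte-based v3.15 interface, for contrast with the page-order variant in the v3.1 listing above; the core splits the range into hardware-supported page sizes via iommu_pgsize(). SZ_2M assumes <linux/sizes.h>, and demo_* names are invented.

/* Illustrative only: map and unmap 2M, letting the core pick page sizes. */
static int demo_map_2m(struct iommu_domain *domain, unsigned long iova,
		       phys_addr_t paddr)
{
	size_t unmapped;
	int ret;

	ret = iommu_map(domain, iova, paddr, SZ_2M, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* ... device DMA through iova happens here ... */

	unmapped = iommu_unmap(domain, iova, SZ_2M);
	WARN_ON(unmapped != SZ_2M);

	return 0;
}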
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	ida_init(&iommu_group_ida);
	mutex_init(&iommu_group_mutex);

	BUG_ON(!iommu_group_kset);

	return 0;
}
arch_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry  = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging  = data;
		*paging = (domain->ops->pgsize_bitmap != 0UL);
		break;
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_get_windows != NULL)
			*count = domain->ops->domain_get_windows(domain);
		else
			ret = -ENODEV;

		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_set_windows != NULL)
			ret = domain->ops->domain_set_windows(domain, *count);
		else
			ret = -ENODEV;

		break;
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
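Closing with an editorial sketch of the attribute interface: querying the aperture a domain can address via DOMAIN_ATTR_GEOMETRY. The demo_* name is invented; the struct iommu_domain_geometry fields are as defined in include/linux/iommu.h for this version.

/* Illustrative only: query the aperture a domain can address. */
static void demo_print_geometry(struct iommu_domain *domain)
{
	struct iommu_domain_geometry geo;

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
		return;

	pr_info("aperture: 0x%llx - 0x%llx (%sforced)\n",
		(unsigned long long)geo.aperture_start,
		(unsigned long long)geo.aperture_end,
		geo.force_aperture ? "" : "not ");
}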